comment
stringlengths
1
45k
method_body
stringlengths
23
281k
target_code
stringlengths
0
5.16k
method_body_after
stringlengths
12
281k
context_before
stringlengths
8
543k
context_after
stringlengths
8
543k
is it still possible after this change?
private void processSendWork() { if (!hasConnected.get()) { logger.warning("Not connected. Not processing send work."); return; } if (isDisposed.get()) { logger.info("Sender is closed. Not executing work."); return; } while (hasConnected.get() && sender.getCredit() > 0) { final WeightedDeliveryTag weightedDelivery; final RetriableWorkItem workItem; final String deliveryTag; synchronized (pendingSendLock) { weightedDelivery = this.pendingSendsQueue.poll(); if (weightedDelivery != null) { deliveryTag = weightedDelivery.getDeliveryTag(); workItem = this.pendingSendsMap.get(deliveryTag); } else { workItem = null; deliveryTag = null; } } if (workItem == null) { if (deliveryTag != null) { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("sendData not found for this delivery."); } break; } Delivery delivery = null; boolean linkAdvance = false; int sentMsgSize = 0; Exception sendException = null; try { workItem.beforeTry(); delivery = sender.delivery(deliveryTag.getBytes(UTF_8)); delivery.setMessageFormat(workItem.getMessageFormat()); if (workItem.isDeliveryStateProvided()) { delivery.disposition(workItem.getDeliveryState()); } if (workItem.getMessage() != null) { final byte[] encodedBytes = workItem.getMessage(); sentMsgSize = sender.send(encodedBytes, 0, workItem.getEncodedMessageSize()); } else { final ReadableBuffer encodedBuffer = workItem.getEncodedBuffer(); encodedBuffer.position(0); sentMsgSize = sender.send(encodedBuffer); } sentMsgSize = sender.send(workItem.getMessage(), 0, workItem.getEncodedMessageSize()); assert sentMsgSize == workItem.getEncodedMessageSize() : "Contract of the ProtonJ library for Sender. 
Send API changed"; linkAdvance = sender.advance(); } catch (Exception exception) { sendException = exception; } if (linkAdvance) { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("Sent message."); workItem.setWaitingForAck(); scheduler.schedule(new SendTimeout(deliveryTag), retryOptions.getTryTimeout().toMillis(), TimeUnit.MILLISECONDS); } else { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .addKeyValue("sentMessageSize", sentMsgSize) .addKeyValue("payloadActualSize", workItem.getEncodedMessageSize()) .log("Sendlink advance failed."); DeliveryState outcome = null; if (delivery != null) { outcome = delivery.getRemoteState(); delivery.free(); } final AmqpErrorContext context = handler.getErrorContext(sender); final Throwable exception = sendException != null ? new OperationCancelledException(String.format(Locale.US, "Entity(%s): send operation failed. Please see cause for more details", entityPath), sendException, context) : new OperationCancelledException(String.format(Locale.US, "Entity(%s): send operation failed while advancing delivery(tag: %s).", entityPath, deliveryTag), context); workItem.error(exception, outcome); } } }
if (workItem.getMessage() != null) {
private void processSendWork() { if (!hasConnected.get()) { logger.warning("Not connected. Not processing send work."); return; } if (isDisposed.get()) { logger.info("Sender is closed. Not executing work."); return; } while (hasConnected.get() && sender.getCredit() > 0) { final WeightedDeliveryTag weightedDelivery; final RetriableWorkItem workItem; final String deliveryTag; synchronized (pendingSendLock) { weightedDelivery = this.pendingSendsQueue.poll(); if (weightedDelivery != null) { deliveryTag = weightedDelivery.getDeliveryTag(); workItem = this.pendingSendsMap.get(deliveryTag); } else { workItem = null; deliveryTag = null; } } if (workItem == null) { if (deliveryTag != null) { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("sendData not found for this delivery."); } break; } Delivery delivery = null; boolean linkAdvance = false; int sentMsgSize = 0; Exception sendException = null; try { workItem.beforeTry(); delivery = sender.delivery(deliveryTag.getBytes(UTF_8)); delivery.setMessageFormat(workItem.getMessageFormat()); if (workItem.isDeliveryStateProvided()) { delivery.disposition(workItem.getDeliveryState()); } workItem.send(sender); linkAdvance = sender.advance(); } catch (Exception exception) { sendException = exception; } if (linkAdvance) { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("Sent message."); workItem.setWaitingForAck(); scheduler.schedule(new SendTimeout(deliveryTag), retryOptions.getTryTimeout().toMillis(), TimeUnit.MILLISECONDS); } else { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .addKeyValue("sentMessageSize", sentMsgSize) .addKeyValue("payloadActualSize", workItem.getEncodedMessageSize()) .log("Sendlink advance failed."); DeliveryState outcome = null; if (delivery != null) { outcome = delivery.getRemoteState(); delivery.free(); } final AmqpErrorContext context = handler.getErrorContext(sender); final Throwable exception = sendException != null ? 
new OperationCancelledException(String.format(Locale.US, "Entity(%s): send operation failed. Please see cause for more details", entityPath), sendException, context) : new OperationCancelledException(String.format(Locale.US, "Entity(%s): send operation failed while advancing delivery(tag: %s).", entityPath, deliveryTag), context); workItem.error(exception, outcome); } } }
class ReactorSender implements AmqpSendLink, AsyncCloseable, AutoCloseable { private static final String DELIVERY_TAG_KEY = "deliveryTag"; private static final String PENDING_SENDS_SIZE_KEY = "pending_sends_size"; private final String entityPath; private final Sender sender; private final SendLinkHandler handler; private final ReactorProvider reactorProvider; private final Disposable.Composite subscriptions; private final AtomicBoolean hasConnected = new AtomicBoolean(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicInteger retryAttempts = new AtomicInteger(); private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final Object pendingSendLock = new Object(); private final ConcurrentHashMap<String, RetriableWorkItem> pendingSendsMap = new ConcurrentHashMap<>(); private final PriorityQueue<WeightedDeliveryTag> pendingSendsQueue = new PriorityQueue<>(1000, new DeliveryTagComparator()); private final ClientLogger logger; private final Flux<AmqpEndpointState> endpointStates; private final TokenManager tokenManager; private final MessageSerializer messageSerializer; private final AmqpRetryPolicy retry; private final AmqpRetryOptions retryOptions; private final String activeTimeoutMessage; private final Scheduler scheduler; private final AmqpMetricsProvider metricsProvider; private final Object errorConditionLock = new Object(); private volatile Exception lastKnownLinkError; private volatile Instant lastKnownErrorReportedAt; private volatile int linkSize; /** * Creates an instance of {@link ReactorSender}. * * @param amqpConnection The parent {@link AmqpConnection} that this sender lives in. * @param entityPath The message broker address for the sender. * @param sender The underlying proton-j sender. * @param handler The proton-j handler associated with the sender. * @param reactorProvider Provider to schedule work on the proton-j reactor. * @param tokenManager Token manager for authorising with the CBS node. 
Can be {@code null} if it is part of the * transaction manager. * @param messageSerializer Serializer to deserialise and serialize AMQP messages. * @param retryOptions Retry options. * @param scheduler Scheduler to schedule send timeout. */ ReactorSender(AmqpConnection amqpConnection, String entityPath, Sender sender, SendLinkHandler handler, ReactorProvider reactorProvider, TokenManager tokenManager, MessageSerializer messageSerializer, AmqpRetryOptions retryOptions, Scheduler scheduler, AmqpMetricsProvider metricsProvider) { this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.sender = Objects.requireNonNull(sender, "'sender' cannot be null."); this.handler = Objects.requireNonNull(handler, "'handler' cannot be null."); this.reactorProvider = Objects.requireNonNull(reactorProvider, "'reactorProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null."); this.scheduler = Objects.requireNonNull(scheduler, "'scheduler' cannot be null."); this.retry = RetryUtil.getRetryPolicy(retryOptions); this.tokenManager = tokenManager; this.metricsProvider = metricsProvider; String connectionId = handler.getConnectionId() == null ? NOT_APPLICABLE : handler.getConnectionId(); String linkName = getLinkName() == null ? 
NOT_APPLICABLE : getLinkName(); Map<String, Object> loggingContext = createContextWithConnectionId(connectionId); loggingContext.put(LINK_NAME_KEY, linkName); loggingContext.put(ENTITY_PATH_KEY, entityPath); this.logger = new ClientLogger(ReactorSender.class, loggingContext); this.activeTimeoutMessage = String.format( "ReactorSender connectionId[%s] linkName[%s]: Waiting for send and receive handler to be ACTIVE", handler.getConnectionId(), handler.getLinkName()); this.endpointStates = this.handler.getEndpointStates() .map(state -> { logger.verbose("State {}", state); this.hasConnected.set(state == EndpointState.ACTIVE); return AmqpEndpointStateUtil.getConnectionState(state); }) .doOnError(error -> { hasConnected.set(false); handleError(error); }) .doOnComplete(() -> { hasConnected.set(false); handleClose(); }) .cache(1); this.subscriptions = Disposables.composite( this.endpointStates.subscribe(), this.handler.getDeliveredMessages().subscribe(this::processDeliveredMessage), this.handler.getLinkCredits().subscribe(credit -> { logger.atVerbose().addKeyValue("credits", credit) .log("Credits on link."); this.scheduleWorkOnDispatcher(); }), amqpConnection.getShutdownSignals().flatMap(signal -> { logger.verbose("Shutdown signal received."); hasConnected.set(false); return closeAsync("Connection shutdown.", null); }).subscribe() ); if (tokenManager != null) { this.subscriptions.add(tokenManager.getAuthorizationResults().onErrorResume(error -> { final Mono<Void> operation = closeAsync(String.format("connectionId[%s] linkName[%s] Token renewal failure. Disposing send " + "link.", amqpConnection.getId(), getLinkName()), new ErrorCondition(Symbol.getSymbol(NOT_ALLOWED.getErrorCondition()), error.getMessage())); return operation.then(Mono.empty()); }).subscribe(response -> { logger.atVerbose().addKeyValue("response", response) .log("Token refreshed."); }, error -> { }, () -> { logger.verbose(" Authorization completed. Disposing."); closeAsync("Authorization completed. 
Disposing.", null).subscribe(); })); } } @Override public Flux<AmqpEndpointState> getEndpointStates() { return endpointStates; } @Override public Mono<Void> send(Message message) { return send(message, null); } @Override public Mono<Void> send(Message message, DeliveryState deliveryState) { if (isDisposed.get()) { return Mono.error(new IllegalStateException(String.format( "connectionId[%s] linkName[%s] Cannot publish message when disposed.", handler.getConnectionId(), getLinkName()))); } return getLinkSize() .flatMap(maxMessageSize -> { final int payloadSize = messageSerializer.getSize(message); final int allocationSize = Math.min(payloadSize + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize); final byte[] bytes = new byte[allocationSize]; int encodedSize; try { encodedSize = message.encode(bytes, 0, allocationSize); } catch (BufferOverflowException exception) { final String errorMessage = String.format(Locale.US, "Error sending. Size of the payload exceeded maximum message size: %s kb", maxMessageSize / 1024); final Throwable error = new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, errorMessage, exception, handler.getErrorContext(sender)); return Mono.error(error); } return send(bytes, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT, deliveryState); }).then(); } @Override public Mono<Void> send(List<Message> messageBatch) { return send(messageBatch, null); } @Override public Mono<Void> send(List<Message> messageBatch, DeliveryState deliveryState) { if (isDisposed.get()) { return Mono.error(new IllegalStateException(String.format( "connectionId[%s] linkName[%s] Cannot publish data batch when disposed.", handler.getConnectionId(), getLinkName()))); } if (messageBatch.size() == 1) { return send(messageBatch.get(0), deliveryState); } return getLinkSize() .flatMap(maxMessageSize -> { final Message firstMessage = messageBatch.get(0); final Message batchMessage = Proton.message(); 
batchMessage.setMessageAnnotations(firstMessage.getMessageAnnotations()); final int maxMessageSizeTemp = maxMessageSize; final byte[] bytes = new byte[maxMessageSizeTemp]; int encodedSize = batchMessage.encode(bytes, 0, maxMessageSizeTemp); int byteArrayOffset = encodedSize; for (final Message amqpMessage : messageBatch) { final Message messageWrappedByData = Proton.message(); int payloadSize = messageSerializer.getSize(amqpMessage); int allocationSize = Math.min(payloadSize + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSizeTemp); byte[] messageBytes = new byte[allocationSize]; int messageSizeBytes = amqpMessage.encode(messageBytes, 0, allocationSize); messageWrappedByData.setBody(new Data(new Binary(messageBytes, 0, messageSizeBytes))); try { encodedSize = messageWrappedByData .encode(bytes, byteArrayOffset, maxMessageSizeTemp - byteArrayOffset - 1); } catch (BufferOverflowException exception) { final String message = String.format(Locale.US, "Size of the payload exceeded maximum message size: %s kb", maxMessageSizeTemp / 1024); final AmqpException error = new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, exception, handler.getErrorContext(sender)); return Mono.error(error); } byteArrayOffset = byteArrayOffset + encodedSize; } return send(bytes, byteArrayOffset, AmqpConstants.AMQP_BATCH_MESSAGE_FORMAT, deliveryState); }).then(); } private Mono<Void> batchSend(List<Message> batch, int maxMessageSize, DeliveryState deliveryState) { int totalEncodedSize = 0; final CompositeReadableBuffer buffer = new CompositeReadableBuffer(); final byte[] envelopBytes = batchEnvelopBytes(batch.get(0), maxMessageSize); totalEncodedSize += envelopBytes.length; if (totalEncodedSize > maxMessageSize) { return batchBufferOverflowError(maxMessageSize); } buffer.append(envelopBytes); for (final Message message : batch) { final byte[] sectionBytes = batchBinaryDataSectionBytes(message, maxMessageSize); totalEncodedSize += sectionBytes.length; if (totalEncodedSize > 
maxMessageSize) { return batchBufferOverflowError(maxMessageSize); } buffer.append(sectionBytes); } final Flux<EndpointState> activeEndpointFlux = RetryUtil.withRetry( handler.getEndpointStates().takeUntil(state -> state == EndpointState.ACTIVE), retryOptions, activeTimeoutMessage); final int totalEncodedBufferSize = totalEncodedSize; Mono<DeliveryState> sendMono = activeEndpointFlux.then(Mono.create(sink -> { sendWork(new RetriableWorkItem(buffer, totalEncodedBufferSize, AmqpConstants.AMQP_BATCH_MESSAGE_FORMAT, sink, retryOptions.getTryTimeout(), deliveryState, metricsProvider)); })); return sendMono.then(); } private byte[] batchEnvelopBytes(Message envelopMessage, int maxMessageSize) { final Message message = Proton.message(); message.setMessageAnnotations(envelopMessage.getMessageAnnotations()); final int size = messageSerializer.getSize(message); final int allocationSize = Math.min(size + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize); final byte[] encodedBytes = new byte[allocationSize]; final int encodedSize = message.encode(encodedBytes, 0, allocationSize); return Arrays.copyOf(encodedBytes, encodedSize); } private byte[] batchBinaryDataSectionBytes(Message sectionMessage, int maxMessageSize) { final int size = messageSerializer.getSize(sectionMessage); final int allocationSize = Math.min(size + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize); final byte[] encodedBytes = new byte[allocationSize]; final int encodedSize = sectionMessage.encode(encodedBytes, 0, allocationSize); final Message message = Proton.message(); final Data binaryData = new Data(new Binary(encodedBytes, 0, encodedSize)); message.setBody(binaryData); final int binaryRawSize = binaryData.getValue().getLength(); final int binaryEncodedSize = binaryEncodedSize(binaryRawSize); final byte[] binaryEncodedBytes = new byte[binaryEncodedSize]; message.encode(binaryEncodedBytes, 0, binaryEncodedSize); return binaryEncodedBytes; } private Mono<Void> batchBufferOverflowError(int maxMessageSize) { return 
Mono.error(new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, String.format(Locale.US, "Size of the payload exceeded maximum message size: %s kb", maxMessageSize / 1024), new BufferOverflowException(), handler.getErrorContext(sender))); } private int binaryEncodedSize(int binaryRawSize) { if (binaryRawSize <= 255) { return 5 + binaryRawSize; } else { return 8 + binaryRawSize; } } @Override public AmqpErrorContext getErrorContext() { return handler.getErrorContext(sender); } @Override public String getLinkName() { return sender.getName(); } @Override public String getEntityPath() { return entityPath; } @Override public String getHostname() { return handler.getHostname(); } @Override public Mono<Integer> getLinkSize() { if (linkSize > 0) { return Mono.defer(() -> Mono.just(this.linkSize)); } synchronized (this) { if (linkSize > 0) { return Mono.defer(() -> Mono.just(linkSize)); } return RetryUtil.withRetry(getEndpointStates().takeUntil(state -> state == AmqpEndpointState.ACTIVE), retryOptions, activeTimeoutMessage) .then(Mono.fromCallable(() -> { final UnsignedLong remoteMaxMessageSize = sender.getRemoteMaxMessageSize(); if (remoteMaxMessageSize != null) { linkSize = remoteMaxMessageSize.intValue(); } else { logger.warning("Could not get the getRemoteMaxMessageSize. Returning current link size: {}", linkSize); } return linkSize; })); } } @Override public boolean isDisposed() { return isDisposed.get(); } /** * Blocking call that disposes of the sender. * * @see */ @Override public void dispose() { close(); } /** * Blocking call that disposes of the sender. * * @see */ @Override public void close() { closeAsync().block(retryOptions.getTryTimeout()); } @Override public Mono<Void> closeAsync() { return closeAsync("User invoked close operation.", null); } /** * Disposes of the sender. * * @param errorCondition Error condition associated with close operation. * @param message Message associated with why the sender was closed. 
* * @return A mono that completes when the send link has closed. */ Mono<Void> closeAsync(String message, ErrorCondition errorCondition) { if (isDisposed.getAndSet(true)) { return isClosedMono.asMono(); } addErrorCondition(logger.atVerbose(), errorCondition) .log("Setting error condition and disposing. {}", message); final Runnable closeWork = () -> { if (errorCondition != null && sender.getCondition() == null) { sender.setCondition(errorCondition); } sender.close(); }; return Mono.fromRunnable(() -> { try { reactorProvider.getReactorDispatcher().invoke(closeWork); } catch (IOException e) { logger.warning("Could not schedule close work. Running manually. And completing close.", e); closeWork.run(); handleClose(); } catch (RejectedExecutionException e) { logger.info("RejectedExecutionException scheduling close work. And completing close."); closeWork.run(); handleClose(); } }).then(isClosedMono.asMono()) .publishOn(Schedulers.boundedElastic()); } /** * A mono that completes when the sender has completely closed. * * @return mono that completes when the sender has completely closed. */ Mono<Void> isClosed() { return isClosedMono.asMono(); } @Override public Mono<DeliveryState> send(byte[] bytes, int arrayOffset, int messageFormat, DeliveryState deliveryState) { final Flux<EndpointState> activeEndpointFlux = RetryUtil.withRetry( handler.getEndpointStates().takeUntil(state -> state == EndpointState.ACTIVE), retryOptions, activeTimeoutMessage); return activeEndpointFlux.then(Mono.create(sink -> { sendWork(new RetriableWorkItem(bytes, arrayOffset, messageFormat, sink, retryOptions.getTryTimeout(), deliveryState, metricsProvider)); })); } /** * Add the work item in pending send to be processed on {@link ReactorDispatcher} thread. * * @param workItem to be processed. 
*/ private void sendWork(RetriableWorkItem workItem) { final String deliveryTag = UUID.randomUUID().toString().replace("-", ""); synchronized (pendingSendLock) { this.pendingSendsMap.put(deliveryTag, workItem); this.pendingSendsQueue.offer(new WeightedDeliveryTag(deliveryTag, workItem.hasBeenRetried() ? 1 : 0)); } this.scheduleWorkOnDispatcher(); } /** * Invokes work on the Reactor. Should only be called from ReactorDispatcher.invoke() */ private void processDeliveredMessage(Delivery delivery) { final DeliveryState outcome = delivery.getRemoteState(); final String deliveryTag = new String(delivery.getTag(), UTF_8); logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("Process delivered message."); final RetriableWorkItem workItem = pendingSendsMap.remove(deliveryTag); if (workItem == null) { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("Mismatch (or send timed out)."); return; } else if (workItem.isDeliveryStateProvided()) { workItem.success(outcome); return; } if (outcome instanceof Accepted) { synchronized (errorConditionLock) { lastKnownLinkError = null; lastKnownErrorReportedAt = null; retryAttempts.set(0); } workItem.success(outcome); } else if (outcome instanceof Rejected) { final Rejected rejected = (Rejected) outcome; final org.apache.qpid.proton.amqp.transport.ErrorCondition error = rejected.getError(); final Exception exception = ExceptionUtil.toException(error.getCondition().toString(), error.getDescription(), handler.getErrorContext(sender)); logger.atWarning() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .addKeyValue("rejected", rejected) .log("Delivery rejected."); final int retryAttempt; if (isGeneralSendError(error.getCondition())) { synchronized (errorConditionLock) { lastKnownLinkError = exception; lastKnownErrorReportedAt = Instant.now(); retryAttempt = retryAttempts.incrementAndGet(); } } else { retryAttempt = retryAttempts.get(); } final Duration retryInterval = retry.calculateRetryDelay(exception, 
retryAttempt); if (retryInterval == null || retryInterval.compareTo(workItem.getTimeoutTracker().remaining()) > 0) { cleanupFailedSend(workItem, exception, outcome); } else { workItem.setLastKnownException(exception); try { reactorProvider.getReactorDispatcher().invoke(() -> sendWork(workItem), retryInterval); } catch (IOException | RejectedExecutionException schedulerException) { exception.initCause(schedulerException); cleanupFailedSend( workItem, new AmqpException(false, String.format(Locale.US, "Entity(%s): send operation failed while scheduling a" + " retry on Reactor, see cause for more details.", entityPath), schedulerException, handler.getErrorContext(sender)), outcome); } } } else if (outcome instanceof Released) { cleanupFailedSend(workItem, new OperationCancelledException(outcome.toString(), handler.getErrorContext(sender)), outcome); } else if (outcome instanceof Declared) { final Declared declared = (Declared) outcome; workItem.success(declared); } else { cleanupFailedSend(workItem, new AmqpException(false, outcome.toString(), handler.getErrorContext(sender)), outcome); } } private void scheduleWorkOnDispatcher() { try { reactorProvider.getReactorDispatcher().invoke(this::processSendWork); } catch (IOException e) { logger.warning("Error scheduling work on reactor.", e); } catch (RejectedExecutionException e) { logger.info("Error scheduling work on reactor because of RejectedExecutionException."); } } private void cleanupFailedSend(final RetriableWorkItem workItem, final Exception exception, final DeliveryState deliveryState) { workItem.error(exception, deliveryState); } private void completeClose() { isClosedMono.emitEmpty((signalType, result) -> { addSignalTypeAndResult(logger.atWarning(), signalType, result).log("Unable to emit shutdown signal."); return false; }); subscriptions.dispose(); if (tokenManager != null) { tokenManager.close(); } } /** * Clears pending sends and puts an error in there. * * @param error Error to pass to pending sends. 
*/ private void handleError(Throwable error) { synchronized (pendingSendLock) { if (isDisposed.getAndSet(true)) { logger.verbose("This was already disposed. Dropping error."); } else { logger.atVerbose() .addKeyValue(PENDING_SENDS_SIZE_KEY, () -> String.valueOf(pendingSendsMap.size())) .log("Disposing pending sends with error."); } pendingSendsMap.forEach((key, value) -> value.error(error, null)); pendingSendsMap.clear(); pendingSendsQueue.clear(); } completeClose(); } private void handleClose() { final String message = String.format("Could not complete sends because link '%s' for '%s' is closed.", getLinkName(), entityPath); final AmqpErrorContext context = handler.getErrorContext(sender); synchronized (pendingSendLock) { if (isDisposed.getAndSet(true)) { logger.verbose("This was already disposed."); } else { logger.atVerbose() .addKeyValue(PENDING_SENDS_SIZE_KEY, () -> String.valueOf(pendingSendsMap.size())) .log("Disposing pending sends."); } pendingSendsMap.forEach((key, value) -> value.error(new AmqpException(true, message, context), null)); pendingSendsMap.clear(); pendingSendsQueue.clear(); } completeClose(); } private static boolean isGeneralSendError(Symbol amqpError) { return (amqpError == AmqpErrorCode.SERVER_BUSY_ERROR || amqpError == AmqpErrorCode.TIMEOUT_ERROR || amqpError == AmqpErrorCode.RESOURCE_LIMIT_EXCEEDED); } private static class WeightedDeliveryTag { private final String deliveryTag; private final int priority; WeightedDeliveryTag(final String deliveryTag, final int priority) { this.deliveryTag = deliveryTag; this.priority = priority; } private String getDeliveryTag() { return this.deliveryTag; } private int getPriority() { return this.priority; } } private static class DeliveryTagComparator implements Comparator<WeightedDeliveryTag>, Serializable { private static final long serialVersionUID = -7057500582037295635L; @Override public int compare(WeightedDeliveryTag deliveryTag0, WeightedDeliveryTag deliveryTag1) { return 
deliveryTag1.getPriority() - deliveryTag0.getPriority(); } } /** * Keeps track of messages that have been sent, but may not have been acknowledged by the service. */ private class SendTimeout implements Runnable { private final String deliveryTag; SendTimeout(String deliveryTag) { this.deliveryTag = deliveryTag; } @Override public void run() { final RetriableWorkItem workItem = pendingSendsMap.remove(deliveryTag); if (workItem == null) { return; } Exception cause = lastKnownLinkError; final Exception lastError; final Instant lastErrorTime; synchronized (errorConditionLock) { lastError = lastKnownLinkError; lastErrorTime = lastKnownErrorReportedAt; } if (lastError != null && lastErrorTime != null) { final Instant now = Instant.now(); final boolean isLastErrorAfterSleepTime = lastErrorTime.isAfter(now.minusSeconds(SERVER_BUSY_BASE_SLEEP_TIME_IN_SECS)); final boolean isServerBusy = lastError instanceof AmqpException && isLastErrorAfterSleepTime; final boolean isLastErrorAfterOperationTimeout = lastErrorTime.isAfter(now.minus(retryOptions.getTryTimeout())); cause = isServerBusy || isLastErrorAfterOperationTimeout ? lastError : null; } final AmqpException exception; if (cause instanceof AmqpException) { exception = (AmqpException) cause; } else { exception = new AmqpException(true, AmqpErrorCondition.TIMEOUT_ERROR, String.format(Locale.US, "Entity(%s): Send operation timed out", entityPath), handler.getErrorContext(sender)); } workItem.error(exception, null); } } }
class ReactorSender implements AmqpSendLink, AsyncCloseable, AutoCloseable { private static final String DELIVERY_TAG_KEY = "deliveryTag"; private static final String PENDING_SENDS_SIZE_KEY = "pending_sends_size"; private final String entityPath; private final Sender sender; private final SendLinkHandler handler; private final ReactorProvider reactorProvider; private final Disposable.Composite subscriptions; private final AtomicBoolean hasConnected = new AtomicBoolean(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicInteger retryAttempts = new AtomicInteger(); private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final Object pendingSendLock = new Object(); private final ConcurrentHashMap<String, RetriableWorkItem> pendingSendsMap = new ConcurrentHashMap<>(); private final PriorityQueue<WeightedDeliveryTag> pendingSendsQueue = new PriorityQueue<>(1000, new DeliveryTagComparator()); private final ClientLogger logger; private final Flux<AmqpEndpointState> endpointStates; private final TokenManager tokenManager; private final MessageSerializer messageSerializer; private final AmqpRetryPolicy retry; private final AmqpRetryOptions retryOptions; private final String activeTimeoutMessage; private final Scheduler scheduler; private final AmqpMetricsProvider metricsProvider; private final Object errorConditionLock = new Object(); private volatile Exception lastKnownLinkError; private volatile Instant lastKnownErrorReportedAt; private volatile int linkSize; /** * Creates an instance of {@link ReactorSender}. * * @param amqpConnection The parent {@link AmqpConnection} that this sender lives in. * @param entityPath The message broker address for the sender. * @param sender The underlying proton-j sender. * @param handler The proton-j handler associated with the sender. * @param reactorProvider Provider to schedule work on the proton-j reactor. * @param tokenManager Token manager for authorising with the CBS node. 
Can be {@code null} if it is part of the * transaction manager. * @param messageSerializer Serializer to deserialise and serialize AMQP messages. * @param retryOptions Retry options. * @param scheduler Scheduler to schedule send timeout. */ ReactorSender(AmqpConnection amqpConnection, String entityPath, Sender sender, SendLinkHandler handler, ReactorProvider reactorProvider, TokenManager tokenManager, MessageSerializer messageSerializer, AmqpRetryOptions retryOptions, Scheduler scheduler, AmqpMetricsProvider metricsProvider) { this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.sender = Objects.requireNonNull(sender, "'sender' cannot be null."); this.handler = Objects.requireNonNull(handler, "'handler' cannot be null."); this.reactorProvider = Objects.requireNonNull(reactorProvider, "'reactorProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null."); this.scheduler = Objects.requireNonNull(scheduler, "'scheduler' cannot be null."); this.retry = RetryUtil.getRetryPolicy(retryOptions); this.tokenManager = tokenManager; this.metricsProvider = metricsProvider; String connectionId = handler.getConnectionId() == null ? NOT_APPLICABLE : handler.getConnectionId(); String linkName = getLinkName() == null ? 
NOT_APPLICABLE : getLinkName(); Map<String, Object> loggingContext = createContextWithConnectionId(connectionId); loggingContext.put(LINK_NAME_KEY, linkName); loggingContext.put(ENTITY_PATH_KEY, entityPath); this.logger = new ClientLogger(ReactorSender.class, loggingContext); this.activeTimeoutMessage = String.format( "ReactorSender connectionId[%s] linkName[%s]: Waiting for send and receive handler to be ACTIVE", handler.getConnectionId(), handler.getLinkName()); this.endpointStates = this.handler.getEndpointStates() .map(state -> { logger.verbose("State {}", state); this.hasConnected.set(state == EndpointState.ACTIVE); return AmqpEndpointStateUtil.getConnectionState(state); }) .doOnError(error -> { hasConnected.set(false); handleError(error); }) .doOnComplete(() -> { hasConnected.set(false); handleClose(); }) .cache(1); this.subscriptions = Disposables.composite( this.endpointStates.subscribe(), this.handler.getDeliveredMessages().subscribe(this::processDeliveredMessage), this.handler.getLinkCredits().subscribe(credit -> { logger.atVerbose().addKeyValue("credits", credit) .log("Credits on link."); this.scheduleWorkOnDispatcher(); }), amqpConnection.getShutdownSignals().flatMap(signal -> { logger.verbose("Shutdown signal received."); hasConnected.set(false); return closeAsync("Connection shutdown.", null); }).subscribe() ); if (tokenManager != null) { this.subscriptions.add(tokenManager.getAuthorizationResults().onErrorResume(error -> { final Mono<Void> operation = closeAsync(String.format("connectionId[%s] linkName[%s] Token renewal failure. Disposing send " + "link.", amqpConnection.getId(), getLinkName()), new ErrorCondition(Symbol.getSymbol(NOT_ALLOWED.getErrorCondition()), error.getMessage())); return operation.then(Mono.empty()); }).subscribe(response -> { logger.atVerbose().addKeyValue("response", response) .log("Token refreshed."); }, error -> { }, () -> { logger.verbose(" Authorization completed. Disposing."); closeAsync("Authorization completed. 
Disposing.", null).subscribe(); })); } } @Override public Flux<AmqpEndpointState> getEndpointStates() { return endpointStates; } @Override public Mono<Void> send(Message message) { return send(message, null); } @Override public Mono<Void> send(Message message, DeliveryState deliveryState) { if (isDisposed.get()) { return Mono.error(new IllegalStateException(String.format( "connectionId[%s] linkName[%s] Cannot publish message when disposed.", handler.getConnectionId(), getLinkName()))); } return getLinkSize() .flatMap(maxMessageSize -> { final int payloadSize = messageSerializer.getSize(message); final int allocationSize = Math.min(payloadSize + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize); final byte[] bytes = new byte[allocationSize]; int encodedSize; try { encodedSize = message.encode(bytes, 0, allocationSize); } catch (BufferOverflowException exception) { final String errorMessage = String.format(Locale.US, "Error sending. Size of the payload exceeded maximum message size: %s kb", maxMessageSize / 1024); final Throwable error = new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, errorMessage, exception, handler.getErrorContext(sender)); return Mono.error(error); } return send(bytes, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT, deliveryState); }).then(); } @Override public Mono<Void> send(List<Message> messageBatch) { return send(messageBatch, null); } @Override public Mono<Void> send(List<Message> messageBatch, DeliveryState deliveryState) { if (isDisposed.get()) { return Mono.error(new IllegalStateException(String.format( "connectionId[%s] linkName[%s] Cannot publish data batch when disposed.", handler.getConnectionId(), getLinkName()))); } if (messageBatch.size() == 1) { return send(messageBatch.get(0), deliveryState); } return getLinkSize() .flatMap(maxMessageSize -> { int totalEncodedSize = 0; final CompositeReadableBuffer buffer = new CompositeReadableBuffer(); final byte[] envelopBytes = batchEnvelopBytes(messageBatch.get(0), 
maxMessageSize); if (envelopBytes.length > 0) { totalEncodedSize += envelopBytes.length; if (totalEncodedSize > maxMessageSize) { return batchBufferOverflowError(maxMessageSize); } buffer.append(envelopBytes); } for (final Message message : messageBatch) { final byte[] sectionBytes = batchBinaryDataSectionBytes(message, maxMessageSize); if (sectionBytes.length > 0) { totalEncodedSize += sectionBytes.length; if (totalEncodedSize > maxMessageSize) { return batchBufferOverflowError(maxMessageSize); } buffer.append(sectionBytes); } else { logger.info("Ignoring the empty message org.apache.qpid.proton.message.message@{} in the batch.", Integer.toHexString(System.identityHashCode(message))); } } return send(buffer, AmqpConstants.AMQP_BATCH_MESSAGE_FORMAT, deliveryState); }).then(); } private byte[] batchEnvelopBytes(Message envelopMessage, int maxMessageSize) { final Message message = Proton.message(); message.setMessageAnnotations(envelopMessage.getMessageAnnotations()); if ((envelopMessage.getMessageId() instanceof String) && !CoreUtils.isNullOrEmpty((String) envelopMessage.getMessageId())) { message.setMessageId(envelopMessage.getMessageId()); } if (!CoreUtils.isNullOrEmpty(envelopMessage.getGroupId())) { message.setGroupId(envelopMessage.getGroupId()); } final int size = messageSerializer.getSize(message); final int allocationSize = Math.min(size + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize); final byte[] encodedBytes = new byte[allocationSize]; final int encodedSize = message.encode(encodedBytes, 0, allocationSize); return Arrays.copyOf(encodedBytes, encodedSize); } private byte[] batchBinaryDataSectionBytes(Message sectionMessage, int maxMessageSize) { final int size = messageSerializer.getSize(sectionMessage); final int allocationSize = Math.min(size + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize); final byte[] encodedBytes = new byte[allocationSize]; final int encodedSize = sectionMessage.encode(encodedBytes, 0, allocationSize); final Message message = 
Proton.message(); final Data binaryData = new Data(new Binary(encodedBytes, 0, encodedSize)); message.setBody(binaryData); final int binaryRawSize = binaryData.getValue().getLength(); final int binaryEncodedSize = binaryEncodedSize(binaryRawSize); final byte[] binaryEncodedBytes = new byte[binaryEncodedSize]; message.encode(binaryEncodedBytes, 0, binaryEncodedSize); return binaryEncodedBytes; } private Mono<Void> batchBufferOverflowError(int maxMessageSize) { return FluxUtil.monoError(logger, new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, String.format(Locale.US, "Size of the payload exceeded maximum message size: %s kb", maxMessageSize / 1024), new BufferOverflowException(), handler.getErrorContext(sender))); } /** * Compute the encoded size when encoding a binary data of given size per Amqp 1.0 spec "amqp:data:binary" format. * * @param binaryRawSize the length of the binary data. * @return the encoded size. */ private int binaryEncodedSize(int binaryRawSize) { if (binaryRawSize <= 255) { return 5 + binaryRawSize; } else { return 8 + binaryRawSize; } } @Override public AmqpErrorContext getErrorContext() { return handler.getErrorContext(sender); } @Override public String getLinkName() { return sender.getName(); } @Override public String getEntityPath() { return entityPath; } @Override public String getHostname() { return handler.getHostname(); } @Override public Mono<Integer> getLinkSize() { if (linkSize > 0) { return Mono.defer(() -> Mono.just(this.linkSize)); } synchronized (this) { if (linkSize > 0) { return Mono.defer(() -> Mono.just(linkSize)); } return RetryUtil.withRetry(getEndpointStates().takeUntil(state -> state == AmqpEndpointState.ACTIVE), retryOptions, activeTimeoutMessage) .then(Mono.fromCallable(() -> { final UnsignedLong remoteMaxMessageSize = sender.getRemoteMaxMessageSize(); if (remoteMaxMessageSize != null) { linkSize = remoteMaxMessageSize.intValue(); } else { logger.warning("Could not get the getRemoteMaxMessageSize. 
Returning current link size: {}", linkSize); } return linkSize; })); } } @Override public boolean isDisposed() { return isDisposed.get(); } /** * Blocking call that disposes of the sender. * * @see */ @Override public void dispose() { close(); } /** * Blocking call that disposes of the sender. * * @see */ @Override public void close() { closeAsync().block(retryOptions.getTryTimeout()); } @Override public Mono<Void> closeAsync() { return closeAsync("User invoked close operation.", null); } /** * Disposes of the sender. * * @param errorCondition Error condition associated with close operation. * @param message Message associated with why the sender was closed. * * @return A mono that completes when the send link has closed. */ Mono<Void> closeAsync(String message, ErrorCondition errorCondition) { if (isDisposed.getAndSet(true)) { return isClosedMono.asMono(); } addErrorCondition(logger.atVerbose(), errorCondition) .log("Setting error condition and disposing. {}", message); final Runnable closeWork = () -> { if (errorCondition != null && sender.getCondition() == null) { sender.setCondition(errorCondition); } sender.close(); }; return Mono.fromRunnable(() -> { try { reactorProvider.getReactorDispatcher().invoke(closeWork); } catch (IOException e) { logger.warning("Could not schedule close work. Running manually. And completing close.", e); closeWork.run(); handleClose(); } catch (RejectedExecutionException e) { logger.info("RejectedExecutionException scheduling close work. And completing close."); closeWork.run(); handleClose(); } }).then(isClosedMono.asMono()) .publishOn(Schedulers.boundedElastic()); } /** * A mono that completes when the sender has completely closed. * * @return mono that completes when the sender has completely closed. 
*/ Mono<Void> isClosed() { return isClosedMono.asMono(); } @Override public Mono<DeliveryState> send(byte[] bytes, int arrayOffset, int messageFormat, DeliveryState deliveryState) { return onEndpointActive().then(Mono.create(sink -> { sendWork(new RetriableWorkItem(bytes, arrayOffset, messageFormat, sink, retryOptions.getTryTimeout(), deliveryState, metricsProvider)); })); } Mono<DeliveryState> send(ReadableBuffer buffer, int messageFormat, DeliveryState deliveryState) { return onEndpointActive().then(Mono.create(sink -> { sendWork(new RetriableWorkItem(buffer, messageFormat, sink, retryOptions.getTryTimeout(), deliveryState, metricsProvider)); })); } private Flux<EndpointState> onEndpointActive() { return RetryUtil.withRetry(handler.getEndpointStates().takeUntil(state -> state == EndpointState.ACTIVE), retryOptions, activeTimeoutMessage); } /** * Add the work item in pending send to be processed on {@link ReactorDispatcher} thread. * * @param workItem to be processed. */ private void sendWork(RetriableWorkItem workItem) { final String deliveryTag = UUID.randomUUID().toString().replace("-", ""); synchronized (pendingSendLock) { this.pendingSendsMap.put(deliveryTag, workItem); this.pendingSendsQueue.offer(new WeightedDeliveryTag(deliveryTag, workItem.hasBeenRetried() ? 1 : 0)); } this.scheduleWorkOnDispatcher(); } /** * Invokes work on the Reactor. 
Should only be called from ReactorDispatcher.invoke() */ private void processDeliveredMessage(Delivery delivery) { final DeliveryState outcome = delivery.getRemoteState(); final String deliveryTag = new String(delivery.getTag(), UTF_8); logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("Process delivered message."); final RetriableWorkItem workItem = pendingSendsMap.remove(deliveryTag); if (workItem == null) { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("Mismatch (or send timed out)."); return; } else if (workItem.isDeliveryStateProvided()) { workItem.success(outcome); return; } if (outcome instanceof Accepted) { synchronized (errorConditionLock) { lastKnownLinkError = null; lastKnownErrorReportedAt = null; retryAttempts.set(0); } workItem.success(outcome); } else if (outcome instanceof Rejected) { final Rejected rejected = (Rejected) outcome; final org.apache.qpid.proton.amqp.transport.ErrorCondition error = rejected.getError(); final Exception exception = ExceptionUtil.toException(error.getCondition().toString(), error.getDescription(), handler.getErrorContext(sender)); logger.atWarning() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .addKeyValue("rejected", rejected) .log("Delivery rejected."); final int retryAttempt; if (isGeneralSendError(error.getCondition())) { synchronized (errorConditionLock) { lastKnownLinkError = exception; lastKnownErrorReportedAt = Instant.now(); retryAttempt = retryAttempts.incrementAndGet(); } } else { retryAttempt = retryAttempts.get(); } final Duration retryInterval = retry.calculateRetryDelay(exception, retryAttempt); if (retryInterval == null || retryInterval.compareTo(workItem.getTimeoutTracker().remaining()) > 0) { cleanupFailedSend(workItem, exception, outcome); } else { workItem.setLastKnownException(exception); try { reactorProvider.getReactorDispatcher().invoke(() -> sendWork(workItem), retryInterval); } catch (IOException | RejectedExecutionException schedulerException) { 
exception.initCause(schedulerException); cleanupFailedSend( workItem, new AmqpException(false, String.format(Locale.US, "Entity(%s): send operation failed while scheduling a" + " retry on Reactor, see cause for more details.", entityPath), schedulerException, handler.getErrorContext(sender)), outcome); } } } else if (outcome instanceof Released) { cleanupFailedSend(workItem, new OperationCancelledException(outcome.toString(), handler.getErrorContext(sender)), outcome); } else if (outcome instanceof Declared) { final Declared declared = (Declared) outcome; workItem.success(declared); } else { cleanupFailedSend(workItem, new AmqpException(false, outcome.toString(), handler.getErrorContext(sender)), outcome); } } private void scheduleWorkOnDispatcher() { try { reactorProvider.getReactorDispatcher().invoke(this::processSendWork); } catch (IOException e) { logger.warning("Error scheduling work on reactor.", e); } catch (RejectedExecutionException e) { logger.info("Error scheduling work on reactor because of RejectedExecutionException."); } } private void cleanupFailedSend(final RetriableWorkItem workItem, final Exception exception, final DeliveryState deliveryState) { workItem.error(exception, deliveryState); } private void completeClose() { isClosedMono.emitEmpty((signalType, result) -> { addSignalTypeAndResult(logger.atWarning(), signalType, result).log("Unable to emit shutdown signal."); return false; }); subscriptions.dispose(); if (tokenManager != null) { tokenManager.close(); } } /** * Clears pending sends and puts an error in there. * * @param error Error to pass to pending sends. */ private void handleError(Throwable error) { synchronized (pendingSendLock) { if (isDisposed.getAndSet(true)) { logger.verbose("This was already disposed. 
Dropping error."); } else { logger.atVerbose() .addKeyValue(PENDING_SENDS_SIZE_KEY, () -> String.valueOf(pendingSendsMap.size())) .log("Disposing pending sends with error."); } pendingSendsMap.forEach((key, value) -> value.error(error, null)); pendingSendsMap.clear(); pendingSendsQueue.clear(); } completeClose(); } private void handleClose() { final String message = String.format("Could not complete sends because link '%s' for '%s' is closed.", getLinkName(), entityPath); final AmqpErrorContext context = handler.getErrorContext(sender); synchronized (pendingSendLock) { if (isDisposed.getAndSet(true)) { logger.verbose("This was already disposed."); } else { logger.atVerbose() .addKeyValue(PENDING_SENDS_SIZE_KEY, () -> String.valueOf(pendingSendsMap.size())) .log("Disposing pending sends."); } pendingSendsMap.forEach((key, value) -> value.error(new AmqpException(true, message, context), null)); pendingSendsMap.clear(); pendingSendsQueue.clear(); } completeClose(); } private static boolean isGeneralSendError(Symbol amqpError) { return (amqpError == AmqpErrorCode.SERVER_BUSY_ERROR || amqpError == AmqpErrorCode.TIMEOUT_ERROR || amqpError == AmqpErrorCode.RESOURCE_LIMIT_EXCEEDED); } private static class WeightedDeliveryTag { private final String deliveryTag; private final int priority; WeightedDeliveryTag(final String deliveryTag, final int priority) { this.deliveryTag = deliveryTag; this.priority = priority; } private String getDeliveryTag() { return this.deliveryTag; } private int getPriority() { return this.priority; } } private static class DeliveryTagComparator implements Comparator<WeightedDeliveryTag>, Serializable { private static final long serialVersionUID = -7057500582037295635L; @Override public int compare(WeightedDeliveryTag deliveryTag0, WeightedDeliveryTag deliveryTag1) { return deliveryTag1.getPriority() - deliveryTag0.getPriority(); } } /** * Keeps track of messages that have been sent, but may not have been acknowledged by the service. 
*/ private class SendTimeout implements Runnable { private final String deliveryTag; SendTimeout(String deliveryTag) { this.deliveryTag = deliveryTag; } @Override public void run() { final RetriableWorkItem workItem = pendingSendsMap.remove(deliveryTag); if (workItem == null) { return; } Exception cause = lastKnownLinkError; final Exception lastError; final Instant lastErrorTime; synchronized (errorConditionLock) { lastError = lastKnownLinkError; lastErrorTime = lastKnownErrorReportedAt; } if (lastError != null && lastErrorTime != null) { final Instant now = Instant.now(); final boolean isLastErrorAfterSleepTime = lastErrorTime.isAfter(now.minusSeconds(SERVER_BUSY_BASE_SLEEP_TIME_IN_SECS)); final boolean isServerBusy = lastError instanceof AmqpException && isLastErrorAfterSleepTime; final boolean isLastErrorAfterOperationTimeout = lastErrorTime.isAfter(now.minus(retryOptions.getTryTimeout())); cause = isServerBusy || isLastErrorAfterOperationTimeout ? lastError : null; } final AmqpException exception; if (cause instanceof AmqpException) { exception = (AmqpException) cause; } else { exception = new AmqpException(true, AmqpErrorCondition.TIMEOUT_ERROR, String.format(Locale.US, "Entity(%s): Send operation timed out", entityPath), handler.getErrorContext(sender)); } workItem.error(exception, null); } } }
Thank you! `rewind()` is the correct API.
private void processSendWork() { if (!hasConnected.get()) { logger.warning("Not connected. Not processing send work."); return; } if (isDisposed.get()) { logger.info("Sender is closed. Not executing work."); return; } while (hasConnected.get() && sender.getCredit() > 0) { final WeightedDeliveryTag weightedDelivery; final RetriableWorkItem workItem; final String deliveryTag; synchronized (pendingSendLock) { weightedDelivery = this.pendingSendsQueue.poll(); if (weightedDelivery != null) { deliveryTag = weightedDelivery.getDeliveryTag(); workItem = this.pendingSendsMap.get(deliveryTag); } else { workItem = null; deliveryTag = null; } } if (workItem == null) { if (deliveryTag != null) { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("sendData not found for this delivery."); } break; } Delivery delivery = null; boolean linkAdvance = false; int sentMsgSize = 0; Exception sendException = null; try { workItem.beforeTry(); delivery = sender.delivery(deliveryTag.getBytes(UTF_8)); delivery.setMessageFormat(workItem.getMessageFormat()); if (workItem.isDeliveryStateProvided()) { delivery.disposition(workItem.getDeliveryState()); } if (workItem.getMessage() != null) { final byte[] encodedBytes = workItem.getMessage(); sentMsgSize = sender.send(encodedBytes, 0, workItem.getEncodedMessageSize()); } else { final ReadableBuffer encodedBuffer = workItem.getEncodedBuffer(); encodedBuffer.position(0); sentMsgSize = sender.send(encodedBuffer); } sentMsgSize = sender.send(workItem.getMessage(), 0, workItem.getEncodedMessageSize()); assert sentMsgSize == workItem.getEncodedMessageSize() : "Contract of the ProtonJ library for Sender. 
Send API changed"; linkAdvance = sender.advance(); } catch (Exception exception) { sendException = exception; } if (linkAdvance) { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("Sent message."); workItem.setWaitingForAck(); scheduler.schedule(new SendTimeout(deliveryTag), retryOptions.getTryTimeout().toMillis(), TimeUnit.MILLISECONDS); } else { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .addKeyValue("sentMessageSize", sentMsgSize) .addKeyValue("payloadActualSize", workItem.getEncodedMessageSize()) .log("Sendlink advance failed."); DeliveryState outcome = null; if (delivery != null) { outcome = delivery.getRemoteState(); delivery.free(); } final AmqpErrorContext context = handler.getErrorContext(sender); final Throwable exception = sendException != null ? new OperationCancelledException(String.format(Locale.US, "Entity(%s): send operation failed. Please see cause for more details", entityPath), sendException, context) : new OperationCancelledException(String.format(Locale.US, "Entity(%s): send operation failed while advancing delivery(tag: %s).", entityPath, deliveryTag), context); workItem.error(exception, outcome); } } }
encodedBuffer.position(0);
/**
 * Drains the pending-sends queue while the proton-j link has credit, transferring each
 * queued work item over the {@link Sender}. Scheduled via the ReactorDispatcher, so it is
 * expected to run on the reactor thread (proton-j objects are not thread-safe).
 */
private void processSendWork() {
    if (!hasConnected.get()) {
        logger.warning("Not connected. Not processing send work.");
        return;
    }

    if (isDisposed.get()) {
        logger.info("Sender is closed. Not executing work.");
        return;
    }

    // Only dequeue work while the remote has granted link credit; remaining items are
    // picked up when the credit-flow subscription reschedules this method.
    while (hasConnected.get() && sender.getCredit() > 0) {
        final WeightedDeliveryTag weightedDelivery;
        final RetriableWorkItem workItem;
        final String deliveryTag;

        // Queue and map are mutated together under pendingSendLock; read them atomically.
        synchronized (pendingSendLock) {
            weightedDelivery = this.pendingSendsQueue.poll();
            if (weightedDelivery != null) {
                deliveryTag = weightedDelivery.getDeliveryTag();
                workItem = this.pendingSendsMap.get(deliveryTag);
            } else {
                workItem = null;
                deliveryTag = null;
            }
        }

        if (workItem == null) {
            // Tag was queued but the work item was already completed/removed (e.g. send timeout).
            if (deliveryTag != null) {
                logger.atVerbose()
                    .addKeyValue(DELIVERY_TAG_KEY, deliveryTag)
                    .log("sendData not found for this delivery.");
            }
            break;
        }

        Delivery delivery = null;
        boolean linkAdvance = false;
        // NOTE(review): sentMsgSize is never assigned in this version (workItem.send(sender)
        // does not report it), so the failure log below always records 0 — confirm intended.
        int sentMsgSize = 0;
        Exception sendException = null;

        try {
            workItem.beforeTry();
            delivery = sender.delivery(deliveryTag.getBytes(UTF_8));
            delivery.setMessageFormat(workItem.getMessageFormat());

            if (workItem.isDeliveryStateProvided()) {
                delivery.disposition(workItem.getDeliveryState());
            }
            // Delegates the byte[]-vs-ReadableBuffer transfer choice to the work item;
            // presumably it also rewinds/positions its buffer for retries — see RetriableWorkItem.
            workItem.send(sender);
            linkAdvance = sender.advance();
        } catch (Exception exception) {
            sendException = exception;
        }

        if (linkAdvance) {
            logger.atVerbose()
                .addKeyValue(DELIVERY_TAG_KEY, deliveryTag)
                .log("Sent message.");

            workItem.setWaitingForAck();
            // Arm the per-delivery timeout; the ack path removes the item from
            // pendingSendsMap on success before this can fire.
            scheduler.schedule(new SendTimeout(deliveryTag), retryOptions.getTryTimeout().toMillis(),
                TimeUnit.MILLISECONDS);
        } else {
            logger.atVerbose()
                .addKeyValue(DELIVERY_TAG_KEY, deliveryTag)
                .addKeyValue("sentMessageSize", sentMsgSize)
                .addKeyValue("payloadActualSize", workItem.getEncodedMessageSize())
                .log("Sendlink advance failed.");

            DeliveryState outcome = null;
            if (delivery != null) {
                outcome = delivery.getRemoteState();
                delivery.free();
            }

            final AmqpErrorContext context = handler.getErrorContext(sender);
            // Distinguish "send threw" (wrap the cause) from "advance returned false".
            final Throwable exception = sendException != null
                ? new OperationCancelledException(String.format(Locale.US,
                    "Entity(%s): send operation failed. Please see cause for more details", entityPath),
                    sendException, context)
                : new OperationCancelledException(String.format(Locale.US,
                    "Entity(%s): send operation failed while advancing delivery(tag: %s).", entityPath,
                    deliveryTag), context);

            workItem.error(exception, outcome);
        }
    }
}
/**
 * An AMQP send link backed by a proton-j {@code Sender}. Work items are queued and dispatched on the
 * ReactorDispatcher thread; deliveries are settled asynchronously via {@link #processDeliveredMessage(Delivery)}
 * and timed out via {@link SendTimeout}.
 */
class ReactorSender implements AmqpSendLink, AsyncCloseable, AutoCloseable {
    private static final String DELIVERY_TAG_KEY = "deliveryTag";
    private static final String PENDING_SENDS_SIZE_KEY = "pending_sends_size";

    private final String entityPath;
    private final Sender sender;
    private final SendLinkHandler handler;
    private final ReactorProvider reactorProvider;
    private final Disposable.Composite subscriptions;

    private final AtomicBoolean hasConnected = new AtomicBoolean();
    private final AtomicBoolean isDisposed = new AtomicBoolean();
    private final AtomicInteger retryAttempts = new AtomicInteger();
    // Completed exactly once, when the link is fully closed (see completeClose()).
    private final Sinks.Empty<Void> isClosedMono = Sinks.empty();

    // Guards pendingSendsMap + pendingSendsQueue so they stay consistent with each other.
    private final Object pendingSendLock = new Object();
    private final ConcurrentHashMap<String, RetriableWorkItem> pendingSendsMap = new ConcurrentHashMap<>();
    // Retried work items get priority 1 so they are dispatched before fresh ones (see DeliveryTagComparator).
    private final PriorityQueue<WeightedDeliveryTag> pendingSendsQueue
        = new PriorityQueue<>(1000, new DeliveryTagComparator());

    private final ClientLogger logger;
    private final Flux<AmqpEndpointState> endpointStates;
    private final TokenManager tokenManager;
    private final MessageSerializer messageSerializer;
    private final AmqpRetryPolicy retry;
    private final AmqpRetryOptions retryOptions;
    private final String activeTimeoutMessage;
    private final Scheduler scheduler;
    private final AmqpMetricsProvider metricsProvider;

    // Guards lastKnownLinkError/lastKnownErrorReportedAt together with retryAttempts updates.
    private final Object errorConditionLock = new Object();
    private volatile Exception lastKnownLinkError;
    private volatile Instant lastKnownErrorReportedAt;
    // Negotiated max message size; 0 until fetched lazily in getLinkSize().
    private volatile int linkSize;

    /**
     * Creates an instance of {@link ReactorSender}.
     *
     * @param amqpConnection The parent {@link AmqpConnection} that this sender lives in.
     * @param entityPath The message broker address for the sender.
     * @param sender The underlying proton-j sender.
     * @param handler The proton-j handler associated with the sender.
     * @param reactorProvider Provider to schedule work on the proton-j reactor.
     * @param tokenManager Token manager for authorising with the CBS node. Can be {@code null} if it is part of
     *     the transaction manager.
     * @param messageSerializer Serializer to deserialise and serialize AMQP messages.
     * @param retryOptions Retry options.
     * @param scheduler Scheduler to schedule send timeout.
     */
    ReactorSender(AmqpConnection amqpConnection, String entityPath, Sender sender, SendLinkHandler handler,
        ReactorProvider reactorProvider, TokenManager tokenManager, MessageSerializer messageSerializer,
        AmqpRetryOptions retryOptions, Scheduler scheduler, AmqpMetricsProvider metricsProvider) {
        this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null.");
        this.sender = Objects.requireNonNull(sender, "'sender' cannot be null.");
        this.handler = Objects.requireNonNull(handler, "'handler' cannot be null.");
        this.reactorProvider = Objects.requireNonNull(reactorProvider, "'reactorProvider' cannot be null.");
        this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null.");
        this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null.");
        this.scheduler = Objects.requireNonNull(scheduler, "'scheduler' cannot be null.");
        this.retry = RetryUtil.getRetryPolicy(retryOptions);
        this.tokenManager = tokenManager;
        this.metricsProvider = metricsProvider;

        String connectionId = handler.getConnectionId() == null ? NOT_APPLICABLE : handler.getConnectionId();
        String linkName = getLinkName() == null ? NOT_APPLICABLE : getLinkName();

        Map<String, Object> loggingContext = createContextWithConnectionId(connectionId);
        loggingContext.put(LINK_NAME_KEY, linkName);
        loggingContext.put(ENTITY_PATH_KEY, entityPath);
        this.logger = new ClientLogger(ReactorSender.class, loggingContext);

        this.activeTimeoutMessage = String.format(
            "ReactorSender connectionId[%s] linkName[%s]: Waiting for send and receive handler to be ACTIVE",
            handler.getConnectionId(), handler.getLinkName());

        // Cache the latest endpoint state so late subscribers immediately see the current state.
        this.endpointStates = this.handler.getEndpointStates()
            .map(state -> {
                logger.verbose("State {}", state);
                this.hasConnected.set(state == EndpointState.ACTIVE);
                return AmqpEndpointStateUtil.getConnectionState(state);
            })
            .doOnError(error -> {
                hasConnected.set(false);
                handleError(error);
            })
            .doOnComplete(() -> {
                hasConnected.set(false);
                handleClose();
            })
            .cache(1);

        this.subscriptions = Disposables.composite(
            this.endpointStates.subscribe(),
            this.handler.getDeliveredMessages().subscribe(this::processDeliveredMessage),
            // New credit from the broker means queued work may now be sendable.
            this.handler.getLinkCredits().subscribe(credit -> {
                logger.atVerbose().addKeyValue("credits", credit)
                    .log("Credits on link.");
                this.scheduleWorkOnDispatcher();
            }),
            amqpConnection.getShutdownSignals().flatMap(signal -> {
                logger.verbose("Shutdown signal received.");
                hasConnected.set(false);
                return closeAsync("Connection shutdown.", null);
            }).subscribe()
        );

        if (tokenManager != null) {
            // Close the link if CBS token renewal fails or the authorization stream completes.
            this.subscriptions.add(tokenManager.getAuthorizationResults().onErrorResume(error -> {
                final Mono<Void> operation =
                    closeAsync(String.format("connectionId[%s] linkName[%s] Token renewal failure. Disposing send "
                        + "link.", amqpConnection.getId(), getLinkName()),
                        new ErrorCondition(Symbol.getSymbol(NOT_ALLOWED.getErrorCondition()), error.getMessage()));
                return operation.then(Mono.empty());
            }).subscribe(response -> {
                logger.atVerbose().addKeyValue("response", response)
                    .log("Token refreshed.");
            }, error -> {
            }, () -> {
                logger.verbose(" Authorization completed. Disposing.");
                closeAsync("Authorization completed. Disposing.", null).subscribe();
            }));
        }
    }

    @Override
    public Flux<AmqpEndpointState> getEndpointStates() {
        return endpointStates;
    }

    @Override
    public Mono<Void> send(Message message) {
        return send(message, null);
    }

    /**
     * Encodes a single message (capped at the negotiated link size) and queues it for delivery.
     * Fails with {@code LINK_PAYLOAD_SIZE_EXCEEDED} if the encoded payload does not fit.
     */
    @Override
    public Mono<Void> send(Message message, DeliveryState deliveryState) {
        if (isDisposed.get()) {
            return Mono.error(new IllegalStateException(String.format(
                "connectionId[%s] linkName[%s] Cannot publish message when disposed.", handler.getConnectionId(),
                getLinkName())));
        }
        return getLinkSize()
            .flatMap(maxMessageSize -> {
                final int payloadSize = messageSerializer.getSize(message);
                final int allocationSize = Math.min(payloadSize + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize);
                final byte[] bytes = new byte[allocationSize];
                int encodedSize;
                try {
                    encodedSize = message.encode(bytes, 0, allocationSize);
                } catch (BufferOverflowException exception) {
                    final String errorMessage = String.format(Locale.US,
                        "Error sending. Size of the payload exceeded maximum message size: %s kb",
                        maxMessageSize / 1024);
                    final Throwable error = new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED,
                        errorMessage, exception, handler.getErrorContext(sender));
                    return Mono.error(error);
                }
                return send(bytes, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT, deliveryState);
            }).then();
    }

    @Override
    public Mono<Void> send(List<Message> messageBatch) {
        return send(messageBatch, null);
    }

    /**
     * Sends a batch as one AMQP "batch format" message: an envelope carrying the first message's annotations,
     * followed by each message wrapped in a Data section. Single-element batches fall back to the single-send path.
     */
    @Override
    public Mono<Void> send(List<Message> messageBatch, DeliveryState deliveryState) {
        if (isDisposed.get()) {
            return Mono.error(new IllegalStateException(String.format(
                "connectionId[%s] linkName[%s] Cannot publish data batch when disposed.", handler.getConnectionId(),
                getLinkName())));
        }
        if (messageBatch.size() == 1) {
            return send(messageBatch.get(0), deliveryState);
        }
        return getLinkSize()
            .flatMap(maxMessageSize -> {
                final Message firstMessage = messageBatch.get(0);
                // Envelope message: carries only the first message's annotations.
                final Message batchMessage = Proton.message();
                batchMessage.setMessageAnnotations(firstMessage.getMessageAnnotations());
                final int maxMessageSizeTemp = maxMessageSize;
                final byte[] bytes = new byte[maxMessageSizeTemp];
                int encodedSize = batchMessage.encode(bytes, 0, maxMessageSizeTemp);
                int byteArrayOffset = encodedSize;
                for (final Message amqpMessage : messageBatch) {
                    // Each batch entry is re-wrapped as a Data section containing its own encoded bytes.
                    final Message messageWrappedByData = Proton.message();
                    int payloadSize = messageSerializer.getSize(amqpMessage);
                    int allocationSize = Math.min(payloadSize + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSizeTemp);
                    byte[] messageBytes = new byte[allocationSize];
                    int messageSizeBytes = amqpMessage.encode(messageBytes, 0, allocationSize);
                    messageWrappedByData.setBody(new Data(new Binary(messageBytes, 0, messageSizeBytes)));
                    try {
                        encodedSize = messageWrappedByData
                            .encode(bytes, byteArrayOffset, maxMessageSizeTemp - byteArrayOffset - 1);
                    } catch (BufferOverflowException exception) {
                        final String message = String.format(Locale.US,
                            "Size of the payload exceeded maximum message size: %s kb", maxMessageSizeTemp / 1024);
                        final AmqpException error = new AmqpException(false,
                            AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, exception,
                            handler.getErrorContext(sender));
                        return Mono.error(error);
                    }
                    byteArrayOffset = byteArrayOffset + encodedSize;
                }
                return send(bytes, byteArrayOffset, AmqpConstants.AMQP_BATCH_MESSAGE_FORMAT, deliveryState);
            }).then();
    }

    // NOTE(review): buffer-based batch path; not referenced from within this class as shown —
    // presumably staged for an upcoming refactor of send(List, DeliveryState). Verify against callers.
    private Mono<Void> batchSend(List<Message> batch, int maxMessageSize, DeliveryState deliveryState) {
        int totalEncodedSize = 0;
        final CompositeReadableBuffer buffer = new CompositeReadableBuffer();
        final byte[] envelopBytes = batchEnvelopAsBinaryData(batch.get(0), maxMessageSize);
        totalEncodedSize += envelopBytes.length;
        if (totalEncodedSize > maxMessageSize) {
            return batchBufferOverflowError(maxMessageSize);
        }
        buffer.append(envelopBytes);
        for (final Message message : batch) {
            final byte[] sectionBytes = batchSectionAsBinaryData(message, maxMessageSize);
            totalEncodedSize += sectionBytes.length;
            if (totalEncodedSize > maxMessageSize) {
                return batchBufferOverflowError(maxMessageSize);
            }
            buffer.append(sectionBytes);
        }
        final Flux<EndpointState> activeEndpointFlux = RetryUtil.withRetry(
            handler.getEndpointStates().takeUntil(state -> state == EndpointState.ACTIVE), retryOptions,
            activeTimeoutMessage);
        final int totalEncodedBufferSize = totalEncodedSize;
        Mono<DeliveryState> sendMono = activeEndpointFlux.then(Mono.create(sink -> {
            sendWork(new RetriableWorkItem(buffer, totalEncodedBufferSize, AmqpConstants.AMQP_BATCH_MESSAGE_FORMAT,
                sink, retryOptions.getTryTimeout(), deliveryState, metricsProvider));
        }));
        return sendMono.then();
    }

    // Encodes the batch envelope (annotations of the first message only) and trims to the exact encoded size.
    private byte[] batchEnvelopAsBinaryData(Message envelopMessage, int maxMessageSize) {
        final Message message = Proton.message();
        message.setMessageAnnotations(envelopMessage.getMessageAnnotations());
        final int size = messageSerializer.getSize(message);
        final int allocationSize = Math.min(size + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize);
        final byte[] encodedBytes = new byte[allocationSize];
        final int encodedSize = message.encode(encodedBytes, 0, allocationSize);
        return Arrays.copyOf(encodedBytes, encodedSize);
    }

    // Encodes one batch entry as a Data(Binary) section; allocation is exact via binaryEncodedSize().
    private byte[] batchSectionAsBinaryData(Message sectionMessage, int maxMessageSize) {
        final int size = messageSerializer.getSize(sectionMessage);
        final int allocationSize = Math.min(size + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize);
        final byte[] encodedBytes = new byte[allocationSize];
        final int encodedSize = sectionMessage.encode(encodedBytes, 0, allocationSize);
        final Message message = Proton.message();
        final Data binaryData = new Data(new Binary(encodedBytes, 0, encodedSize));
        message.setBody(binaryData);
        final int binaryRawSize = binaryData.getValue().getLength();
        final int binaryEncodedSize = binaryEncodedSize(binaryRawSize);
        final byte[] binaryEncodedBytes = new byte[binaryEncodedSize];
        message.encode(binaryEncodedBytes, 0, binaryEncodedSize);
        return binaryEncodedBytes;
    }

    private Mono<Void> batchBufferOverflowError(int maxMessageSize) {
        return Mono.error(new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED,
            String.format(Locale.US, "Size of the payload exceeded maximum message size: %s kb",
                maxMessageSize / 1024),
            new BufferOverflowException(), handler.getErrorContext(sender)));
    }

    /**
     * Computes the encoded size of a binary payload per the AMQP 1.0 "amqp:data:binary" encoding:
     * a short (5-byte) header for payloads up to 255 bytes, an 8-byte header otherwise.
     */
    private int binaryEncodedSize(int binaryRawSize) {
        if (binaryRawSize <= 255) {
            return 5 + binaryRawSize;
        } else {
            return 8 + binaryRawSize;
        }
    }

    @Override
    public AmqpErrorContext getErrorContext() {
        return handler.getErrorContext(sender);
    }

    @Override
    public String getLinkName() {
        return sender.getName();
    }

    @Override
    public String getEntityPath() {
        return entityPath;
    }

    @Override
    public String getHostname() {
        return handler.getHostname();
    }

    /**
     * Lazily fetches the remote max message size once the link is ACTIVE; cached in {@code linkSize}
     * with double-checked locking so later calls return the cached value without waiting.
     */
    @Override
    public Mono<Integer> getLinkSize() {
        if (linkSize > 0) {
            return Mono.defer(() -> Mono.just(this.linkSize));
        }
        synchronized (this) {
            if (linkSize > 0) {
                return Mono.defer(() -> Mono.just(linkSize));
            }
            return RetryUtil.withRetry(getEndpointStates().takeUntil(state -> state == AmqpEndpointState.ACTIVE),
                retryOptions, activeTimeoutMessage)
                .then(Mono.fromCallable(() -> {
                    final UnsignedLong remoteMaxMessageSize = sender.getRemoteMaxMessageSize();
                    if (remoteMaxMessageSize != null) {
                        linkSize = remoteMaxMessageSize.intValue();
                    } else {
                        logger.warning("Could not get the getRemoteMaxMessageSize. Returning current link size: {}",
                            linkSize);
                    }
                    return linkSize;
                }));
        }
    }

    @Override
    public boolean isDisposed() {
        return isDisposed.get();
    }

    /**
     * Blocking call that disposes of the sender.
     *
     * @see #close()
     */
    @Override
    public void dispose() {
        close();
    }

    /**
     * Blocking call that disposes of the sender.
     *
     * @see #closeAsync()
     */
    @Override
    public void close() {
        closeAsync().block(retryOptions.getTryTimeout());
    }

    @Override
    public Mono<Void> closeAsync() {
        return closeAsync("User invoked close operation.", null);
    }

    /**
     * Disposes of the sender. Idempotent: subsequent calls simply return the close-completion mono.
     *
     * @param errorCondition Error condition associated with close operation.
     * @param message Message associated with why the sender was closed.
     *
     * @return A mono that completes when the send link has closed.
     */
    Mono<Void> closeAsync(String message, ErrorCondition errorCondition) {
        if (isDisposed.getAndSet(true)) {
            return isClosedMono.asMono();
        }

        addErrorCondition(logger.atVerbose(), errorCondition)
            .log("Setting error condition and disposing. {}", message);

        final Runnable closeWork = () -> {
            if (errorCondition != null && sender.getCondition() == null) {
                sender.setCondition(errorCondition);
            }
            sender.close();
        };

        return Mono.fromRunnable(() -> {
            try {
                // Prefer closing on the reactor thread; if scheduling fails, close inline and finish manually.
                reactorProvider.getReactorDispatcher().invoke(closeWork);
            } catch (IOException e) {
                logger.warning("Could not schedule close work. Running manually. And completing close.", e);
                closeWork.run();
                handleClose();
            } catch (RejectedExecutionException e) {
                logger.info("RejectedExecutionException scheduling close work. And completing close.");
                closeWork.run();
                handleClose();
            }
        }).then(isClosedMono.asMono())
            .publishOn(Schedulers.boundedElastic());
    }

    /**
     * A mono that completes when the sender has completely closed.
     *
     * @return mono that completes when the sender has completely closed.
     */
    Mono<Void> isClosed() {
        return isClosedMono.asMono();
    }

    /**
     * Queues pre-encoded bytes for delivery once the link is ACTIVE; the returned mono completes with the
     * remote delivery outcome.
     */
    @Override
    public Mono<DeliveryState> send(byte[] bytes, int arrayOffset, int messageFormat, DeliveryState deliveryState) {
        final Flux<EndpointState> activeEndpointFlux = RetryUtil.withRetry(
            handler.getEndpointStates().takeUntil(state -> state == EndpointState.ACTIVE), retryOptions,
            activeTimeoutMessage);
        return activeEndpointFlux.then(Mono.create(sink -> {
            sendWork(new RetriableWorkItem(bytes, arrayOffset, messageFormat, sink, retryOptions.getTryTimeout(),
                deliveryState, metricsProvider));
        }));
    }

    /**
     * Add the work item in pending send to be processed on {@link ReactorDispatcher} thread.
     *
     * @param workItem to be processed.
     */
    private void sendWork(RetriableWorkItem workItem) {
        final String deliveryTag = UUID.randomUUID().toString().replace("-", "");
        synchronized (pendingSendLock) {
            this.pendingSendsMap.put(deliveryTag, workItem);
            // Retried items get priority 1 so they are polled before new items (priority 0).
            this.pendingSendsQueue.offer(new WeightedDeliveryTag(deliveryTag, workItem.hasBeenRetried() ? 1 : 0));
        }
        this.scheduleWorkOnDispatcher();
    }

    /**
     * Settles a delivery outcome received from the broker: completes the matching work item on Accepted/Declared,
     * retries or fails it on Rejected (depending on remaining timeout), and fails it on Released or any other
     * outcome. Invoked from the handler's delivered-messages subscription.
     */
    private void processDeliveredMessage(Delivery delivery) {
        final DeliveryState outcome = delivery.getRemoteState();
        final String deliveryTag = new String(delivery.getTag(), UTF_8);

        logger.atVerbose()
            .addKeyValue(DELIVERY_TAG_KEY, deliveryTag)
            .log("Process delivered message.");

        final RetriableWorkItem workItem = pendingSendsMap.remove(deliveryTag);
        if (workItem == null) {
            // Work item already completed elsewhere (e.g. SendTimeout removed it).
            logger.atVerbose()
                .addKeyValue(DELIVERY_TAG_KEY, deliveryTag)
                .log("Mismatch (or send timed out).");
            return;
        } else if (workItem.isDeliveryStateProvided()) {
            // Caller supplied the delivery state (e.g. transactional send) — report the outcome as-is.
            workItem.success(outcome);
            return;
        }

        if (outcome instanceof Accepted) {
            // A successful send clears the link-error bookkeeping used by SendTimeout and retry backoff.
            synchronized (errorConditionLock) {
                lastKnownLinkError = null;
                lastKnownErrorReportedAt = null;
                retryAttempts.set(0);
            }
            workItem.success(outcome);
        } else if (outcome instanceof Rejected) {
            final Rejected rejected = (Rejected) outcome;
            final org.apache.qpid.proton.amqp.transport.ErrorCondition error = rejected.getError();
            final Exception exception = ExceptionUtil.toException(error.getCondition().toString(),
                error.getDescription(), handler.getErrorContext(sender));

            logger.atWarning()
                .addKeyValue(DELIVERY_TAG_KEY, deliveryTag)
                .addKeyValue("rejected", rejected)
                .log("Delivery rejected.");

            final int retryAttempt;
            if (isGeneralSendError(error.getCondition())) {
                // Transient server-side errors bump the shared retry counter and record the error.
                synchronized (errorConditionLock) {
                    lastKnownLinkError = exception;
                    lastKnownErrorReportedAt = Instant.now();
                    retryAttempt = retryAttempts.incrementAndGet();
                }
            } else {
                retryAttempt = retryAttempts.get();
            }

            final Duration retryInterval = retry.calculateRetryDelay(exception, retryAttempt);
            if (retryInterval == null || retryInterval.compareTo(workItem.getTimeoutTracker().remaining()) > 0) {
                // No retry possible, or the backoff would outlive the operation timeout — fail the send.
                cleanupFailedSend(workItem, exception, outcome);
            } else {
                workItem.setLastKnownException(exception);
                try {
                    reactorProvider.getReactorDispatcher().invoke(() -> sendWork(workItem), retryInterval);
                } catch (IOException | RejectedExecutionException schedulerException) {
                    exception.initCause(schedulerException);
                    cleanupFailedSend(
                        workItem,
                        new AmqpException(false,
                            String.format(Locale.US, "Entity(%s): send operation failed while scheduling a"
                                + " retry on Reactor, see cause for more details.", entityPath),
                            schedulerException, handler.getErrorContext(sender)),
                        outcome);
                }
            }
        } else if (outcome instanceof Released) {
            cleanupFailedSend(workItem, new OperationCancelledException(outcome.toString(),
                handler.getErrorContext(sender)), outcome);
        } else if (outcome instanceof Declared) {
            final Declared declared = (Declared) outcome;
            workItem.success(declared);
        } else {
            cleanupFailedSend(workItem, new AmqpException(false, outcome.toString(),
                handler.getErrorContext(sender)), outcome);
        }
    }

    // Schedules processSendWork() on the reactor thread; scheduling failures are logged, not thrown.
    private void scheduleWorkOnDispatcher() {
        try {
            reactorProvider.getReactorDispatcher().invoke(this::processSendWork);
        } catch (IOException e) {
            logger.warning("Error scheduling work on reactor.", e);
        } catch (RejectedExecutionException e) {
            logger.info("Error scheduling work on reactor because of RejectedExecutionException.");
        }
    }

    private void cleanupFailedSend(final RetriableWorkItem workItem, final Exception exception,
        final DeliveryState deliveryState) {
        workItem.error(exception, deliveryState);
    }

    // Final teardown: signal close completion, dispose subscriptions, and close the token manager (if any).
    private void completeClose() {
        isClosedMono.emitEmpty((signalType, result) -> {
            addSignalTypeAndResult(logger.atWarning(), signalType, result).log("Unable to emit shutdown signal.");
            return false;
        });

        subscriptions.dispose();

        if (tokenManager != null) {
            tokenManager.close();
        }
    }

    /**
     * Clears pending sends and puts an error in there.
     *
     * @param error Error to pass to pending sends.
     */
    private void handleError(Throwable error) {
        synchronized (pendingSendLock) {
            if (isDisposed.getAndSet(true)) {
                logger.verbose("This was already disposed. Dropping error.");
            } else {
                logger.atVerbose()
                    .addKeyValue(PENDING_SENDS_SIZE_KEY, () -> String.valueOf(pendingSendsMap.size()))
                    .log("Disposing pending sends with error.");
            }

            pendingSendsMap.forEach((key, value) -> value.error(error, null));
            pendingSendsMap.clear();
            pendingSendsQueue.clear();
        }

        completeClose();
    }

    // Link closed normally (endpoint stream completed): fail all pending sends with a retriable AmqpException.
    private void handleClose() {
        final String message = String.format("Could not complete sends because link '%s' for '%s' is closed.",
            getLinkName(), entityPath);
        final AmqpErrorContext context = handler.getErrorContext(sender);

        synchronized (pendingSendLock) {
            if (isDisposed.getAndSet(true)) {
                logger.verbose("This was already disposed.");
            } else {
                logger.atVerbose()
                    .addKeyValue(PENDING_SENDS_SIZE_KEY, () -> String.valueOf(pendingSendsMap.size()))
                    .log("Disposing pending sends.");
            }

            pendingSendsMap.forEach((key, value) -> value.error(new AmqpException(true, message, context), null));
            pendingSendsMap.clear();
            pendingSendsQueue.clear();
        }

        completeClose();
    }

    // These error conditions are treated as transient and feed the shared retry/backoff state.
    private static boolean isGeneralSendError(Symbol amqpError) {
        return (amqpError == AmqpErrorCode.SERVER_BUSY_ERROR || amqpError == AmqpErrorCode.TIMEOUT_ERROR
            || amqpError == AmqpErrorCode.RESOURCE_LIMIT_EXCEEDED);
    }

    // A delivery tag paired with a scheduling priority (retried sends are prioritised).
    private static class WeightedDeliveryTag {
        private final String deliveryTag;
        private final int priority;

        WeightedDeliveryTag(final String deliveryTag, final int priority) {
            this.deliveryTag = deliveryTag;
            this.priority = priority;
        }

        private String getDeliveryTag() {
            return this.deliveryTag;
        }

        private int getPriority() {
            return this.priority;
        }
    }

    // Orders WeightedDeliveryTags so higher priority values are polled first from the PriorityQueue.
    private static class DeliveryTagComparator implements Comparator<WeightedDeliveryTag>, Serializable {
        private static final long serialVersionUID = -7057500582037295635L;

        @Override
        public int compare(WeightedDeliveryTag deliveryTag0, WeightedDeliveryTag deliveryTag1) {
            return deliveryTag1.getPriority() - deliveryTag0.getPriority();
        }
    }

    /**
     * Keeps track of messages that have been sent, but may not have been acknowledged by the service.
     */
    private class SendTimeout implements Runnable {
        private final String deliveryTag;

        SendTimeout(String deliveryTag) {
            this.deliveryTag = deliveryTag;
        }

        @Override
        public void run() {
            final RetriableWorkItem workItem = pendingSendsMap.remove(deliveryTag);
            if (workItem == null) {
                // Already settled via processDeliveredMessage — nothing to time out.
                return;
            }

            Exception cause = lastKnownLinkError;
            final Exception lastError;
            final Instant lastErrorTime;

            synchronized (errorConditionLock) {
                lastError = lastKnownLinkError;
                lastErrorTime = lastKnownErrorReportedAt;
            }

            // Attribute the timeout to a recent link error if one occurred within the busy-sleep or
            // operation-timeout windows; otherwise report a plain timeout.
            if (lastError != null && lastErrorTime != null) {
                final Instant now = Instant.now();
                final boolean isLastErrorAfterSleepTime =
                    lastErrorTime.isAfter(now.minusSeconds(SERVER_BUSY_BASE_SLEEP_TIME_IN_SECS));
                final boolean isServerBusy = lastError instanceof AmqpException && isLastErrorAfterSleepTime;
                final boolean isLastErrorAfterOperationTimeout =
                    lastErrorTime.isAfter(now.minus(retryOptions.getTryTimeout()));

                cause = isServerBusy || isLastErrorAfterOperationTimeout ? lastError : null;
            }

            final AmqpException exception;
            if (cause instanceof AmqpException) {
                exception = (AmqpException) cause;
            } else {
                exception = new AmqpException(true, AmqpErrorCondition.TIMEOUT_ERROR,
                    String.format(Locale.US, "Entity(%s): Send operation timed out", entityPath),
                    handler.getErrorContext(sender));
            }

            workItem.error(exception, null);
        }
    }
}
class ReactorSender implements AmqpSendLink, AsyncCloseable, AutoCloseable { private static final String DELIVERY_TAG_KEY = "deliveryTag"; private static final String PENDING_SENDS_SIZE_KEY = "pending_sends_size"; private final String entityPath; private final Sender sender; private final SendLinkHandler handler; private final ReactorProvider reactorProvider; private final Disposable.Composite subscriptions; private final AtomicBoolean hasConnected = new AtomicBoolean(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicInteger retryAttempts = new AtomicInteger(); private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final Object pendingSendLock = new Object(); private final ConcurrentHashMap<String, RetriableWorkItem> pendingSendsMap = new ConcurrentHashMap<>(); private final PriorityQueue<WeightedDeliveryTag> pendingSendsQueue = new PriorityQueue<>(1000, new DeliveryTagComparator()); private final ClientLogger logger; private final Flux<AmqpEndpointState> endpointStates; private final TokenManager tokenManager; private final MessageSerializer messageSerializer; private final AmqpRetryPolicy retry; private final AmqpRetryOptions retryOptions; private final String activeTimeoutMessage; private final Scheduler scheduler; private final AmqpMetricsProvider metricsProvider; private final Object errorConditionLock = new Object(); private volatile Exception lastKnownLinkError; private volatile Instant lastKnownErrorReportedAt; private volatile int linkSize; /** * Creates an instance of {@link ReactorSender}. * * @param amqpConnection The parent {@link AmqpConnection} that this sender lives in. * @param entityPath The message broker address for the sender. * @param sender The underlying proton-j sender. * @param handler The proton-j handler associated with the sender. * @param reactorProvider Provider to schedule work on the proton-j reactor. * @param tokenManager Token manager for authorising with the CBS node. 
Can be {@code null} if it is part of the * transaction manager. * @param messageSerializer Serializer to deserialise and serialize AMQP messages. * @param retryOptions Retry options. * @param scheduler Scheduler to schedule send timeout. */ ReactorSender(AmqpConnection amqpConnection, String entityPath, Sender sender, SendLinkHandler handler, ReactorProvider reactorProvider, TokenManager tokenManager, MessageSerializer messageSerializer, AmqpRetryOptions retryOptions, Scheduler scheduler, AmqpMetricsProvider metricsProvider) { this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.sender = Objects.requireNonNull(sender, "'sender' cannot be null."); this.handler = Objects.requireNonNull(handler, "'handler' cannot be null."); this.reactorProvider = Objects.requireNonNull(reactorProvider, "'reactorProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null."); this.scheduler = Objects.requireNonNull(scheduler, "'scheduler' cannot be null."); this.retry = RetryUtil.getRetryPolicy(retryOptions); this.tokenManager = tokenManager; this.metricsProvider = metricsProvider; String connectionId = handler.getConnectionId() == null ? NOT_APPLICABLE : handler.getConnectionId(); String linkName = getLinkName() == null ? 
NOT_APPLICABLE : getLinkName(); Map<String, Object> loggingContext = createContextWithConnectionId(connectionId); loggingContext.put(LINK_NAME_KEY, linkName); loggingContext.put(ENTITY_PATH_KEY, entityPath); this.logger = new ClientLogger(ReactorSender.class, loggingContext); this.activeTimeoutMessage = String.format( "ReactorSender connectionId[%s] linkName[%s]: Waiting for send and receive handler to be ACTIVE", handler.getConnectionId(), handler.getLinkName()); this.endpointStates = this.handler.getEndpointStates() .map(state -> { logger.verbose("State {}", state); this.hasConnected.set(state == EndpointState.ACTIVE); return AmqpEndpointStateUtil.getConnectionState(state); }) .doOnError(error -> { hasConnected.set(false); handleError(error); }) .doOnComplete(() -> { hasConnected.set(false); handleClose(); }) .cache(1); this.subscriptions = Disposables.composite( this.endpointStates.subscribe(), this.handler.getDeliveredMessages().subscribe(this::processDeliveredMessage), this.handler.getLinkCredits().subscribe(credit -> { logger.atVerbose().addKeyValue("credits", credit) .log("Credits on link."); this.scheduleWorkOnDispatcher(); }), amqpConnection.getShutdownSignals().flatMap(signal -> { logger.verbose("Shutdown signal received."); hasConnected.set(false); return closeAsync("Connection shutdown.", null); }).subscribe() ); if (tokenManager != null) { this.subscriptions.add(tokenManager.getAuthorizationResults().onErrorResume(error -> { final Mono<Void> operation = closeAsync(String.format("connectionId[%s] linkName[%s] Token renewal failure. Disposing send " + "link.", amqpConnection.getId(), getLinkName()), new ErrorCondition(Symbol.getSymbol(NOT_ALLOWED.getErrorCondition()), error.getMessage())); return operation.then(Mono.empty()); }).subscribe(response -> { logger.atVerbose().addKeyValue("response", response) .log("Token refreshed."); }, error -> { }, () -> { logger.verbose(" Authorization completed. Disposing."); closeAsync("Authorization completed. 
Disposing.", null).subscribe(); })); } } @Override public Flux<AmqpEndpointState> getEndpointStates() { return endpointStates; } @Override public Mono<Void> send(Message message) { return send(message, null); } @Override public Mono<Void> send(Message message, DeliveryState deliveryState) { if (isDisposed.get()) { return Mono.error(new IllegalStateException(String.format( "connectionId[%s] linkName[%s] Cannot publish message when disposed.", handler.getConnectionId(), getLinkName()))); } return getLinkSize() .flatMap(maxMessageSize -> { final int payloadSize = messageSerializer.getSize(message); final int allocationSize = Math.min(payloadSize + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize); final byte[] bytes = new byte[allocationSize]; int encodedSize; try { encodedSize = message.encode(bytes, 0, allocationSize); } catch (BufferOverflowException exception) { final String errorMessage = String.format(Locale.US, "Error sending. Size of the payload exceeded maximum message size: %s kb", maxMessageSize / 1024); final Throwable error = new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, errorMessage, exception, handler.getErrorContext(sender)); return Mono.error(error); } return send(bytes, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT, deliveryState); }).then(); } @Override public Mono<Void> send(List<Message> messageBatch) { return send(messageBatch, null); } @Override public Mono<Void> send(List<Message> messageBatch, DeliveryState deliveryState) { if (isDisposed.get()) { return Mono.error(new IllegalStateException(String.format( "connectionId[%s] linkName[%s] Cannot publish data batch when disposed.", handler.getConnectionId(), getLinkName()))); } if (messageBatch.size() == 1) { return send(messageBatch.get(0), deliveryState); } return getLinkSize() .flatMap(maxMessageSize -> { int totalEncodedSize = 0; final CompositeReadableBuffer buffer = new CompositeReadableBuffer(); final byte[] envelopBytes = batchEnvelopBytes(messageBatch.get(0), 
maxMessageSize); if (envelopBytes.length > 0) { totalEncodedSize += envelopBytes.length; if (totalEncodedSize > maxMessageSize) { return batchBufferOverflowError(maxMessageSize); } buffer.append(envelopBytes); } for (final Message message : messageBatch) { final byte[] sectionBytes = batchBinaryDataSectionBytes(message, maxMessageSize); if (sectionBytes.length > 0) { totalEncodedSize += sectionBytes.length; if (totalEncodedSize > maxMessageSize) { return batchBufferOverflowError(maxMessageSize); } buffer.append(sectionBytes); } else { logger.info("Ignoring the empty message org.apache.qpid.proton.message.message@{} in the batch.", Integer.toHexString(System.identityHashCode(message))); } } return send(buffer, AmqpConstants.AMQP_BATCH_MESSAGE_FORMAT, deliveryState); }).then(); } private byte[] batchEnvelopBytes(Message envelopMessage, int maxMessageSize) { final Message message = Proton.message(); message.setMessageAnnotations(envelopMessage.getMessageAnnotations()); if ((envelopMessage.getMessageId() instanceof String) && !CoreUtils.isNullOrEmpty((String) envelopMessage.getMessageId())) { message.setMessageId(envelopMessage.getMessageId()); } if (!CoreUtils.isNullOrEmpty(envelopMessage.getGroupId())) { message.setGroupId(envelopMessage.getGroupId()); } final int size = messageSerializer.getSize(message); final int allocationSize = Math.min(size + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize); final byte[] encodedBytes = new byte[allocationSize]; final int encodedSize = message.encode(encodedBytes, 0, allocationSize); return Arrays.copyOf(encodedBytes, encodedSize); } private byte[] batchBinaryDataSectionBytes(Message sectionMessage, int maxMessageSize) { final int size = messageSerializer.getSize(sectionMessage); final int allocationSize = Math.min(size + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize); final byte[] encodedBytes = new byte[allocationSize]; final int encodedSize = sectionMessage.encode(encodedBytes, 0, allocationSize); final Message message = 
Proton.message(); final Data binaryData = new Data(new Binary(encodedBytes, 0, encodedSize)); message.setBody(binaryData); final int binaryRawSize = binaryData.getValue().getLength(); final int binaryEncodedSize = binaryEncodedSize(binaryRawSize); final byte[] binaryEncodedBytes = new byte[binaryEncodedSize]; message.encode(binaryEncodedBytes, 0, binaryEncodedSize); return binaryEncodedBytes; } private Mono<Void> batchBufferOverflowError(int maxMessageSize) { return FluxUtil.monoError(logger, new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, String.format(Locale.US, "Size of the payload exceeded maximum message size: %s kb", maxMessageSize / 1024), new BufferOverflowException(), handler.getErrorContext(sender))); } /** * Compute the encoded size when encoding a binary data of given size per Amqp 1.0 spec "amqp:data:binary" format. * * @param binaryRawSize the length of the binary data. * @return the encoded size. */ private int binaryEncodedSize(int binaryRawSize) { if (binaryRawSize <= 255) { return 5 + binaryRawSize; } else { return 8 + binaryRawSize; } } @Override public AmqpErrorContext getErrorContext() { return handler.getErrorContext(sender); } @Override public String getLinkName() { return sender.getName(); } @Override public String getEntityPath() { return entityPath; } @Override public String getHostname() { return handler.getHostname(); } @Override public Mono<Integer> getLinkSize() { if (linkSize > 0) { return Mono.defer(() -> Mono.just(this.linkSize)); } synchronized (this) { if (linkSize > 0) { return Mono.defer(() -> Mono.just(linkSize)); } return RetryUtil.withRetry(getEndpointStates().takeUntil(state -> state == AmqpEndpointState.ACTIVE), retryOptions, activeTimeoutMessage) .then(Mono.fromCallable(() -> { final UnsignedLong remoteMaxMessageSize = sender.getRemoteMaxMessageSize(); if (remoteMaxMessageSize != null) { linkSize = remoteMaxMessageSize.intValue(); } else { logger.warning("Could not get the getRemoteMaxMessageSize. 
Returning current link size: {}", linkSize); } return linkSize; })); } } @Override public boolean isDisposed() { return isDisposed.get(); } /** * Blocking call that disposes of the sender. * * @see */ @Override public void dispose() { close(); } /** * Blocking call that disposes of the sender. * * @see */ @Override public void close() { closeAsync().block(retryOptions.getTryTimeout()); } @Override public Mono<Void> closeAsync() { return closeAsync("User invoked close operation.", null); } /** * Disposes of the sender. * * @param errorCondition Error condition associated with close operation. * @param message Message associated with why the sender was closed. * * @return A mono that completes when the send link has closed. */ Mono<Void> closeAsync(String message, ErrorCondition errorCondition) { if (isDisposed.getAndSet(true)) { return isClosedMono.asMono(); } addErrorCondition(logger.atVerbose(), errorCondition) .log("Setting error condition and disposing. {}", message); final Runnable closeWork = () -> { if (errorCondition != null && sender.getCondition() == null) { sender.setCondition(errorCondition); } sender.close(); }; return Mono.fromRunnable(() -> { try { reactorProvider.getReactorDispatcher().invoke(closeWork); } catch (IOException e) { logger.warning("Could not schedule close work. Running manually. And completing close.", e); closeWork.run(); handleClose(); } catch (RejectedExecutionException e) { logger.info("RejectedExecutionException scheduling close work. And completing close."); closeWork.run(); handleClose(); } }).then(isClosedMono.asMono()) .publishOn(Schedulers.boundedElastic()); } /** * A mono that completes when the sender has completely closed. * * @return mono that completes when the sender has completely closed. 
*/ Mono<Void> isClosed() { return isClosedMono.asMono(); } @Override public Mono<DeliveryState> send(byte[] bytes, int arrayOffset, int messageFormat, DeliveryState deliveryState) { return onEndpointActive().then(Mono.create(sink -> { sendWork(new RetriableWorkItem(bytes, arrayOffset, messageFormat, sink, retryOptions.getTryTimeout(), deliveryState, metricsProvider)); })); } Mono<DeliveryState> send(ReadableBuffer buffer, int messageFormat, DeliveryState deliveryState) { return onEndpointActive().then(Mono.create(sink -> { sendWork(new RetriableWorkItem(buffer, messageFormat, sink, retryOptions.getTryTimeout(), deliveryState, metricsProvider)); })); } private Flux<EndpointState> onEndpointActive() { return RetryUtil.withRetry(handler.getEndpointStates().takeUntil(state -> state == EndpointState.ACTIVE), retryOptions, activeTimeoutMessage); } /** * Add the work item in pending send to be processed on {@link ReactorDispatcher} thread. * * @param workItem to be processed. */ private void sendWork(RetriableWorkItem workItem) { final String deliveryTag = UUID.randomUUID().toString().replace("-", ""); synchronized (pendingSendLock) { this.pendingSendsMap.put(deliveryTag, workItem); this.pendingSendsQueue.offer(new WeightedDeliveryTag(deliveryTag, workItem.hasBeenRetried() ? 1 : 0)); } this.scheduleWorkOnDispatcher(); } /** * Invokes work on the Reactor. 
Should only be called from ReactorDispatcher.invoke() */ private void processDeliveredMessage(Delivery delivery) { final DeliveryState outcome = delivery.getRemoteState(); final String deliveryTag = new String(delivery.getTag(), UTF_8); logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("Process delivered message."); final RetriableWorkItem workItem = pendingSendsMap.remove(deliveryTag); if (workItem == null) { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("Mismatch (or send timed out)."); return; } else if (workItem.isDeliveryStateProvided()) { workItem.success(outcome); return; } if (outcome instanceof Accepted) { synchronized (errorConditionLock) { lastKnownLinkError = null; lastKnownErrorReportedAt = null; retryAttempts.set(0); } workItem.success(outcome); } else if (outcome instanceof Rejected) { final Rejected rejected = (Rejected) outcome; final org.apache.qpid.proton.amqp.transport.ErrorCondition error = rejected.getError(); final Exception exception = ExceptionUtil.toException(error.getCondition().toString(), error.getDescription(), handler.getErrorContext(sender)); logger.atWarning() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .addKeyValue("rejected", rejected) .log("Delivery rejected."); final int retryAttempt; if (isGeneralSendError(error.getCondition())) { synchronized (errorConditionLock) { lastKnownLinkError = exception; lastKnownErrorReportedAt = Instant.now(); retryAttempt = retryAttempts.incrementAndGet(); } } else { retryAttempt = retryAttempts.get(); } final Duration retryInterval = retry.calculateRetryDelay(exception, retryAttempt); if (retryInterval == null || retryInterval.compareTo(workItem.getTimeoutTracker().remaining()) > 0) { cleanupFailedSend(workItem, exception, outcome); } else { workItem.setLastKnownException(exception); try { reactorProvider.getReactorDispatcher().invoke(() -> sendWork(workItem), retryInterval); } catch (IOException | RejectedExecutionException schedulerException) { 
exception.initCause(schedulerException); cleanupFailedSend( workItem, new AmqpException(false, String.format(Locale.US, "Entity(%s): send operation failed while scheduling a" + " retry on Reactor, see cause for more details.", entityPath), schedulerException, handler.getErrorContext(sender)), outcome); } } } else if (outcome instanceof Released) { cleanupFailedSend(workItem, new OperationCancelledException(outcome.toString(), handler.getErrorContext(sender)), outcome); } else if (outcome instanceof Declared) { final Declared declared = (Declared) outcome; workItem.success(declared); } else { cleanupFailedSend(workItem, new AmqpException(false, outcome.toString(), handler.getErrorContext(sender)), outcome); } } private void scheduleWorkOnDispatcher() { try { reactorProvider.getReactorDispatcher().invoke(this::processSendWork); } catch (IOException e) { logger.warning("Error scheduling work on reactor.", e); } catch (RejectedExecutionException e) { logger.info("Error scheduling work on reactor because of RejectedExecutionException."); } } private void cleanupFailedSend(final RetriableWorkItem workItem, final Exception exception, final DeliveryState deliveryState) { workItem.error(exception, deliveryState); } private void completeClose() { isClosedMono.emitEmpty((signalType, result) -> { addSignalTypeAndResult(logger.atWarning(), signalType, result).log("Unable to emit shutdown signal."); return false; }); subscriptions.dispose(); if (tokenManager != null) { tokenManager.close(); } } /** * Clears pending sends and puts an error in there. * * @param error Error to pass to pending sends. */ private void handleError(Throwable error) { synchronized (pendingSendLock) { if (isDisposed.getAndSet(true)) { logger.verbose("This was already disposed. 
Dropping error."); } else { logger.atVerbose() .addKeyValue(PENDING_SENDS_SIZE_KEY, () -> String.valueOf(pendingSendsMap.size())) .log("Disposing pending sends with error."); } pendingSendsMap.forEach((key, value) -> value.error(error, null)); pendingSendsMap.clear(); pendingSendsQueue.clear(); } completeClose(); } private void handleClose() { final String message = String.format("Could not complete sends because link '%s' for '%s' is closed.", getLinkName(), entityPath); final AmqpErrorContext context = handler.getErrorContext(sender); synchronized (pendingSendLock) { if (isDisposed.getAndSet(true)) { logger.verbose("This was already disposed."); } else { logger.atVerbose() .addKeyValue(PENDING_SENDS_SIZE_KEY, () -> String.valueOf(pendingSendsMap.size())) .log("Disposing pending sends."); } pendingSendsMap.forEach((key, value) -> value.error(new AmqpException(true, message, context), null)); pendingSendsMap.clear(); pendingSendsQueue.clear(); } completeClose(); } private static boolean isGeneralSendError(Symbol amqpError) { return (amqpError == AmqpErrorCode.SERVER_BUSY_ERROR || amqpError == AmqpErrorCode.TIMEOUT_ERROR || amqpError == AmqpErrorCode.RESOURCE_LIMIT_EXCEEDED); } private static class WeightedDeliveryTag { private final String deliveryTag; private final int priority; WeightedDeliveryTag(final String deliveryTag, final int priority) { this.deliveryTag = deliveryTag; this.priority = priority; } private String getDeliveryTag() { return this.deliveryTag; } private int getPriority() { return this.priority; } } private static class DeliveryTagComparator implements Comparator<WeightedDeliveryTag>, Serializable { private static final long serialVersionUID = -7057500582037295635L; @Override public int compare(WeightedDeliveryTag deliveryTag0, WeightedDeliveryTag deliveryTag1) { return deliveryTag1.getPriority() - deliveryTag0.getPriority(); } } /** * Keeps track of messages that have been sent, but may not have been acknowledged by the service. 
*/ private class SendTimeout implements Runnable { private final String deliveryTag; SendTimeout(String deliveryTag) { this.deliveryTag = deliveryTag; } @Override public void run() { final RetriableWorkItem workItem = pendingSendsMap.remove(deliveryTag); if (workItem == null) { return; } Exception cause = lastKnownLinkError; final Exception lastError; final Instant lastErrorTime; synchronized (errorConditionLock) { lastError = lastKnownLinkError; lastErrorTime = lastKnownErrorReportedAt; } if (lastError != null && lastErrorTime != null) { final Instant now = Instant.now(); final boolean isLastErrorAfterSleepTime = lastErrorTime.isAfter(now.minusSeconds(SERVER_BUSY_BASE_SLEEP_TIME_IN_SECS)); final boolean isServerBusy = lastError instanceof AmqpException && isLastErrorAfterSleepTime; final boolean isLastErrorAfterOperationTimeout = lastErrorTime.isAfter(now.minus(retryOptions.getTryTimeout())); cause = isServerBusy || isLastErrorAfterOperationTimeout ? lastError : null; } final AmqpException exception; if (cause instanceof AmqpException) { exception = (AmqpException) cause; } else { exception = new AmqpException(true, AmqpErrorCondition.TIMEOUT_ERROR, String.format(Locale.US, "Entity(%s): Send operation timed out", entityPath), handler.getErrorContext(sender)); } workItem.error(exception, null); } } }
Once this change is finalized, this method will return a ReadableBuffer instead of a byte[].
private void processSendWork() { if (!hasConnected.get()) { logger.warning("Not connected. Not processing send work."); return; } if (isDisposed.get()) { logger.info("Sender is closed. Not executing work."); return; } while (hasConnected.get() && sender.getCredit() > 0) { final WeightedDeliveryTag weightedDelivery; final RetriableWorkItem workItem; final String deliveryTag; synchronized (pendingSendLock) { weightedDelivery = this.pendingSendsQueue.poll(); if (weightedDelivery != null) { deliveryTag = weightedDelivery.getDeliveryTag(); workItem = this.pendingSendsMap.get(deliveryTag); } else { workItem = null; deliveryTag = null; } } if (workItem == null) { if (deliveryTag != null) { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("sendData not found for this delivery."); } break; } Delivery delivery = null; boolean linkAdvance = false; int sentMsgSize = 0; Exception sendException = null; try { workItem.beforeTry(); delivery = sender.delivery(deliveryTag.getBytes(UTF_8)); delivery.setMessageFormat(workItem.getMessageFormat()); if (workItem.isDeliveryStateProvided()) { delivery.disposition(workItem.getDeliveryState()); } if (workItem.getMessage() != null) { final byte[] encodedBytes = workItem.getMessage(); sentMsgSize = sender.send(encodedBytes, 0, workItem.getEncodedMessageSize()); } else { final ReadableBuffer encodedBuffer = workItem.getEncodedBuffer(); encodedBuffer.position(0); sentMsgSize = sender.send(encodedBuffer); } sentMsgSize = sender.send(workItem.getMessage(), 0, workItem.getEncodedMessageSize()); assert sentMsgSize == workItem.getEncodedMessageSize() : "Contract of the ProtonJ library for Sender. 
Send API changed"; linkAdvance = sender.advance(); } catch (Exception exception) { sendException = exception; } if (linkAdvance) { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("Sent message."); workItem.setWaitingForAck(); scheduler.schedule(new SendTimeout(deliveryTag), retryOptions.getTryTimeout().toMillis(), TimeUnit.MILLISECONDS); } else { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .addKeyValue("sentMessageSize", sentMsgSize) .addKeyValue("payloadActualSize", workItem.getEncodedMessageSize()) .log("Sendlink advance failed."); DeliveryState outcome = null; if (delivery != null) { outcome = delivery.getRemoteState(); delivery.free(); } final AmqpErrorContext context = handler.getErrorContext(sender); final Throwable exception = sendException != null ? new OperationCancelledException(String.format(Locale.US, "Entity(%s): send operation failed. Please see cause for more details", entityPath), sendException, context) : new OperationCancelledException(String.format(Locale.US, "Entity(%s): send operation failed while advancing delivery(tag: %s).", entityPath, deliveryTag), context); workItem.error(exception, outcome); } } }
if (workItem.getMessage() != null) {
/**
 * Drains the pending-send queue and writes each work item to the proton-j sender link while link
 * credit is available. Should only be invoked on the ReactorDispatcher thread.
 */
private void processSendWork() {
    // Bail out early if the link has not reached ACTIVE yet; queued work is retained.
    if (!hasConnected.get()) {
        logger.warning("Not connected. Not processing send work.");
        return;
    }

    if (isDisposed.get()) {
        logger.info("Sender is closed. Not executing work.");
        return;
    }

    // Each iteration consumes one unit of link credit.
    while (hasConnected.get() && sender.getCredit() > 0) {
        final WeightedDeliveryTag weightedDelivery;
        final RetriableWorkItem workItem;
        final String deliveryTag;

        synchronized (pendingSendLock) {
            weightedDelivery = this.pendingSendsQueue.poll();
            if (weightedDelivery != null) {
                deliveryTag = weightedDelivery.getDeliveryTag();
                // The item stays in pendingSendsMap until acknowledged or timed out.
                workItem = this.pendingSendsMap.get(deliveryTag);
            } else {
                workItem = null;
                deliveryTag = null;
            }
        }

        if (workItem == null) {
            if (deliveryTag != null) {
                logger.atVerbose()
                    .addKeyValue(DELIVERY_TAG_KEY, deliveryTag)
                    .log("sendData not found for this delivery.");
            }
            // Queue drained (or tag orphaned); stop until more work is scheduled.
            break;
        }

        Delivery delivery = null;
        boolean linkAdvance = false;
        // NOTE(review): sentMsgSize is never reassigned in this variant (the work item performs
        // the send itself), so the failure-path log always reports 0 — confirm this is intended.
        int sentMsgSize = 0;
        Exception sendException = null;

        try {
            workItem.beforeTry();
            delivery = sender.delivery(deliveryTag.getBytes(UTF_8));
            delivery.setMessageFormat(workItem.getMessageFormat());

            if (workItem.isDeliveryStateProvided()) {
                delivery.disposition(workItem.getDeliveryState());
            }

            // The work item owns its encoded payload (byte[] or ReadableBuffer) and writes it
            // to the link itself, replacing the earlier inline byte[]/buffer branching.
            workItem.send(sender);
            linkAdvance = sender.advance();
        } catch (Exception exception) {
            sendException = exception;
        }

        if (linkAdvance) {
            logger.atVerbose()
                .addKeyValue(DELIVERY_TAG_KEY, deliveryTag)
                .log("Sent message.");

            workItem.setWaitingForAck();
            // Fail the send if no acknowledgement arrives within the try timeout.
            scheduler.schedule(new SendTimeout(deliveryTag), retryOptions.getTryTimeout().toMillis(),
                TimeUnit.MILLISECONDS);
        } else {
            logger.atVerbose()
                .addKeyValue(DELIVERY_TAG_KEY, deliveryTag)
                .addKeyValue("sentMessageSize", sentMsgSize)
                .addKeyValue("payloadActualSize", workItem.getEncodedMessageSize())
                .log("Sendlink advance failed.");

            DeliveryState outcome = null;
            if (delivery != null) {
                outcome = delivery.getRemoteState();
                delivery.free();
            }

            final AmqpErrorContext context = handler.getErrorContext(sender);
            final Throwable exception = sendException != null
                ? new OperationCancelledException(String.format(Locale.US,
                    "Entity(%s): send operation failed. Please see cause for more details", entityPath),
                    sendException, context)
                : new OperationCancelledException(String.format(Locale.US,
                    "Entity(%s): send operation failed while advancing delivery(tag: %s).", entityPath,
                    deliveryTag), context);

            workItem.error(exception, outcome);
        }
    }
}
class ReactorSender implements AmqpSendLink, AsyncCloseable, AutoCloseable { private static final String DELIVERY_TAG_KEY = "deliveryTag"; private static final String PENDING_SENDS_SIZE_KEY = "pending_sends_size"; private final String entityPath; private final Sender sender; private final SendLinkHandler handler; private final ReactorProvider reactorProvider; private final Disposable.Composite subscriptions; private final AtomicBoolean hasConnected = new AtomicBoolean(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicInteger retryAttempts = new AtomicInteger(); private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final Object pendingSendLock = new Object(); private final ConcurrentHashMap<String, RetriableWorkItem> pendingSendsMap = new ConcurrentHashMap<>(); private final PriorityQueue<WeightedDeliveryTag> pendingSendsQueue = new PriorityQueue<>(1000, new DeliveryTagComparator()); private final ClientLogger logger; private final Flux<AmqpEndpointState> endpointStates; private final TokenManager tokenManager; private final MessageSerializer messageSerializer; private final AmqpRetryPolicy retry; private final AmqpRetryOptions retryOptions; private final String activeTimeoutMessage; private final Scheduler scheduler; private final AmqpMetricsProvider metricsProvider; private final Object errorConditionLock = new Object(); private volatile Exception lastKnownLinkError; private volatile Instant lastKnownErrorReportedAt; private volatile int linkSize; /** * Creates an instance of {@link ReactorSender}. * * @param amqpConnection The parent {@link AmqpConnection} that this sender lives in. * @param entityPath The message broker address for the sender. * @param sender The underlying proton-j sender. * @param handler The proton-j handler associated with the sender. * @param reactorProvider Provider to schedule work on the proton-j reactor. * @param tokenManager Token manager for authorising with the CBS node. 
Can be {@code null} if it is part of the * transaction manager. * @param messageSerializer Serializer to deserialise and serialize AMQP messages. * @param retryOptions Retry options. * @param scheduler Scheduler to schedule send timeout. */ ReactorSender(AmqpConnection amqpConnection, String entityPath, Sender sender, SendLinkHandler handler, ReactorProvider reactorProvider, TokenManager tokenManager, MessageSerializer messageSerializer, AmqpRetryOptions retryOptions, Scheduler scheduler, AmqpMetricsProvider metricsProvider) { this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.sender = Objects.requireNonNull(sender, "'sender' cannot be null."); this.handler = Objects.requireNonNull(handler, "'handler' cannot be null."); this.reactorProvider = Objects.requireNonNull(reactorProvider, "'reactorProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null."); this.scheduler = Objects.requireNonNull(scheduler, "'scheduler' cannot be null."); this.retry = RetryUtil.getRetryPolicy(retryOptions); this.tokenManager = tokenManager; this.metricsProvider = metricsProvider; String connectionId = handler.getConnectionId() == null ? NOT_APPLICABLE : handler.getConnectionId(); String linkName = getLinkName() == null ? 
NOT_APPLICABLE : getLinkName(); Map<String, Object> loggingContext = createContextWithConnectionId(connectionId); loggingContext.put(LINK_NAME_KEY, linkName); loggingContext.put(ENTITY_PATH_KEY, entityPath); this.logger = new ClientLogger(ReactorSender.class, loggingContext); this.activeTimeoutMessage = String.format( "ReactorSender connectionId[%s] linkName[%s]: Waiting for send and receive handler to be ACTIVE", handler.getConnectionId(), handler.getLinkName()); this.endpointStates = this.handler.getEndpointStates() .map(state -> { logger.verbose("State {}", state); this.hasConnected.set(state == EndpointState.ACTIVE); return AmqpEndpointStateUtil.getConnectionState(state); }) .doOnError(error -> { hasConnected.set(false); handleError(error); }) .doOnComplete(() -> { hasConnected.set(false); handleClose(); }) .cache(1); this.subscriptions = Disposables.composite( this.endpointStates.subscribe(), this.handler.getDeliveredMessages().subscribe(this::processDeliveredMessage), this.handler.getLinkCredits().subscribe(credit -> { logger.atVerbose().addKeyValue("credits", credit) .log("Credits on link."); this.scheduleWorkOnDispatcher(); }), amqpConnection.getShutdownSignals().flatMap(signal -> { logger.verbose("Shutdown signal received."); hasConnected.set(false); return closeAsync("Connection shutdown.", null); }).subscribe() ); if (tokenManager != null) { this.subscriptions.add(tokenManager.getAuthorizationResults().onErrorResume(error -> { final Mono<Void> operation = closeAsync(String.format("connectionId[%s] linkName[%s] Token renewal failure. Disposing send " + "link.", amqpConnection.getId(), getLinkName()), new ErrorCondition(Symbol.getSymbol(NOT_ALLOWED.getErrorCondition()), error.getMessage())); return operation.then(Mono.empty()); }).subscribe(response -> { logger.atVerbose().addKeyValue("response", response) .log("Token refreshed."); }, error -> { }, () -> { logger.verbose(" Authorization completed. Disposing."); closeAsync("Authorization completed. 
Disposing.", null).subscribe(); })); } } @Override public Flux<AmqpEndpointState> getEndpointStates() { return endpointStates; } @Override public Mono<Void> send(Message message) { return send(message, null); } @Override public Mono<Void> send(Message message, DeliveryState deliveryState) { if (isDisposed.get()) { return Mono.error(new IllegalStateException(String.format( "connectionId[%s] linkName[%s] Cannot publish message when disposed.", handler.getConnectionId(), getLinkName()))); } return getLinkSize() .flatMap(maxMessageSize -> { final int payloadSize = messageSerializer.getSize(message); final int allocationSize = Math.min(payloadSize + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize); final byte[] bytes = new byte[allocationSize]; int encodedSize; try { encodedSize = message.encode(bytes, 0, allocationSize); } catch (BufferOverflowException exception) { final String errorMessage = String.format(Locale.US, "Error sending. Size of the payload exceeded maximum message size: %s kb", maxMessageSize / 1024); final Throwable error = new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, errorMessage, exception, handler.getErrorContext(sender)); return Mono.error(error); } return send(bytes, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT, deliveryState); }).then(); } @Override public Mono<Void> send(List<Message> messageBatch) { return send(messageBatch, null); } @Override public Mono<Void> send(List<Message> messageBatch, DeliveryState deliveryState) { if (isDisposed.get()) { return Mono.error(new IllegalStateException(String.format( "connectionId[%s] linkName[%s] Cannot publish data batch when disposed.", handler.getConnectionId(), getLinkName()))); } if (messageBatch.size() == 1) { return send(messageBatch.get(0), deliveryState); } return getLinkSize() .flatMap(maxMessageSize -> { final Message firstMessage = messageBatch.get(0); final Message batchMessage = Proton.message(); 
batchMessage.setMessageAnnotations(firstMessage.getMessageAnnotations()); final int maxMessageSizeTemp = maxMessageSize; final byte[] bytes = new byte[maxMessageSizeTemp]; int encodedSize = batchMessage.encode(bytes, 0, maxMessageSizeTemp); int byteArrayOffset = encodedSize; for (final Message amqpMessage : messageBatch) { final Message messageWrappedByData = Proton.message(); int payloadSize = messageSerializer.getSize(amqpMessage); int allocationSize = Math.min(payloadSize + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSizeTemp); byte[] messageBytes = new byte[allocationSize]; int messageSizeBytes = amqpMessage.encode(messageBytes, 0, allocationSize); messageWrappedByData.setBody(new Data(new Binary(messageBytes, 0, messageSizeBytes))); try { encodedSize = messageWrappedByData .encode(bytes, byteArrayOffset, maxMessageSizeTemp - byteArrayOffset - 1); } catch (BufferOverflowException exception) { final String message = String.format(Locale.US, "Size of the payload exceeded maximum message size: %s kb", maxMessageSizeTemp / 1024); final AmqpException error = new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, exception, handler.getErrorContext(sender)); return Mono.error(error); } byteArrayOffset = byteArrayOffset + encodedSize; } return send(bytes, byteArrayOffset, AmqpConstants.AMQP_BATCH_MESSAGE_FORMAT, deliveryState); }).then(); } private Mono<Void> batchSend(List<Message> batch, int maxMessageSize, DeliveryState deliveryState) { int totalEncodedSize = 0; final CompositeReadableBuffer buffer = new CompositeReadableBuffer(); final byte[] envelopBytes = batchEnvelopBytes(batch.get(0), maxMessageSize); totalEncodedSize += envelopBytes.length; if (totalEncodedSize > maxMessageSize) { return batchBufferOverflowError(maxMessageSize); } buffer.append(envelopBytes); for (final Message message : batch) { final byte[] sectionBytes = batchBinaryDataSectionBytes(message, maxMessageSize); totalEncodedSize += sectionBytes.length; if (totalEncodedSize > 
maxMessageSize) { return batchBufferOverflowError(maxMessageSize); } buffer.append(sectionBytes); } final Flux<EndpointState> activeEndpointFlux = RetryUtil.withRetry( handler.getEndpointStates().takeUntil(state -> state == EndpointState.ACTIVE), retryOptions, activeTimeoutMessage); final int totalEncodedBufferSize = totalEncodedSize; Mono<DeliveryState> sendMono = activeEndpointFlux.then(Mono.create(sink -> { sendWork(new RetriableWorkItem(buffer, totalEncodedBufferSize, AmqpConstants.AMQP_BATCH_MESSAGE_FORMAT, sink, retryOptions.getTryTimeout(), deliveryState, metricsProvider)); })); return sendMono.then(); } private byte[] batchEnvelopBytes(Message envelopMessage, int maxMessageSize) { final Message message = Proton.message(); message.setMessageAnnotations(envelopMessage.getMessageAnnotations()); final int size = messageSerializer.getSize(message); final int allocationSize = Math.min(size + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize); final byte[] encodedBytes = new byte[allocationSize]; final int encodedSize = message.encode(encodedBytes, 0, allocationSize); return Arrays.copyOf(encodedBytes, encodedSize); } private byte[] batchBinaryDataSectionBytes(Message sectionMessage, int maxMessageSize) { final int size = messageSerializer.getSize(sectionMessage); final int allocationSize = Math.min(size + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize); final byte[] encodedBytes = new byte[allocationSize]; final int encodedSize = sectionMessage.encode(encodedBytes, 0, allocationSize); final Message message = Proton.message(); final Data binaryData = new Data(new Binary(encodedBytes, 0, encodedSize)); message.setBody(binaryData); final int binaryRawSize = binaryData.getValue().getLength(); final int binaryEncodedSize = binaryEncodedSize(binaryRawSize); final byte[] binaryEncodedBytes = new byte[binaryEncodedSize]; message.encode(binaryEncodedBytes, 0, binaryEncodedSize); return binaryEncodedBytes; } private Mono<Void> batchBufferOverflowError(int maxMessageSize) { return 
Mono.error(new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, String.format(Locale.US, "Size of the payload exceeded maximum message size: %s kb", maxMessageSize / 1024), new BufferOverflowException(), handler.getErrorContext(sender))); } private int binaryEncodedSize(int binaryRawSize) { if (binaryRawSize <= 255) { return 5 + binaryRawSize; } else { return 8 + binaryRawSize; } } @Override public AmqpErrorContext getErrorContext() { return handler.getErrorContext(sender); } @Override public String getLinkName() { return sender.getName(); } @Override public String getEntityPath() { return entityPath; } @Override public String getHostname() { return handler.getHostname(); } @Override public Mono<Integer> getLinkSize() { if (linkSize > 0) { return Mono.defer(() -> Mono.just(this.linkSize)); } synchronized (this) { if (linkSize > 0) { return Mono.defer(() -> Mono.just(linkSize)); } return RetryUtil.withRetry(getEndpointStates().takeUntil(state -> state == AmqpEndpointState.ACTIVE), retryOptions, activeTimeoutMessage) .then(Mono.fromCallable(() -> { final UnsignedLong remoteMaxMessageSize = sender.getRemoteMaxMessageSize(); if (remoteMaxMessageSize != null) { linkSize = remoteMaxMessageSize.intValue(); } else { logger.warning("Could not get the getRemoteMaxMessageSize. Returning current link size: {}", linkSize); } return linkSize; })); } } @Override public boolean isDisposed() { return isDisposed.get(); } /** * Blocking call that disposes of the sender. * * @see */ @Override public void dispose() { close(); } /** * Blocking call that disposes of the sender. * * @see */ @Override public void close() { closeAsync().block(retryOptions.getTryTimeout()); } @Override public Mono<Void> closeAsync() { return closeAsync("User invoked close operation.", null); } /** * Disposes of the sender. * * @param errorCondition Error condition associated with close operation. * @param message Message associated with why the sender was closed. 
* * @return A mono that completes when the send link has closed. */ Mono<Void> closeAsync(String message, ErrorCondition errorCondition) { if (isDisposed.getAndSet(true)) { return isClosedMono.asMono(); } addErrorCondition(logger.atVerbose(), errorCondition) .log("Setting error condition and disposing. {}", message); final Runnable closeWork = () -> { if (errorCondition != null && sender.getCondition() == null) { sender.setCondition(errorCondition); } sender.close(); }; return Mono.fromRunnable(() -> { try { reactorProvider.getReactorDispatcher().invoke(closeWork); } catch (IOException e) { logger.warning("Could not schedule close work. Running manually. And completing close.", e); closeWork.run(); handleClose(); } catch (RejectedExecutionException e) { logger.info("RejectedExecutionException scheduling close work. And completing close."); closeWork.run(); handleClose(); } }).then(isClosedMono.asMono()) .publishOn(Schedulers.boundedElastic()); } /** * A mono that completes when the sender has completely closed. * * @return mono that completes when the sender has completely closed. */ Mono<Void> isClosed() { return isClosedMono.asMono(); } @Override public Mono<DeliveryState> send(byte[] bytes, int arrayOffset, int messageFormat, DeliveryState deliveryState) { final Flux<EndpointState> activeEndpointFlux = RetryUtil.withRetry( handler.getEndpointStates().takeUntil(state -> state == EndpointState.ACTIVE), retryOptions, activeTimeoutMessage); return activeEndpointFlux.then(Mono.create(sink -> { sendWork(new RetriableWorkItem(bytes, arrayOffset, messageFormat, sink, retryOptions.getTryTimeout(), deliveryState, metricsProvider)); })); } /** * Add the work item in pending send to be processed on {@link ReactorDispatcher} thread. * * @param workItem to be processed. 
*/ private void sendWork(RetriableWorkItem workItem) { final String deliveryTag = UUID.randomUUID().toString().replace("-", ""); synchronized (pendingSendLock) { this.pendingSendsMap.put(deliveryTag, workItem); this.pendingSendsQueue.offer(new WeightedDeliveryTag(deliveryTag, workItem.hasBeenRetried() ? 1 : 0)); } this.scheduleWorkOnDispatcher(); } /** * Invokes work on the Reactor. Should only be called from ReactorDispatcher.invoke() */ private void processDeliveredMessage(Delivery delivery) { final DeliveryState outcome = delivery.getRemoteState(); final String deliveryTag = new String(delivery.getTag(), UTF_8); logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("Process delivered message."); final RetriableWorkItem workItem = pendingSendsMap.remove(deliveryTag); if (workItem == null) { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("Mismatch (or send timed out)."); return; } else if (workItem.isDeliveryStateProvided()) { workItem.success(outcome); return; } if (outcome instanceof Accepted) { synchronized (errorConditionLock) { lastKnownLinkError = null; lastKnownErrorReportedAt = null; retryAttempts.set(0); } workItem.success(outcome); } else if (outcome instanceof Rejected) { final Rejected rejected = (Rejected) outcome; final org.apache.qpid.proton.amqp.transport.ErrorCondition error = rejected.getError(); final Exception exception = ExceptionUtil.toException(error.getCondition().toString(), error.getDescription(), handler.getErrorContext(sender)); logger.atWarning() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .addKeyValue("rejected", rejected) .log("Delivery rejected."); final int retryAttempt; if (isGeneralSendError(error.getCondition())) { synchronized (errorConditionLock) { lastKnownLinkError = exception; lastKnownErrorReportedAt = Instant.now(); retryAttempt = retryAttempts.incrementAndGet(); } } else { retryAttempt = retryAttempts.get(); } final Duration retryInterval = retry.calculateRetryDelay(exception, 
retryAttempt); if (retryInterval == null || retryInterval.compareTo(workItem.getTimeoutTracker().remaining()) > 0) { cleanupFailedSend(workItem, exception, outcome); } else { workItem.setLastKnownException(exception); try { reactorProvider.getReactorDispatcher().invoke(() -> sendWork(workItem), retryInterval); } catch (IOException | RejectedExecutionException schedulerException) { exception.initCause(schedulerException); cleanupFailedSend( workItem, new AmqpException(false, String.format(Locale.US, "Entity(%s): send operation failed while scheduling a" + " retry on Reactor, see cause for more details.", entityPath), schedulerException, handler.getErrorContext(sender)), outcome); } } } else if (outcome instanceof Released) { cleanupFailedSend(workItem, new OperationCancelledException(outcome.toString(), handler.getErrorContext(sender)), outcome); } else if (outcome instanceof Declared) { final Declared declared = (Declared) outcome; workItem.success(declared); } else { cleanupFailedSend(workItem, new AmqpException(false, outcome.toString(), handler.getErrorContext(sender)), outcome); } } private void scheduleWorkOnDispatcher() { try { reactorProvider.getReactorDispatcher().invoke(this::processSendWork); } catch (IOException e) { logger.warning("Error scheduling work on reactor.", e); } catch (RejectedExecutionException e) { logger.info("Error scheduling work on reactor because of RejectedExecutionException."); } } private void cleanupFailedSend(final RetriableWorkItem workItem, final Exception exception, final DeliveryState deliveryState) { workItem.error(exception, deliveryState); } private void completeClose() { isClosedMono.emitEmpty((signalType, result) -> { addSignalTypeAndResult(logger.atWarning(), signalType, result).log("Unable to emit shutdown signal."); return false; }); subscriptions.dispose(); if (tokenManager != null) { tokenManager.close(); } } /** * Clears pending sends and puts an error in there. * * @param error Error to pass to pending sends. 
*/ private void handleError(Throwable error) { synchronized (pendingSendLock) { if (isDisposed.getAndSet(true)) { logger.verbose("This was already disposed. Dropping error."); } else { logger.atVerbose() .addKeyValue(PENDING_SENDS_SIZE_KEY, () -> String.valueOf(pendingSendsMap.size())) .log("Disposing pending sends with error."); } pendingSendsMap.forEach((key, value) -> value.error(error, null)); pendingSendsMap.clear(); pendingSendsQueue.clear(); } completeClose(); } private void handleClose() { final String message = String.format("Could not complete sends because link '%s' for '%s' is closed.", getLinkName(), entityPath); final AmqpErrorContext context = handler.getErrorContext(sender); synchronized (pendingSendLock) { if (isDisposed.getAndSet(true)) { logger.verbose("This was already disposed."); } else { logger.atVerbose() .addKeyValue(PENDING_SENDS_SIZE_KEY, () -> String.valueOf(pendingSendsMap.size())) .log("Disposing pending sends."); } pendingSendsMap.forEach((key, value) -> value.error(new AmqpException(true, message, context), null)); pendingSendsMap.clear(); pendingSendsQueue.clear(); } completeClose(); } private static boolean isGeneralSendError(Symbol amqpError) { return (amqpError == AmqpErrorCode.SERVER_BUSY_ERROR || amqpError == AmqpErrorCode.TIMEOUT_ERROR || amqpError == AmqpErrorCode.RESOURCE_LIMIT_EXCEEDED); } private static class WeightedDeliveryTag { private final String deliveryTag; private final int priority; WeightedDeliveryTag(final String deliveryTag, final int priority) { this.deliveryTag = deliveryTag; this.priority = priority; } private String getDeliveryTag() { return this.deliveryTag; } private int getPriority() { return this.priority; } } private static class DeliveryTagComparator implements Comparator<WeightedDeliveryTag>, Serializable { private static final long serialVersionUID = -7057500582037295635L; @Override public int compare(WeightedDeliveryTag deliveryTag0, WeightedDeliveryTag deliveryTag1) { return 
deliveryTag1.getPriority() - deliveryTag0.getPriority(); } } /** * Keeps track of messages that have been sent, but may not have been acknowledged by the service. */ private class SendTimeout implements Runnable { private final String deliveryTag; SendTimeout(String deliveryTag) { this.deliveryTag = deliveryTag; } @Override public void run() { final RetriableWorkItem workItem = pendingSendsMap.remove(deliveryTag); if (workItem == null) { return; } Exception cause = lastKnownLinkError; final Exception lastError; final Instant lastErrorTime; synchronized (errorConditionLock) { lastError = lastKnownLinkError; lastErrorTime = lastKnownErrorReportedAt; } if (lastError != null && lastErrorTime != null) { final Instant now = Instant.now(); final boolean isLastErrorAfterSleepTime = lastErrorTime.isAfter(now.minusSeconds(SERVER_BUSY_BASE_SLEEP_TIME_IN_SECS)); final boolean isServerBusy = lastError instanceof AmqpException && isLastErrorAfterSleepTime; final boolean isLastErrorAfterOperationTimeout = lastErrorTime.isAfter(now.minus(retryOptions.getTryTimeout())); cause = isServerBusy || isLastErrorAfterOperationTimeout ? lastError : null; } final AmqpException exception; if (cause instanceof AmqpException) { exception = (AmqpException) cause; } else { exception = new AmqpException(true, AmqpErrorCondition.TIMEOUT_ERROR, String.format(Locale.US, "Entity(%s): Send operation timed out", entityPath), handler.getErrorContext(sender)); } workItem.error(exception, null); } } }
class ReactorSender implements AmqpSendLink, AsyncCloseable, AutoCloseable { private static final String DELIVERY_TAG_KEY = "deliveryTag"; private static final String PENDING_SENDS_SIZE_KEY = "pending_sends_size"; private final String entityPath; private final Sender sender; private final SendLinkHandler handler; private final ReactorProvider reactorProvider; private final Disposable.Composite subscriptions; private final AtomicBoolean hasConnected = new AtomicBoolean(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicInteger retryAttempts = new AtomicInteger(); private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final Object pendingSendLock = new Object(); private final ConcurrentHashMap<String, RetriableWorkItem> pendingSendsMap = new ConcurrentHashMap<>(); private final PriorityQueue<WeightedDeliveryTag> pendingSendsQueue = new PriorityQueue<>(1000, new DeliveryTagComparator()); private final ClientLogger logger; private final Flux<AmqpEndpointState> endpointStates; private final TokenManager tokenManager; private final MessageSerializer messageSerializer; private final AmqpRetryPolicy retry; private final AmqpRetryOptions retryOptions; private final String activeTimeoutMessage; private final Scheduler scheduler; private final AmqpMetricsProvider metricsProvider; private final Object errorConditionLock = new Object(); private volatile Exception lastKnownLinkError; private volatile Instant lastKnownErrorReportedAt; private volatile int linkSize; /** * Creates an instance of {@link ReactorSender}. * * @param amqpConnection The parent {@link AmqpConnection} that this sender lives in. * @param entityPath The message broker address for the sender. * @param sender The underlying proton-j sender. * @param handler The proton-j handler associated with the sender. * @param reactorProvider Provider to schedule work on the proton-j reactor. * @param tokenManager Token manager for authorising with the CBS node. 
Can be {@code null} if it is part of the * transaction manager. * @param messageSerializer Serializer to deserialise and serialize AMQP messages. * @param retryOptions Retry options. * @param scheduler Scheduler to schedule send timeout. */ ReactorSender(AmqpConnection amqpConnection, String entityPath, Sender sender, SendLinkHandler handler, ReactorProvider reactorProvider, TokenManager tokenManager, MessageSerializer messageSerializer, AmqpRetryOptions retryOptions, Scheduler scheduler, AmqpMetricsProvider metricsProvider) { this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.sender = Objects.requireNonNull(sender, "'sender' cannot be null."); this.handler = Objects.requireNonNull(handler, "'handler' cannot be null."); this.reactorProvider = Objects.requireNonNull(reactorProvider, "'reactorProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null."); this.scheduler = Objects.requireNonNull(scheduler, "'scheduler' cannot be null."); this.retry = RetryUtil.getRetryPolicy(retryOptions); this.tokenManager = tokenManager; this.metricsProvider = metricsProvider; String connectionId = handler.getConnectionId() == null ? NOT_APPLICABLE : handler.getConnectionId(); String linkName = getLinkName() == null ? 
NOT_APPLICABLE : getLinkName(); Map<String, Object> loggingContext = createContextWithConnectionId(connectionId); loggingContext.put(LINK_NAME_KEY, linkName); loggingContext.put(ENTITY_PATH_KEY, entityPath); this.logger = new ClientLogger(ReactorSender.class, loggingContext); this.activeTimeoutMessage = String.format( "ReactorSender connectionId[%s] linkName[%s]: Waiting for send and receive handler to be ACTIVE", handler.getConnectionId(), handler.getLinkName()); this.endpointStates = this.handler.getEndpointStates() .map(state -> { logger.verbose("State {}", state); this.hasConnected.set(state == EndpointState.ACTIVE); return AmqpEndpointStateUtil.getConnectionState(state); }) .doOnError(error -> { hasConnected.set(false); handleError(error); }) .doOnComplete(() -> { hasConnected.set(false); handleClose(); }) .cache(1); this.subscriptions = Disposables.composite( this.endpointStates.subscribe(), this.handler.getDeliveredMessages().subscribe(this::processDeliveredMessage), this.handler.getLinkCredits().subscribe(credit -> { logger.atVerbose().addKeyValue("credits", credit) .log("Credits on link."); this.scheduleWorkOnDispatcher(); }), amqpConnection.getShutdownSignals().flatMap(signal -> { logger.verbose("Shutdown signal received."); hasConnected.set(false); return closeAsync("Connection shutdown.", null); }).subscribe() ); if (tokenManager != null) { this.subscriptions.add(tokenManager.getAuthorizationResults().onErrorResume(error -> { final Mono<Void> operation = closeAsync(String.format("connectionId[%s] linkName[%s] Token renewal failure. Disposing send " + "link.", amqpConnection.getId(), getLinkName()), new ErrorCondition(Symbol.getSymbol(NOT_ALLOWED.getErrorCondition()), error.getMessage())); return operation.then(Mono.empty()); }).subscribe(response -> { logger.atVerbose().addKeyValue("response", response) .log("Token refreshed."); }, error -> { }, () -> { logger.verbose(" Authorization completed. Disposing."); closeAsync("Authorization completed. 
Disposing.", null).subscribe(); })); } } @Override public Flux<AmqpEndpointState> getEndpointStates() { return endpointStates; } @Override public Mono<Void> send(Message message) { return send(message, null); } @Override public Mono<Void> send(Message message, DeliveryState deliveryState) { if (isDisposed.get()) { return Mono.error(new IllegalStateException(String.format( "connectionId[%s] linkName[%s] Cannot publish message when disposed.", handler.getConnectionId(), getLinkName()))); } return getLinkSize() .flatMap(maxMessageSize -> { final int payloadSize = messageSerializer.getSize(message); final int allocationSize = Math.min(payloadSize + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize); final byte[] bytes = new byte[allocationSize]; int encodedSize; try { encodedSize = message.encode(bytes, 0, allocationSize); } catch (BufferOverflowException exception) { final String errorMessage = String.format(Locale.US, "Error sending. Size of the payload exceeded maximum message size: %s kb", maxMessageSize / 1024); final Throwable error = new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, errorMessage, exception, handler.getErrorContext(sender)); return Mono.error(error); } return send(bytes, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT, deliveryState); }).then(); } @Override public Mono<Void> send(List<Message> messageBatch) { return send(messageBatch, null); } @Override public Mono<Void> send(List<Message> messageBatch, DeliveryState deliveryState) { if (isDisposed.get()) { return Mono.error(new IllegalStateException(String.format( "connectionId[%s] linkName[%s] Cannot publish data batch when disposed.", handler.getConnectionId(), getLinkName()))); } if (messageBatch.size() == 1) { return send(messageBatch.get(0), deliveryState); } return getLinkSize() .flatMap(maxMessageSize -> { int totalEncodedSize = 0; final CompositeReadableBuffer buffer = new CompositeReadableBuffer(); final byte[] envelopBytes = batchEnvelopBytes(messageBatch.get(0), 
maxMessageSize); if (envelopBytes.length > 0) { totalEncodedSize += envelopBytes.length; if (totalEncodedSize > maxMessageSize) { return batchBufferOverflowError(maxMessageSize); } buffer.append(envelopBytes); } for (final Message message : messageBatch) { final byte[] sectionBytes = batchBinaryDataSectionBytes(message, maxMessageSize); if (sectionBytes.length > 0) { totalEncodedSize += sectionBytes.length; if (totalEncodedSize > maxMessageSize) { return batchBufferOverflowError(maxMessageSize); } buffer.append(sectionBytes); } else { logger.info("Ignoring the empty message org.apache.qpid.proton.message.message@{} in the batch.", Integer.toHexString(System.identityHashCode(message))); } } return send(buffer, AmqpConstants.AMQP_BATCH_MESSAGE_FORMAT, deliveryState); }).then(); } private byte[] batchEnvelopBytes(Message envelopMessage, int maxMessageSize) { final Message message = Proton.message(); message.setMessageAnnotations(envelopMessage.getMessageAnnotations()); if ((envelopMessage.getMessageId() instanceof String) && !CoreUtils.isNullOrEmpty((String) envelopMessage.getMessageId())) { message.setMessageId(envelopMessage.getMessageId()); } if (!CoreUtils.isNullOrEmpty(envelopMessage.getGroupId())) { message.setGroupId(envelopMessage.getGroupId()); } final int size = messageSerializer.getSize(message); final int allocationSize = Math.min(size + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize); final byte[] encodedBytes = new byte[allocationSize]; final int encodedSize = message.encode(encodedBytes, 0, allocationSize); return Arrays.copyOf(encodedBytes, encodedSize); } private byte[] batchBinaryDataSectionBytes(Message sectionMessage, int maxMessageSize) { final int size = messageSerializer.getSize(sectionMessage); final int allocationSize = Math.min(size + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize); final byte[] encodedBytes = new byte[allocationSize]; final int encodedSize = sectionMessage.encode(encodedBytes, 0, allocationSize); final Message message = 
Proton.message(); final Data binaryData = new Data(new Binary(encodedBytes, 0, encodedSize)); message.setBody(binaryData); final int binaryRawSize = binaryData.getValue().getLength(); final int binaryEncodedSize = binaryEncodedSize(binaryRawSize); final byte[] binaryEncodedBytes = new byte[binaryEncodedSize]; message.encode(binaryEncodedBytes, 0, binaryEncodedSize); return binaryEncodedBytes; } private Mono<Void> batchBufferOverflowError(int maxMessageSize) { return FluxUtil.monoError(logger, new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, String.format(Locale.US, "Size of the payload exceeded maximum message size: %s kb", maxMessageSize / 1024), new BufferOverflowException(), handler.getErrorContext(sender))); } /** * Compute the encoded size when encoding a binary data of given size per Amqp 1.0 spec "amqp:data:binary" format. * * @param binaryRawSize the length of the binary data. * @return the encoded size. */ private int binaryEncodedSize(int binaryRawSize) { if (binaryRawSize <= 255) { return 5 + binaryRawSize; } else { return 8 + binaryRawSize; } } @Override public AmqpErrorContext getErrorContext() { return handler.getErrorContext(sender); } @Override public String getLinkName() { return sender.getName(); } @Override public String getEntityPath() { return entityPath; } @Override public String getHostname() { return handler.getHostname(); } @Override public Mono<Integer> getLinkSize() { if (linkSize > 0) { return Mono.defer(() -> Mono.just(this.linkSize)); } synchronized (this) { if (linkSize > 0) { return Mono.defer(() -> Mono.just(linkSize)); } return RetryUtil.withRetry(getEndpointStates().takeUntil(state -> state == AmqpEndpointState.ACTIVE), retryOptions, activeTimeoutMessage) .then(Mono.fromCallable(() -> { final UnsignedLong remoteMaxMessageSize = sender.getRemoteMaxMessageSize(); if (remoteMaxMessageSize != null) { linkSize = remoteMaxMessageSize.intValue(); } else { logger.warning("Could not get the getRemoteMaxMessageSize. 
Returning current link size: {}", linkSize); } return linkSize; })); } } @Override public boolean isDisposed() { return isDisposed.get(); } /** * Blocking call that disposes of the sender. * * @see */ @Override public void dispose() { close(); } /** * Blocking call that disposes of the sender. * * @see */ @Override public void close() { closeAsync().block(retryOptions.getTryTimeout()); } @Override public Mono<Void> closeAsync() { return closeAsync("User invoked close operation.", null); } /** * Disposes of the sender. * * @param errorCondition Error condition associated with close operation. * @param message Message associated with why the sender was closed. * * @return A mono that completes when the send link has closed. */ Mono<Void> closeAsync(String message, ErrorCondition errorCondition) { if (isDisposed.getAndSet(true)) { return isClosedMono.asMono(); } addErrorCondition(logger.atVerbose(), errorCondition) .log("Setting error condition and disposing. {}", message); final Runnable closeWork = () -> { if (errorCondition != null && sender.getCondition() == null) { sender.setCondition(errorCondition); } sender.close(); }; return Mono.fromRunnable(() -> { try { reactorProvider.getReactorDispatcher().invoke(closeWork); } catch (IOException e) { logger.warning("Could not schedule close work. Running manually. And completing close.", e); closeWork.run(); handleClose(); } catch (RejectedExecutionException e) { logger.info("RejectedExecutionException scheduling close work. And completing close."); closeWork.run(); handleClose(); } }).then(isClosedMono.asMono()) .publishOn(Schedulers.boundedElastic()); } /** * A mono that completes when the sender has completely closed. * * @return mono that completes when the sender has completely closed. 
*/ Mono<Void> isClosed() { return isClosedMono.asMono(); } @Override public Mono<DeliveryState> send(byte[] bytes, int arrayOffset, int messageFormat, DeliveryState deliveryState) { return onEndpointActive().then(Mono.create(sink -> { sendWork(new RetriableWorkItem(bytes, arrayOffset, messageFormat, sink, retryOptions.getTryTimeout(), deliveryState, metricsProvider)); })); } Mono<DeliveryState> send(ReadableBuffer buffer, int messageFormat, DeliveryState deliveryState) { return onEndpointActive().then(Mono.create(sink -> { sendWork(new RetriableWorkItem(buffer, messageFormat, sink, retryOptions.getTryTimeout(), deliveryState, metricsProvider)); })); } private Flux<EndpointState> onEndpointActive() { return RetryUtil.withRetry(handler.getEndpointStates().takeUntil(state -> state == EndpointState.ACTIVE), retryOptions, activeTimeoutMessage); } /** * Add the work item in pending send to be processed on {@link ReactorDispatcher} thread. * * @param workItem to be processed. */ private void sendWork(RetriableWorkItem workItem) { final String deliveryTag = UUID.randomUUID().toString().replace("-", ""); synchronized (pendingSendLock) { this.pendingSendsMap.put(deliveryTag, workItem); this.pendingSendsQueue.offer(new WeightedDeliveryTag(deliveryTag, workItem.hasBeenRetried() ? 1 : 0)); } this.scheduleWorkOnDispatcher(); } /** * Invokes work on the Reactor. 
Should only be called from ReactorDispatcher.invoke() */ private void processDeliveredMessage(Delivery delivery) { final DeliveryState outcome = delivery.getRemoteState(); final String deliveryTag = new String(delivery.getTag(), UTF_8); logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("Process delivered message."); final RetriableWorkItem workItem = pendingSendsMap.remove(deliveryTag); if (workItem == null) { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("Mismatch (or send timed out)."); return; } else if (workItem.isDeliveryStateProvided()) { workItem.success(outcome); return; } if (outcome instanceof Accepted) { synchronized (errorConditionLock) { lastKnownLinkError = null; lastKnownErrorReportedAt = null; retryAttempts.set(0); } workItem.success(outcome); } else if (outcome instanceof Rejected) { final Rejected rejected = (Rejected) outcome; final org.apache.qpid.proton.amqp.transport.ErrorCondition error = rejected.getError(); final Exception exception = ExceptionUtil.toException(error.getCondition().toString(), error.getDescription(), handler.getErrorContext(sender)); logger.atWarning() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .addKeyValue("rejected", rejected) .log("Delivery rejected."); final int retryAttempt; if (isGeneralSendError(error.getCondition())) { synchronized (errorConditionLock) { lastKnownLinkError = exception; lastKnownErrorReportedAt = Instant.now(); retryAttempt = retryAttempts.incrementAndGet(); } } else { retryAttempt = retryAttempts.get(); } final Duration retryInterval = retry.calculateRetryDelay(exception, retryAttempt); if (retryInterval == null || retryInterval.compareTo(workItem.getTimeoutTracker().remaining()) > 0) { cleanupFailedSend(workItem, exception, outcome); } else { workItem.setLastKnownException(exception); try { reactorProvider.getReactorDispatcher().invoke(() -> sendWork(workItem), retryInterval); } catch (IOException | RejectedExecutionException schedulerException) { 
exception.initCause(schedulerException); cleanupFailedSend( workItem, new AmqpException(false, String.format(Locale.US, "Entity(%s): send operation failed while scheduling a" + " retry on Reactor, see cause for more details.", entityPath), schedulerException, handler.getErrorContext(sender)), outcome); } } } else if (outcome instanceof Released) { cleanupFailedSend(workItem, new OperationCancelledException(outcome.toString(), handler.getErrorContext(sender)), outcome); } else if (outcome instanceof Declared) { final Declared declared = (Declared) outcome; workItem.success(declared); } else { cleanupFailedSend(workItem, new AmqpException(false, outcome.toString(), handler.getErrorContext(sender)), outcome); } } private void scheduleWorkOnDispatcher() { try { reactorProvider.getReactorDispatcher().invoke(this::processSendWork); } catch (IOException e) { logger.warning("Error scheduling work on reactor.", e); } catch (RejectedExecutionException e) { logger.info("Error scheduling work on reactor because of RejectedExecutionException."); } } private void cleanupFailedSend(final RetriableWorkItem workItem, final Exception exception, final DeliveryState deliveryState) { workItem.error(exception, deliveryState); } private void completeClose() { isClosedMono.emitEmpty((signalType, result) -> { addSignalTypeAndResult(logger.atWarning(), signalType, result).log("Unable to emit shutdown signal."); return false; }); subscriptions.dispose(); if (tokenManager != null) { tokenManager.close(); } } /** * Clears pending sends and puts an error in there. * * @param error Error to pass to pending sends. */ private void handleError(Throwable error) { synchronized (pendingSendLock) { if (isDisposed.getAndSet(true)) { logger.verbose("This was already disposed. 
Dropping error."); } else { logger.atVerbose() .addKeyValue(PENDING_SENDS_SIZE_KEY, () -> String.valueOf(pendingSendsMap.size())) .log("Disposing pending sends with error."); } pendingSendsMap.forEach((key, value) -> value.error(error, null)); pendingSendsMap.clear(); pendingSendsQueue.clear(); } completeClose(); } private void handleClose() { final String message = String.format("Could not complete sends because link '%s' for '%s' is closed.", getLinkName(), entityPath); final AmqpErrorContext context = handler.getErrorContext(sender); synchronized (pendingSendLock) { if (isDisposed.getAndSet(true)) { logger.verbose("This was already disposed."); } else { logger.atVerbose() .addKeyValue(PENDING_SENDS_SIZE_KEY, () -> String.valueOf(pendingSendsMap.size())) .log("Disposing pending sends."); } pendingSendsMap.forEach((key, value) -> value.error(new AmqpException(true, message, context), null)); pendingSendsMap.clear(); pendingSendsQueue.clear(); } completeClose(); } private static boolean isGeneralSendError(Symbol amqpError) { return (amqpError == AmqpErrorCode.SERVER_BUSY_ERROR || amqpError == AmqpErrorCode.TIMEOUT_ERROR || amqpError == AmqpErrorCode.RESOURCE_LIMIT_EXCEEDED); } private static class WeightedDeliveryTag { private final String deliveryTag; private final int priority; WeightedDeliveryTag(final String deliveryTag, final int priority) { this.deliveryTag = deliveryTag; this.priority = priority; } private String getDeliveryTag() { return this.deliveryTag; } private int getPriority() { return this.priority; } } private static class DeliveryTagComparator implements Comparator<WeightedDeliveryTag>, Serializable { private static final long serialVersionUID = -7057500582037295635L; @Override public int compare(WeightedDeliveryTag deliveryTag0, WeightedDeliveryTag deliveryTag1) { return deliveryTag1.getPriority() - deliveryTag0.getPriority(); } } /** * Keeps track of messages that have been sent, but may not have been acknowledged by the service. 
*/ private class SendTimeout implements Runnable { private final String deliveryTag; SendTimeout(String deliveryTag) { this.deliveryTag = deliveryTag; } @Override public void run() { final RetriableWorkItem workItem = pendingSendsMap.remove(deliveryTag); if (workItem == null) { return; } Exception cause = lastKnownLinkError; final Exception lastError; final Instant lastErrorTime; synchronized (errorConditionLock) { lastError = lastKnownLinkError; lastErrorTime = lastKnownErrorReportedAt; } if (lastError != null && lastErrorTime != null) { final Instant now = Instant.now(); final boolean isLastErrorAfterSleepTime = lastErrorTime.isAfter(now.minusSeconds(SERVER_BUSY_BASE_SLEEP_TIME_IN_SECS)); final boolean isServerBusy = lastError instanceof AmqpException && isLastErrorAfterSleepTime; final boolean isLastErrorAfterOperationTimeout = lastErrorTime.isAfter(now.minus(retryOptions.getTryTimeout())); cause = isServerBusy || isLastErrorAfterOperationTimeout ? lastError : null; } final AmqpException exception; if (cause instanceof AmqpException) { exception = (AmqpException) cause; } else { exception = new AmqpException(true, AmqpErrorCondition.TIMEOUT_ERROR, String.format(Locale.US, "Entity(%s): Send operation timed out", entityPath), handler.getErrorContext(sender)); } workItem.error(exception, null); } } }
Just to confirm: is instantiating this directly (`new CompositeReadableBuffer()`) the intended usage pattern? In some past cases we have had to go through a static accessor to obtain a shared pool or buffer instead.
/**
 * Sends the already-encoded message bytes on this send link.
 *
 * <p>NOTE(review): despite its name, {@code arrayOffset} is used here as the count of valid
 * encoded bytes — callers pass {@code encodedSize} (see the {@code send(Message, DeliveryState)}
 * overload) — not as an offset into {@code bytes}. Confirm and consider renaming.</p>
 *
 * @param bytes The encoded AMQP message payload to send.
 * @param arrayOffset Number of valid encoded bytes in {@code bytes}; applied as the buffer limit.
 * @param messageFormat AMQP message format code for the delivery.
 * @param deliveryState Delivery state to settle the delivery with; may be {@code null}.
 * @return A mono that emits the remote delivery outcome when the send completes.
 */
public Mono<DeliveryState> send(byte[] bytes, int arrayOffset, int messageFormat, DeliveryState deliveryState) {
    // CompositeReadableBuffer is a plain heap-backed proton-j value type; direct instantiation
    // appears to be the intended usage — no shared pool or static accessor is visible in this
    // file (TODO confirm against proton-j docs, per review question).
    final CompositeReadableBuffer buffer = new CompositeReadableBuffer();
    // Append the whole backing array, then cap the readable region at 'arrayOffset' so only the
    // encoded portion is transmitted; processSendWork later rewinds with position(0) before send.
    buffer.append(bytes).limit(arrayOffset);
    return send(buffer, messageFormat, deliveryState);
}
final CompositeReadableBuffer buffer = new CompositeReadableBuffer();
/**
 * Queues the encoded message bytes for delivery once the send link endpoint reports ACTIVE.
 *
 * @param bytes Encoded AMQP message payload.
 * @param arrayOffset Number of encoded bytes in {@code bytes}.
 * @param messageFormat AMQP message format code for the delivery.
 * @param deliveryState Delivery state to settle the delivery with; may be {@code null}.
 * @return A mono that emits the remote delivery outcome.
 */
public Mono<DeliveryState> send(byte[] bytes, int arrayOffset, int messageFormat, DeliveryState deliveryState) {
    // Defer enqueueing until subscription: the work item is only created and handed to
    // sendWork(...) after the endpoint-active gate below completes.
    final Mono<DeliveryState> enqueueSend = Mono.create(sink -> {
        final RetriableWorkItem pendingSend = new RetriableWorkItem(bytes, arrayOffset, messageFormat, sink,
            retryOptions.getTryTimeout(), deliveryState, metricsProvider);
        sendWork(pendingSend);
    });
    return onEndpointActive().then(enqueueSend);
}
class ReactorSender implements AmqpSendLink, AsyncCloseable, AutoCloseable { private static final String DELIVERY_TAG_KEY = "deliveryTag"; private static final String PENDING_SENDS_SIZE_KEY = "pending_sends_size"; private final String entityPath; private final Sender sender; private final SendLinkHandler handler; private final ReactorProvider reactorProvider; private final Disposable.Composite subscriptions; private final AtomicBoolean hasConnected = new AtomicBoolean(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicInteger retryAttempts = new AtomicInteger(); private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final Object pendingSendLock = new Object(); private final ConcurrentHashMap<String, RetriableWorkItem> pendingSendsMap = new ConcurrentHashMap<>(); private final PriorityQueue<WeightedDeliveryTag> pendingSendsQueue = new PriorityQueue<>(1000, new DeliveryTagComparator()); private final ClientLogger logger; private final Flux<AmqpEndpointState> endpointStates; private final TokenManager tokenManager; private final MessageSerializer messageSerializer; private final AmqpRetryPolicy retry; private final AmqpRetryOptions retryOptions; private final String activeTimeoutMessage; private final Scheduler scheduler; private final AmqpMetricsProvider metricsProvider; private final Object errorConditionLock = new Object(); private volatile Exception lastKnownLinkError; private volatile Instant lastKnownErrorReportedAt; private volatile int linkSize; /** * Creates an instance of {@link ReactorSender}. * * @param amqpConnection The parent {@link AmqpConnection} that this sender lives in. * @param entityPath The message broker address for the sender. * @param sender The underlying proton-j sender. * @param handler The proton-j handler associated with the sender. * @param reactorProvider Provider to schedule work on the proton-j reactor. * @param tokenManager Token manager for authorising with the CBS node. 
Can be {@code null} if it is part of the * transaction manager. * @param messageSerializer Serializer to deserialise and serialize AMQP messages. * @param retryOptions Retry options. * @param scheduler Scheduler to schedule send timeout. */ ReactorSender(AmqpConnection amqpConnection, String entityPath, Sender sender, SendLinkHandler handler, ReactorProvider reactorProvider, TokenManager tokenManager, MessageSerializer messageSerializer, AmqpRetryOptions retryOptions, Scheduler scheduler, AmqpMetricsProvider metricsProvider) { this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.sender = Objects.requireNonNull(sender, "'sender' cannot be null."); this.handler = Objects.requireNonNull(handler, "'handler' cannot be null."); this.reactorProvider = Objects.requireNonNull(reactorProvider, "'reactorProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null."); this.scheduler = Objects.requireNonNull(scheduler, "'scheduler' cannot be null."); this.retry = RetryUtil.getRetryPolicy(retryOptions); this.tokenManager = tokenManager; this.metricsProvider = metricsProvider; String connectionId = handler.getConnectionId() == null ? NOT_APPLICABLE : handler.getConnectionId(); String linkName = getLinkName() == null ? 
NOT_APPLICABLE : getLinkName(); Map<String, Object> loggingContext = createContextWithConnectionId(connectionId); loggingContext.put(LINK_NAME_KEY, linkName); loggingContext.put(ENTITY_PATH_KEY, entityPath); this.logger = new ClientLogger(ReactorSender.class, loggingContext); this.activeTimeoutMessage = String.format( "ReactorSender connectionId[%s] linkName[%s]: Waiting for send and receive handler to be ACTIVE", handler.getConnectionId(), handler.getLinkName()); this.endpointStates = this.handler.getEndpointStates() .map(state -> { logger.verbose("State {}", state); this.hasConnected.set(state == EndpointState.ACTIVE); return AmqpEndpointStateUtil.getConnectionState(state); }) .doOnError(error -> { hasConnected.set(false); handleError(error); }) .doOnComplete(() -> { hasConnected.set(false); handleClose(); }) .cache(1); this.subscriptions = Disposables.composite( this.endpointStates.subscribe(), this.handler.getDeliveredMessages().subscribe(this::processDeliveredMessage), this.handler.getLinkCredits().subscribe(credit -> { logger.atVerbose().addKeyValue("credits", credit) .log("Credits on link."); this.scheduleWorkOnDispatcher(); }), amqpConnection.getShutdownSignals().flatMap(signal -> { logger.verbose("Shutdown signal received."); hasConnected.set(false); return closeAsync("Connection shutdown.", null); }).subscribe() ); if (tokenManager != null) { this.subscriptions.add(tokenManager.getAuthorizationResults().onErrorResume(error -> { final Mono<Void> operation = closeAsync(String.format("connectionId[%s] linkName[%s] Token renewal failure. Disposing send " + "link.", amqpConnection.getId(), getLinkName()), new ErrorCondition(Symbol.getSymbol(NOT_ALLOWED.getErrorCondition()), error.getMessage())); return operation.then(Mono.empty()); }).subscribe(response -> { logger.atVerbose().addKeyValue("response", response) .log("Token refreshed."); }, error -> { }, () -> { logger.verbose(" Authorization completed. Disposing."); closeAsync("Authorization completed. 
Disposing.", null).subscribe(); })); } } @Override public Flux<AmqpEndpointState> getEndpointStates() { return endpointStates; } @Override public Mono<Void> send(Message message) { return send(message, null); } @Override public Mono<Void> send(Message message, DeliveryState deliveryState) { if (isDisposed.get()) { return Mono.error(new IllegalStateException(String.format( "connectionId[%s] linkName[%s] Cannot publish message when disposed.", handler.getConnectionId(), getLinkName()))); } return getLinkSize() .flatMap(maxMessageSize -> { final int payloadSize = messageSerializer.getSize(message); final int allocationSize = Math.min(payloadSize + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize); final byte[] bytes = new byte[allocationSize]; int encodedSize; try { encodedSize = message.encode(bytes, 0, allocationSize); } catch (BufferOverflowException exception) { final String errorMessage = String.format(Locale.US, "Error sending. Size of the payload exceeded maximum message size: %s kb", maxMessageSize / 1024); final Throwable error = new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, errorMessage, exception, handler.getErrorContext(sender)); return Mono.error(error); } return send(bytes, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT, deliveryState); }).then(); } @Override public Mono<Void> send(List<Message> messageBatch) { return send(messageBatch, null); } @Override public Mono<Void> send(List<Message> messageBatch, DeliveryState deliveryState) { if (isDisposed.get()) { return Mono.error(new IllegalStateException(String.format( "connectionId[%s] linkName[%s] Cannot publish data batch when disposed.", handler.getConnectionId(), getLinkName()))); } if (messageBatch.size() == 1) { return send(messageBatch.get(0), deliveryState); } return getLinkSize() .flatMap(maxMessageSize -> { int totalEncodedSize = 0; final CompositeReadableBuffer buffer = new CompositeReadableBuffer(); final byte[] envelopBytes = batchEnvelopBytes(messageBatch.get(0), 
maxMessageSize); if (envelopBytes.length > 0) { totalEncodedSize += envelopBytes.length; if (totalEncodedSize > maxMessageSize) { return batchBufferOverflowError(maxMessageSize); } buffer.append(envelopBytes); } for (final Message message : messageBatch) { final byte[] sectionBytes = batchBinaryDataSectionBytes(message, maxMessageSize); if (sectionBytes.length > 0) { totalEncodedSize += sectionBytes.length; if (totalEncodedSize > maxMessageSize) { return batchBufferOverflowError(maxMessageSize); } buffer.append(sectionBytes); } } return send(buffer, AmqpConstants.AMQP_BATCH_MESSAGE_FORMAT, deliveryState); }).then(); } private byte[] batchEnvelopBytes(Message envelopMessage, int maxMessageSize) { final Message message = Proton.message(); message.setMessageAnnotations(envelopMessage.getMessageAnnotations()); final int size = messageSerializer.getSize(message); final int allocationSize = Math.min(size + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize); final byte[] encodedBytes = new byte[allocationSize]; final int encodedSize = message.encode(encodedBytes, 0, allocationSize); return Arrays.copyOf(encodedBytes, encodedSize); } private byte[] batchBinaryDataSectionBytes(Message sectionMessage, int maxMessageSize) { final int size = messageSerializer.getSize(sectionMessage); final int allocationSize = Math.min(size + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize); final byte[] encodedBytes = new byte[allocationSize]; final int encodedSize = sectionMessage.encode(encodedBytes, 0, allocationSize); final Message message = Proton.message(); final Data binaryData = new Data(new Binary(encodedBytes, 0, encodedSize)); message.setBody(binaryData); final int binaryRawSize = binaryData.getValue().getLength(); final int binaryEncodedSize = binaryEncodedSize(binaryRawSize); final byte[] binaryEncodedBytes = new byte[binaryEncodedSize]; message.encode(binaryEncodedBytes, 0, binaryEncodedSize); return binaryEncodedBytes; } private Mono<Void> batchBufferOverflowError(int maxMessageSize) { 
return Mono.error(new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, String.format(Locale.US, "Size of the payload exceeded maximum message size: %s kb", maxMessageSize / 1024), new BufferOverflowException(), handler.getErrorContext(sender))); } /** * Compute the encoded size when encoding a binary data of given size per Amqp 1.0 spec "amqp:data:binary" format. * * @param binaryRawSize the length of the binary data. * @return the encoded size. */ private int binaryEncodedSize(int binaryRawSize) { if (binaryRawSize <= 255) { return 5 + binaryRawSize; } else { return 8 + binaryRawSize; } } @Override public AmqpErrorContext getErrorContext() { return handler.getErrorContext(sender); } @Override public String getLinkName() { return sender.getName(); } @Override public String getEntityPath() { return entityPath; } @Override public String getHostname() { return handler.getHostname(); } @Override public Mono<Integer> getLinkSize() { if (linkSize > 0) { return Mono.defer(() -> Mono.just(this.linkSize)); } synchronized (this) { if (linkSize > 0) { return Mono.defer(() -> Mono.just(linkSize)); } return RetryUtil.withRetry(getEndpointStates().takeUntil(state -> state == AmqpEndpointState.ACTIVE), retryOptions, activeTimeoutMessage) .then(Mono.fromCallable(() -> { final UnsignedLong remoteMaxMessageSize = sender.getRemoteMaxMessageSize(); if (remoteMaxMessageSize != null) { linkSize = remoteMaxMessageSize.intValue(); } else { logger.warning("Could not get the getRemoteMaxMessageSize. Returning current link size: {}", linkSize); } return linkSize; })); } } @Override public boolean isDisposed() { return isDisposed.get(); } /** * Blocking call that disposes of the sender. * * @see */ @Override public void dispose() { close(); } /** * Blocking call that disposes of the sender. 
* * @see */ @Override public void close() { closeAsync().block(retryOptions.getTryTimeout()); } @Override public Mono<Void> closeAsync() { return closeAsync("User invoked close operation.", null); } /** * Disposes of the sender. * * @param errorCondition Error condition associated with close operation. * @param message Message associated with why the sender was closed. * * @return A mono that completes when the send link has closed. */ Mono<Void> closeAsync(String message, ErrorCondition errorCondition) { if (isDisposed.getAndSet(true)) { return isClosedMono.asMono(); } addErrorCondition(logger.atVerbose(), errorCondition) .log("Setting error condition and disposing. {}", message); final Runnable closeWork = () -> { if (errorCondition != null && sender.getCondition() == null) { sender.setCondition(errorCondition); } sender.close(); }; return Mono.fromRunnable(() -> { try { reactorProvider.getReactorDispatcher().invoke(closeWork); } catch (IOException e) { logger.warning("Could not schedule close work. Running manually. And completing close.", e); closeWork.run(); handleClose(); } catch (RejectedExecutionException e) { logger.info("RejectedExecutionException scheduling close work. And completing close."); closeWork.run(); handleClose(); } }).then(isClosedMono.asMono()) .publishOn(Schedulers.boundedElastic()); } /** * A mono that completes when the sender has completely closed. * * @return mono that completes when the sender has completely closed. 
*/ Mono<Void> isClosed() { return isClosedMono.asMono(); } @Override Mono<DeliveryState> send(ReadableBuffer buffer, int messageFormat, DeliveryState deliveryState) { final Flux<EndpointState> activeEndpointFlux = RetryUtil.withRetry( handler.getEndpointStates().takeUntil(state -> state == EndpointState.ACTIVE), retryOptions, activeTimeoutMessage); return activeEndpointFlux.then(Mono.create(sink -> { sendWork(new RetriableWorkItem(buffer, messageFormat, sink, retryOptions.getTryTimeout(), deliveryState, metricsProvider)); })); } /** * Add the work item in pending send to be processed on {@link ReactorDispatcher} thread. * * @param workItem to be processed. */ private void sendWork(RetriableWorkItem workItem) { final String deliveryTag = UUID.randomUUID().toString().replace("-", ""); synchronized (pendingSendLock) { this.pendingSendsMap.put(deliveryTag, workItem); this.pendingSendsQueue.offer(new WeightedDeliveryTag(deliveryTag, workItem.hasBeenRetried() ? 1 : 0)); } this.scheduleWorkOnDispatcher(); } /** * Invokes work on the Reactor. Should only be called from ReactorDispatcher.invoke() */ private void processSendWork() { if (!hasConnected.get()) { logger.warning("Not connected. Not processing send work."); return; } if (isDisposed.get()) { logger.info("Sender is closed. 
Not executing work."); return; } while (hasConnected.get() && sender.getCredit() > 0) { final WeightedDeliveryTag weightedDelivery; final RetriableWorkItem workItem; final String deliveryTag; synchronized (pendingSendLock) { weightedDelivery = this.pendingSendsQueue.poll(); if (weightedDelivery != null) { deliveryTag = weightedDelivery.getDeliveryTag(); workItem = this.pendingSendsMap.get(deliveryTag); } else { workItem = null; deliveryTag = null; } } if (workItem == null) { if (deliveryTag != null) { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("sendData not found for this delivery."); } break; } Delivery delivery = null; boolean linkAdvance = false; int sentMsgSize = 0; Exception sendException = null; try { workItem.beforeTry(); delivery = sender.delivery(deliveryTag.getBytes(UTF_8)); delivery.setMessageFormat(workItem.getMessageFormat()); if (workItem.isDeliveryStateProvided()) { delivery.disposition(workItem.getDeliveryState()); } final ReadableBuffer encodedBuffer = workItem.getEncodedBuffer(); encodedBuffer.rewind(); sentMsgSize = sender.send(encodedBuffer); assert sentMsgSize == workItem.getEncodedMessageSize() : "Contract of the ProtonJ library for Sender. 
Send API changed"; linkAdvance = sender.advance(); } catch (Exception exception) { sendException = exception; } if (linkAdvance) { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("Sent message."); workItem.setWaitingForAck(); scheduler.schedule(new SendTimeout(deliveryTag), retryOptions.getTryTimeout().toMillis(), TimeUnit.MILLISECONDS); } else { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .addKeyValue("sentMessageSize", sentMsgSize) .addKeyValue("payloadActualSize", workItem.getEncodedMessageSize()) .log("Sendlink advance failed."); DeliveryState outcome = null; if (delivery != null) { outcome = delivery.getRemoteState(); delivery.free(); } final AmqpErrorContext context = handler.getErrorContext(sender); final Throwable exception = sendException != null ? new OperationCancelledException(String.format(Locale.US, "Entity(%s): send operation failed. Please see cause for more details", entityPath), sendException, context) : new OperationCancelledException(String.format(Locale.US, "Entity(%s): send operation failed while advancing delivery(tag: %s).", entityPath, deliveryTag), context); workItem.error(exception, outcome); } } } private void processDeliveredMessage(Delivery delivery) { final DeliveryState outcome = delivery.getRemoteState(); final String deliveryTag = new String(delivery.getTag(), UTF_8); logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("Process delivered message."); final RetriableWorkItem workItem = pendingSendsMap.remove(deliveryTag); if (workItem == null) { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("Mismatch (or send timed out)."); return; } else if (workItem.isDeliveryStateProvided()) { workItem.success(outcome); return; } if (outcome instanceof Accepted) { synchronized (errorConditionLock) { lastKnownLinkError = null; lastKnownErrorReportedAt = null; retryAttempts.set(0); } workItem.success(outcome); } else if (outcome instanceof Rejected) { final Rejected 
rejected = (Rejected) outcome; final org.apache.qpid.proton.amqp.transport.ErrorCondition error = rejected.getError(); final Exception exception = ExceptionUtil.toException(error.getCondition().toString(), error.getDescription(), handler.getErrorContext(sender)); logger.atWarning() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .addKeyValue("rejected", rejected) .log("Delivery rejected."); final int retryAttempt; if (isGeneralSendError(error.getCondition())) { synchronized (errorConditionLock) { lastKnownLinkError = exception; lastKnownErrorReportedAt = Instant.now(); retryAttempt = retryAttempts.incrementAndGet(); } } else { retryAttempt = retryAttempts.get(); } final Duration retryInterval = retry.calculateRetryDelay(exception, retryAttempt); if (retryInterval == null || retryInterval.compareTo(workItem.getTimeoutTracker().remaining()) > 0) { cleanupFailedSend(workItem, exception, outcome); } else { workItem.setLastKnownException(exception); try { reactorProvider.getReactorDispatcher().invoke(() -> sendWork(workItem), retryInterval); } catch (IOException | RejectedExecutionException schedulerException) { exception.initCause(schedulerException); cleanupFailedSend( workItem, new AmqpException(false, String.format(Locale.US, "Entity(%s): send operation failed while scheduling a" + " retry on Reactor, see cause for more details.", entityPath), schedulerException, handler.getErrorContext(sender)), outcome); } } } else if (outcome instanceof Released) { cleanupFailedSend(workItem, new OperationCancelledException(outcome.toString(), handler.getErrorContext(sender)), outcome); } else if (outcome instanceof Declared) { final Declared declared = (Declared) outcome; workItem.success(declared); } else { cleanupFailedSend(workItem, new AmqpException(false, outcome.toString(), handler.getErrorContext(sender)), outcome); } } private void scheduleWorkOnDispatcher() { try { reactorProvider.getReactorDispatcher().invoke(this::processSendWork); } catch (IOException e) { 
logger.warning("Error scheduling work on reactor.", e); } catch (RejectedExecutionException e) { logger.info("Error scheduling work on reactor because of RejectedExecutionException."); } } private void cleanupFailedSend(final RetriableWorkItem workItem, final Exception exception, final DeliveryState deliveryState) { workItem.error(exception, deliveryState); } private void completeClose() { isClosedMono.emitEmpty((signalType, result) -> { addSignalTypeAndResult(logger.atWarning(), signalType, result).log("Unable to emit shutdown signal."); return false; }); subscriptions.dispose(); if (tokenManager != null) { tokenManager.close(); } } /** * Clears pending sends and puts an error in there. * * @param error Error to pass to pending sends. */ private void handleError(Throwable error) { synchronized (pendingSendLock) { if (isDisposed.getAndSet(true)) { logger.verbose("This was already disposed. Dropping error."); } else { logger.atVerbose() .addKeyValue(PENDING_SENDS_SIZE_KEY, () -> String.valueOf(pendingSendsMap.size())) .log("Disposing pending sends with error."); } pendingSendsMap.forEach((key, value) -> value.error(error, null)); pendingSendsMap.clear(); pendingSendsQueue.clear(); } completeClose(); } private void handleClose() { final String message = String.format("Could not complete sends because link '%s' for '%s' is closed.", getLinkName(), entityPath); final AmqpErrorContext context = handler.getErrorContext(sender); synchronized (pendingSendLock) { if (isDisposed.getAndSet(true)) { logger.verbose("This was already disposed."); } else { logger.atVerbose() .addKeyValue(PENDING_SENDS_SIZE_KEY, () -> String.valueOf(pendingSendsMap.size())) .log("Disposing pending sends."); } pendingSendsMap.forEach((key, value) -> value.error(new AmqpException(true, message, context), null)); pendingSendsMap.clear(); pendingSendsQueue.clear(); } completeClose(); } private static boolean isGeneralSendError(Symbol amqpError) { return (amqpError == AmqpErrorCode.SERVER_BUSY_ERROR || 
amqpError == AmqpErrorCode.TIMEOUT_ERROR || amqpError == AmqpErrorCode.RESOURCE_LIMIT_EXCEEDED); } private static class WeightedDeliveryTag { private final String deliveryTag; private final int priority; WeightedDeliveryTag(final String deliveryTag, final int priority) { this.deliveryTag = deliveryTag; this.priority = priority; } private String getDeliveryTag() { return this.deliveryTag; } private int getPriority() { return this.priority; } } private static class DeliveryTagComparator implements Comparator<WeightedDeliveryTag>, Serializable { private static final long serialVersionUID = -7057500582037295635L; @Override public int compare(WeightedDeliveryTag deliveryTag0, WeightedDeliveryTag deliveryTag1) { return deliveryTag1.getPriority() - deliveryTag0.getPriority(); } } /** * Keeps track of messages that have been sent, but may not have been acknowledged by the service. */ private class SendTimeout implements Runnable { private final String deliveryTag; SendTimeout(String deliveryTag) { this.deliveryTag = deliveryTag; } @Override public void run() { final RetriableWorkItem workItem = pendingSendsMap.remove(deliveryTag); if (workItem == null) { return; } Exception cause = lastKnownLinkError; final Exception lastError; final Instant lastErrorTime; synchronized (errorConditionLock) { lastError = lastKnownLinkError; lastErrorTime = lastKnownErrorReportedAt; } if (lastError != null && lastErrorTime != null) { final Instant now = Instant.now(); final boolean isLastErrorAfterSleepTime = lastErrorTime.isAfter(now.minusSeconds(SERVER_BUSY_BASE_SLEEP_TIME_IN_SECS)); final boolean isServerBusy = lastError instanceof AmqpException && isLastErrorAfterSleepTime; final boolean isLastErrorAfterOperationTimeout = lastErrorTime.isAfter(now.minus(retryOptions.getTryTimeout())); cause = isServerBusy || isLastErrorAfterOperationTimeout ? 
lastError : null; } final AmqpException exception; if (cause instanceof AmqpException) { exception = (AmqpException) cause; } else { exception = new AmqpException(true, AmqpErrorCondition.TIMEOUT_ERROR, String.format(Locale.US, "Entity(%s): Send operation timed out", entityPath), handler.getErrorContext(sender)); } workItem.error(exception, null); } } }
class ReactorSender implements AmqpSendLink, AsyncCloseable, AutoCloseable { private static final String DELIVERY_TAG_KEY = "deliveryTag"; private static final String PENDING_SENDS_SIZE_KEY = "pending_sends_size"; private final String entityPath; private final Sender sender; private final SendLinkHandler handler; private final ReactorProvider reactorProvider; private final Disposable.Composite subscriptions; private final AtomicBoolean hasConnected = new AtomicBoolean(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicInteger retryAttempts = new AtomicInteger(); private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final Object pendingSendLock = new Object(); private final ConcurrentHashMap<String, RetriableWorkItem> pendingSendsMap = new ConcurrentHashMap<>(); private final PriorityQueue<WeightedDeliveryTag> pendingSendsQueue = new PriorityQueue<>(1000, new DeliveryTagComparator()); private final ClientLogger logger; private final Flux<AmqpEndpointState> endpointStates; private final TokenManager tokenManager; private final MessageSerializer messageSerializer; private final AmqpRetryPolicy retry; private final AmqpRetryOptions retryOptions; private final String activeTimeoutMessage; private final Scheduler scheduler; private final AmqpMetricsProvider metricsProvider; private final Object errorConditionLock = new Object(); private volatile Exception lastKnownLinkError; private volatile Instant lastKnownErrorReportedAt; private volatile int linkSize; /** * Creates an instance of {@link ReactorSender}. * * @param amqpConnection The parent {@link AmqpConnection} that this sender lives in. * @param entityPath The message broker address for the sender. * @param sender The underlying proton-j sender. * @param handler The proton-j handler associated with the sender. * @param reactorProvider Provider to schedule work on the proton-j reactor. * @param tokenManager Token manager for authorising with the CBS node. 
Can be {@code null} if it is part of the * transaction manager. * @param messageSerializer Serializer to deserialise and serialize AMQP messages. * @param retryOptions Retry options. * @param scheduler Scheduler to schedule send timeout. */ ReactorSender(AmqpConnection amqpConnection, String entityPath, Sender sender, SendLinkHandler handler, ReactorProvider reactorProvider, TokenManager tokenManager, MessageSerializer messageSerializer, AmqpRetryOptions retryOptions, Scheduler scheduler, AmqpMetricsProvider metricsProvider) { this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.sender = Objects.requireNonNull(sender, "'sender' cannot be null."); this.handler = Objects.requireNonNull(handler, "'handler' cannot be null."); this.reactorProvider = Objects.requireNonNull(reactorProvider, "'reactorProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null."); this.scheduler = Objects.requireNonNull(scheduler, "'scheduler' cannot be null."); this.retry = RetryUtil.getRetryPolicy(retryOptions); this.tokenManager = tokenManager; this.metricsProvider = metricsProvider; String connectionId = handler.getConnectionId() == null ? NOT_APPLICABLE : handler.getConnectionId(); String linkName = getLinkName() == null ? 
NOT_APPLICABLE : getLinkName(); Map<String, Object> loggingContext = createContextWithConnectionId(connectionId); loggingContext.put(LINK_NAME_KEY, linkName); loggingContext.put(ENTITY_PATH_KEY, entityPath); this.logger = new ClientLogger(ReactorSender.class, loggingContext); this.activeTimeoutMessage = String.format( "ReactorSender connectionId[%s] linkName[%s]: Waiting for send and receive handler to be ACTIVE", handler.getConnectionId(), handler.getLinkName()); this.endpointStates = this.handler.getEndpointStates() .map(state -> { logger.verbose("State {}", state); this.hasConnected.set(state == EndpointState.ACTIVE); return AmqpEndpointStateUtil.getConnectionState(state); }) .doOnError(error -> { hasConnected.set(false); handleError(error); }) .doOnComplete(() -> { hasConnected.set(false); handleClose(); }) .cache(1); this.subscriptions = Disposables.composite( this.endpointStates.subscribe(), this.handler.getDeliveredMessages().subscribe(this::processDeliveredMessage), this.handler.getLinkCredits().subscribe(credit -> { logger.atVerbose().addKeyValue("credits", credit) .log("Credits on link."); this.scheduleWorkOnDispatcher(); }), amqpConnection.getShutdownSignals().flatMap(signal -> { logger.verbose("Shutdown signal received."); hasConnected.set(false); return closeAsync("Connection shutdown.", null); }).subscribe() ); if (tokenManager != null) { this.subscriptions.add(tokenManager.getAuthorizationResults().onErrorResume(error -> { final Mono<Void> operation = closeAsync(String.format("connectionId[%s] linkName[%s] Token renewal failure. Disposing send " + "link.", amqpConnection.getId(), getLinkName()), new ErrorCondition(Symbol.getSymbol(NOT_ALLOWED.getErrorCondition()), error.getMessage())); return operation.then(Mono.empty()); }).subscribe(response -> { logger.atVerbose().addKeyValue("response", response) .log("Token refreshed."); }, error -> { }, () -> { logger.verbose(" Authorization completed. Disposing."); closeAsync("Authorization completed. 
Disposing.", null).subscribe(); })); } } @Override public Flux<AmqpEndpointState> getEndpointStates() { return endpointStates; } @Override public Mono<Void> send(Message message) { return send(message, null); } @Override public Mono<Void> send(Message message, DeliveryState deliveryState) { if (isDisposed.get()) { return Mono.error(new IllegalStateException(String.format( "connectionId[%s] linkName[%s] Cannot publish message when disposed.", handler.getConnectionId(), getLinkName()))); } return getLinkSize() .flatMap(maxMessageSize -> { final int payloadSize = messageSerializer.getSize(message); final int allocationSize = Math.min(payloadSize + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize); final byte[] bytes = new byte[allocationSize]; int encodedSize; try { encodedSize = message.encode(bytes, 0, allocationSize); } catch (BufferOverflowException exception) { final String errorMessage = String.format(Locale.US, "Error sending. Size of the payload exceeded maximum message size: %s kb", maxMessageSize / 1024); final Throwable error = new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, errorMessage, exception, handler.getErrorContext(sender)); return Mono.error(error); } return send(bytes, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT, deliveryState); }).then(); } @Override public Mono<Void> send(List<Message> messageBatch) { return send(messageBatch, null); } @Override public Mono<Void> send(List<Message> messageBatch, DeliveryState deliveryState) { if (isDisposed.get()) { return Mono.error(new IllegalStateException(String.format( "connectionId[%s] linkName[%s] Cannot publish data batch when disposed.", handler.getConnectionId(), getLinkName()))); } if (messageBatch.size() == 1) { return send(messageBatch.get(0), deliveryState); } return getLinkSize() .flatMap(maxMessageSize -> { int totalEncodedSize = 0; final CompositeReadableBuffer buffer = new CompositeReadableBuffer(); final byte[] envelopBytes = batchEnvelopBytes(messageBatch.get(0), 
maxMessageSize); if (envelopBytes.length > 0) { totalEncodedSize += envelopBytes.length; if (totalEncodedSize > maxMessageSize) { return batchBufferOverflowError(maxMessageSize); } buffer.append(envelopBytes); } for (final Message message : messageBatch) { final byte[] sectionBytes = batchBinaryDataSectionBytes(message, maxMessageSize); if (sectionBytes.length > 0) { totalEncodedSize += sectionBytes.length; if (totalEncodedSize > maxMessageSize) { return batchBufferOverflowError(maxMessageSize); } buffer.append(sectionBytes); } else { logger.info("Ignoring the empty message org.apache.qpid.proton.message.message@{} in the batch.", Integer.toHexString(System.identityHashCode(message))); } } return send(buffer, AmqpConstants.AMQP_BATCH_MESSAGE_FORMAT, deliveryState); }).then(); } private byte[] batchEnvelopBytes(Message envelopMessage, int maxMessageSize) { final Message message = Proton.message(); message.setMessageAnnotations(envelopMessage.getMessageAnnotations()); if ((envelopMessage.getMessageId() instanceof String) && !CoreUtils.isNullOrEmpty((String) envelopMessage.getMessageId())) { message.setMessageId(envelopMessage.getMessageId()); } if (!CoreUtils.isNullOrEmpty(envelopMessage.getGroupId())) { message.setGroupId(envelopMessage.getGroupId()); } final int size = messageSerializer.getSize(message); final int allocationSize = Math.min(size + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize); final byte[] encodedBytes = new byte[allocationSize]; final int encodedSize = message.encode(encodedBytes, 0, allocationSize); return Arrays.copyOf(encodedBytes, encodedSize); } private byte[] batchBinaryDataSectionBytes(Message sectionMessage, int maxMessageSize) { final int size = messageSerializer.getSize(sectionMessage); final int allocationSize = Math.min(size + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize); final byte[] encodedBytes = new byte[allocationSize]; final int encodedSize = sectionMessage.encode(encodedBytes, 0, allocationSize); final Message message = 
Proton.message(); final Data binaryData = new Data(new Binary(encodedBytes, 0, encodedSize)); message.setBody(binaryData); final int binaryRawSize = binaryData.getValue().getLength(); final int binaryEncodedSize = binaryEncodedSize(binaryRawSize); final byte[] binaryEncodedBytes = new byte[binaryEncodedSize]; message.encode(binaryEncodedBytes, 0, binaryEncodedSize); return binaryEncodedBytes; } private Mono<Void> batchBufferOverflowError(int maxMessageSize) { return FluxUtil.monoError(logger, new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, String.format(Locale.US, "Size of the payload exceeded maximum message size: %s kb", maxMessageSize / 1024), new BufferOverflowException(), handler.getErrorContext(sender))); } /** * Compute the encoded size when encoding a binary data of given size per Amqp 1.0 spec "amqp:data:binary" format. * * @param binaryRawSize the length of the binary data. * @return the encoded size. */ private int binaryEncodedSize(int binaryRawSize) { if (binaryRawSize <= 255) { return 5 + binaryRawSize; } else { return 8 + binaryRawSize; } } @Override public AmqpErrorContext getErrorContext() { return handler.getErrorContext(sender); } @Override public String getLinkName() { return sender.getName(); } @Override public String getEntityPath() { return entityPath; } @Override public String getHostname() { return handler.getHostname(); } @Override public Mono<Integer> getLinkSize() { if (linkSize > 0) { return Mono.defer(() -> Mono.just(this.linkSize)); } synchronized (this) { if (linkSize > 0) { return Mono.defer(() -> Mono.just(linkSize)); } return RetryUtil.withRetry(getEndpointStates().takeUntil(state -> state == AmqpEndpointState.ACTIVE), retryOptions, activeTimeoutMessage) .then(Mono.fromCallable(() -> { final UnsignedLong remoteMaxMessageSize = sender.getRemoteMaxMessageSize(); if (remoteMaxMessageSize != null) { linkSize = remoteMaxMessageSize.intValue(); } else { logger.warning("Could not get the getRemoteMaxMessageSize. 
Returning current link size: {}", linkSize); } return linkSize; })); } } @Override public boolean isDisposed() { return isDisposed.get(); } /** * Blocking call that disposes of the sender. * * @see */ @Override public void dispose() { close(); } /** * Blocking call that disposes of the sender. * * @see */ @Override public void close() { closeAsync().block(retryOptions.getTryTimeout()); } @Override public Mono<Void> closeAsync() { return closeAsync("User invoked close operation.", null); } /** * Disposes of the sender. * * @param errorCondition Error condition associated with close operation. * @param message Message associated with why the sender was closed. * * @return A mono that completes when the send link has closed. */ Mono<Void> closeAsync(String message, ErrorCondition errorCondition) { if (isDisposed.getAndSet(true)) { return isClosedMono.asMono(); } addErrorCondition(logger.atVerbose(), errorCondition) .log("Setting error condition and disposing. {}", message); final Runnable closeWork = () -> { if (errorCondition != null && sender.getCondition() == null) { sender.setCondition(errorCondition); } sender.close(); }; return Mono.fromRunnable(() -> { try { reactorProvider.getReactorDispatcher().invoke(closeWork); } catch (IOException e) { logger.warning("Could not schedule close work. Running manually. And completing close.", e); closeWork.run(); handleClose(); } catch (RejectedExecutionException e) { logger.info("RejectedExecutionException scheduling close work. And completing close."); closeWork.run(); handleClose(); } }).then(isClosedMono.asMono()) .publishOn(Schedulers.boundedElastic()); } /** * A mono that completes when the sender has completely closed. * * @return mono that completes when the sender has completely closed. 
*/ Mono<Void> isClosed() { return isClosedMono.asMono(); } @Override Mono<DeliveryState> send(ReadableBuffer buffer, int messageFormat, DeliveryState deliveryState) { return onEndpointActive().then(Mono.create(sink -> { sendWork(new RetriableWorkItem(buffer, messageFormat, sink, retryOptions.getTryTimeout(), deliveryState, metricsProvider)); })); } private Flux<EndpointState> onEndpointActive() { return RetryUtil.withRetry(handler.getEndpointStates().takeUntil(state -> state == EndpointState.ACTIVE), retryOptions, activeTimeoutMessage); } /** * Add the work item in pending send to be processed on {@link ReactorDispatcher} thread. * * @param workItem to be processed. */ private void sendWork(RetriableWorkItem workItem) { final String deliveryTag = UUID.randomUUID().toString().replace("-", ""); synchronized (pendingSendLock) { this.pendingSendsMap.put(deliveryTag, workItem); this.pendingSendsQueue.offer(new WeightedDeliveryTag(deliveryTag, workItem.hasBeenRetried() ? 1 : 0)); } this.scheduleWorkOnDispatcher(); } /** * Invokes work on the Reactor. Should only be called from ReactorDispatcher.invoke() */ private void processSendWork() { if (!hasConnected.get()) { logger.warning("Not connected. Not processing send work."); return; } if (isDisposed.get()) { logger.info("Sender is closed. 
Not executing work."); return; } while (hasConnected.get() && sender.getCredit() > 0) { final WeightedDeliveryTag weightedDelivery; final RetriableWorkItem workItem; final String deliveryTag; synchronized (pendingSendLock) { weightedDelivery = this.pendingSendsQueue.poll(); if (weightedDelivery != null) { deliveryTag = weightedDelivery.getDeliveryTag(); workItem = this.pendingSendsMap.get(deliveryTag); } else { workItem = null; deliveryTag = null; } } if (workItem == null) { if (deliveryTag != null) { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("sendData not found for this delivery."); } break; } Delivery delivery = null; boolean linkAdvance = false; int sentMsgSize = 0; Exception sendException = null; try { workItem.beforeTry(); delivery = sender.delivery(deliveryTag.getBytes(UTF_8)); delivery.setMessageFormat(workItem.getMessageFormat()); if (workItem.isDeliveryStateProvided()) { delivery.disposition(workItem.getDeliveryState()); } workItem.send(sender); linkAdvance = sender.advance(); } catch (Exception exception) { sendException = exception; } if (linkAdvance) { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("Sent message."); workItem.setWaitingForAck(); scheduler.schedule(new SendTimeout(deliveryTag), retryOptions.getTryTimeout().toMillis(), TimeUnit.MILLISECONDS); } else { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .addKeyValue("sentMessageSize", sentMsgSize) .addKeyValue("payloadActualSize", workItem.getEncodedMessageSize()) .log("Sendlink advance failed."); DeliveryState outcome = null; if (delivery != null) { outcome = delivery.getRemoteState(); delivery.free(); } final AmqpErrorContext context = handler.getErrorContext(sender); final Throwable exception = sendException != null ? new OperationCancelledException(String.format(Locale.US, "Entity(%s): send operation failed. 
Please see cause for more details", entityPath), sendException, context) : new OperationCancelledException(String.format(Locale.US, "Entity(%s): send operation failed while advancing delivery(tag: %s).", entityPath, deliveryTag), context); workItem.error(exception, outcome); } } } private void processDeliveredMessage(Delivery delivery) { final DeliveryState outcome = delivery.getRemoteState(); final String deliveryTag = new String(delivery.getTag(), UTF_8); logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("Process delivered message."); final RetriableWorkItem workItem = pendingSendsMap.remove(deliveryTag); if (workItem == null) { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("Mismatch (or send timed out)."); return; } else if (workItem.isDeliveryStateProvided()) { workItem.success(outcome); return; } if (outcome instanceof Accepted) { synchronized (errorConditionLock) { lastKnownLinkError = null; lastKnownErrorReportedAt = null; retryAttempts.set(0); } workItem.success(outcome); } else if (outcome instanceof Rejected) { final Rejected rejected = (Rejected) outcome; final org.apache.qpid.proton.amqp.transport.ErrorCondition error = rejected.getError(); final Exception exception = ExceptionUtil.toException(error.getCondition().toString(), error.getDescription(), handler.getErrorContext(sender)); logger.atWarning() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .addKeyValue("rejected", rejected) .log("Delivery rejected."); final int retryAttempt; if (isGeneralSendError(error.getCondition())) { synchronized (errorConditionLock) { lastKnownLinkError = exception; lastKnownErrorReportedAt = Instant.now(); retryAttempt = retryAttempts.incrementAndGet(); } } else { retryAttempt = retryAttempts.get(); } final Duration retryInterval = retry.calculateRetryDelay(exception, retryAttempt); if (retryInterval == null || retryInterval.compareTo(workItem.getTimeoutTracker().remaining()) > 0) { cleanupFailedSend(workItem, exception, outcome); } 
else { workItem.setLastKnownException(exception); try { reactorProvider.getReactorDispatcher().invoke(() -> sendWork(workItem), retryInterval); } catch (IOException | RejectedExecutionException schedulerException) { exception.initCause(schedulerException); cleanupFailedSend( workItem, new AmqpException(false, String.format(Locale.US, "Entity(%s): send operation failed while scheduling a" + " retry on Reactor, see cause for more details.", entityPath), schedulerException, handler.getErrorContext(sender)), outcome); } } } else if (outcome instanceof Released) { cleanupFailedSend(workItem, new OperationCancelledException(outcome.toString(), handler.getErrorContext(sender)), outcome); } else if (outcome instanceof Declared) { final Declared declared = (Declared) outcome; workItem.success(declared); } else { cleanupFailedSend(workItem, new AmqpException(false, outcome.toString(), handler.getErrorContext(sender)), outcome); } } private void scheduleWorkOnDispatcher() { try { reactorProvider.getReactorDispatcher().invoke(this::processSendWork); } catch (IOException e) { logger.warning("Error scheduling work on reactor.", e); } catch (RejectedExecutionException e) { logger.info("Error scheduling work on reactor because of RejectedExecutionException."); } } private void cleanupFailedSend(final RetriableWorkItem workItem, final Exception exception, final DeliveryState deliveryState) { workItem.error(exception, deliveryState); } private void completeClose() { isClosedMono.emitEmpty((signalType, result) -> { addSignalTypeAndResult(logger.atWarning(), signalType, result).log("Unable to emit shutdown signal."); return false; }); subscriptions.dispose(); if (tokenManager != null) { tokenManager.close(); } } /** * Clears pending sends and puts an error in there. * * @param error Error to pass to pending sends. */ private void handleError(Throwable error) { synchronized (pendingSendLock) { if (isDisposed.getAndSet(true)) { logger.verbose("This was already disposed. 
Dropping error."); } else { logger.atVerbose() .addKeyValue(PENDING_SENDS_SIZE_KEY, () -> String.valueOf(pendingSendsMap.size())) .log("Disposing pending sends with error."); } pendingSendsMap.forEach((key, value) -> value.error(error, null)); pendingSendsMap.clear(); pendingSendsQueue.clear(); } completeClose(); } private void handleClose() { final String message = String.format("Could not complete sends because link '%s' for '%s' is closed.", getLinkName(), entityPath); final AmqpErrorContext context = handler.getErrorContext(sender); synchronized (pendingSendLock) { if (isDisposed.getAndSet(true)) { logger.verbose("This was already disposed."); } else { logger.atVerbose() .addKeyValue(PENDING_SENDS_SIZE_KEY, () -> String.valueOf(pendingSendsMap.size())) .log("Disposing pending sends."); } pendingSendsMap.forEach((key, value) -> value.error(new AmqpException(true, message, context), null)); pendingSendsMap.clear(); pendingSendsQueue.clear(); } completeClose(); } private static boolean isGeneralSendError(Symbol amqpError) { return (amqpError == AmqpErrorCode.SERVER_BUSY_ERROR || amqpError == AmqpErrorCode.TIMEOUT_ERROR || amqpError == AmqpErrorCode.RESOURCE_LIMIT_EXCEEDED); } private static class WeightedDeliveryTag { private final String deliveryTag; private final int priority; WeightedDeliveryTag(final String deliveryTag, final int priority) { this.deliveryTag = deliveryTag; this.priority = priority; } private String getDeliveryTag() { return this.deliveryTag; } private int getPriority() { return this.priority; } } private static class DeliveryTagComparator implements Comparator<WeightedDeliveryTag>, Serializable { private static final long serialVersionUID = -7057500582037295635L; @Override public int compare(WeightedDeliveryTag deliveryTag0, WeightedDeliveryTag deliveryTag1) { return deliveryTag1.getPriority() - deliveryTag0.getPriority(); } } /** * Keeps track of messages that have been sent, but may not have been acknowledged by the service. 
*/ private class SendTimeout implements Runnable { private final String deliveryTag; SendTimeout(String deliveryTag) { this.deliveryTag = deliveryTag; } @Override public void run() { final RetriableWorkItem workItem = pendingSendsMap.remove(deliveryTag); if (workItem == null) { return; } Exception cause = lastKnownLinkError; final Exception lastError; final Instant lastErrorTime; synchronized (errorConditionLock) { lastError = lastKnownLinkError; lastErrorTime = lastKnownErrorReportedAt; } if (lastError != null && lastErrorTime != null) { final Instant now = Instant.now(); final boolean isLastErrorAfterSleepTime = lastErrorTime.isAfter(now.minusSeconds(SERVER_BUSY_BASE_SLEEP_TIME_IN_SECS)); final boolean isServerBusy = lastError instanceof AmqpException && isLastErrorAfterSleepTime; final boolean isLastErrorAfterOperationTimeout = lastErrorTime.isAfter(now.minus(retryOptions.getTryTimeout())); cause = isServerBusy || isLastErrorAfterOperationTimeout ? lastError : null; } final AmqpException exception; if (cause instanceof AmqpException) { exception = (AmqpException) cause; } else { exception = new AmqpException(true, AmqpErrorCondition.TIMEOUT_ERROR, String.format(Locale.US, "Entity(%s): Send operation timed out", entityPath), handler.getErrorContext(sender)); } workItem.error(exception, null); } } }
Yes, that's intentional. `CompositeReadableBuffer` has an optimized path when it wraps exactly one byte[] array: when such a buffer is sent, the inner byte[] is used directly in the fast path [here](https://github.com/apache/qpid-proton-j/blob/8c1f2326d46b9a67ae14bc3431acc6cddfbb7524/proton-j/src/main/java/org/apache/qpid/proton/engine/impl/DeliveryImpl.java#L379) (with an allocation equivalent to the old approach [here](https://github.com/apache/qpid-proton-j/blob/8c1f2326d46b9a67ae14bc3431acc6cddfbb7524/proton-j/src/main/java/org/apache/qpid/proton/engine/impl/DeliveryImpl.java#L342)).
/**
 * Sends the already-encoded message bytes on this link.
 * <p>
 * The bytes are wrapped in a {@link CompositeReadableBuffer} holding a single backing array, which
 * lets proton-j use its single-array fast path when writing the delivery (see qpid-proton-j
 * DeliveryImpl#send). The buffer's limit is set so only the encoded portion of the (possibly
 * over-allocated) array is transmitted.
 *
 * @param bytes array containing the encoded message; may be larger than the encoded content.
 * @param arrayOffset number of valid encoded bytes in {@code bytes} (despite the name, this is the
 *     encoded size, used as the buffer limit).
 * @param messageFormat AMQP message format code to stamp on the delivery.
 * @param deliveryState delivery state to pre-settle the delivery with, or {@code null}.
 * @return a Mono that completes with the remote delivery outcome.
 */
public Mono<DeliveryState> send(byte[] bytes, int arrayOffset, int messageFormat, DeliveryState deliveryState) {
    final CompositeReadableBuffer payload = new CompositeReadableBuffer();
    payload.append(bytes);
    payload.limit(arrayOffset);
    return send(payload, messageFormat, deliveryState);
}
final CompositeReadableBuffer buffer = new CompositeReadableBuffer();
/**
 * Sends the already-encoded message bytes once the send link reaches the ACTIVE state.
 * <p>
 * The actual enqueueing happens lazily inside {@code Mono.create}, so the work item is only
 * created and scheduled when the returned Mono is subscribed to, after the endpoint is active.
 *
 * @param bytes array containing the encoded message; may be larger than the encoded content.
 * @param arrayOffset number of valid encoded bytes in {@code bytes} (despite the name, callers
 *     pass the encoded size here).
 * @param messageFormat AMQP message format code to stamp on the delivery.
 * @param deliveryState delivery state to pre-settle the delivery with, or {@code null}.
 * @return a Mono that completes with the remote delivery outcome.
 */
public Mono<DeliveryState> send(byte[] bytes, int arrayOffset, int messageFormat, DeliveryState deliveryState) {
    final Mono<DeliveryState> pendingSend = Mono.create(sink -> sendWork(
        new RetriableWorkItem(bytes, arrayOffset, messageFormat, sink, retryOptions.getTryTimeout(),
            deliveryState, metricsProvider)));
    return onEndpointActive().then(pendingSend);
}
class ReactorSender implements AmqpSendLink, AsyncCloseable, AutoCloseable { private static final String DELIVERY_TAG_KEY = "deliveryTag"; private static final String PENDING_SENDS_SIZE_KEY = "pending_sends_size"; private final String entityPath; private final Sender sender; private final SendLinkHandler handler; private final ReactorProvider reactorProvider; private final Disposable.Composite subscriptions; private final AtomicBoolean hasConnected = new AtomicBoolean(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicInteger retryAttempts = new AtomicInteger(); private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final Object pendingSendLock = new Object(); private final ConcurrentHashMap<String, RetriableWorkItem> pendingSendsMap = new ConcurrentHashMap<>(); private final PriorityQueue<WeightedDeliveryTag> pendingSendsQueue = new PriorityQueue<>(1000, new DeliveryTagComparator()); private final ClientLogger logger; private final Flux<AmqpEndpointState> endpointStates; private final TokenManager tokenManager; private final MessageSerializer messageSerializer; private final AmqpRetryPolicy retry; private final AmqpRetryOptions retryOptions; private final String activeTimeoutMessage; private final Scheduler scheduler; private final AmqpMetricsProvider metricsProvider; private final Object errorConditionLock = new Object(); private volatile Exception lastKnownLinkError; private volatile Instant lastKnownErrorReportedAt; private volatile int linkSize; /** * Creates an instance of {@link ReactorSender}. * * @param amqpConnection The parent {@link AmqpConnection} that this sender lives in. * @param entityPath The message broker address for the sender. * @param sender The underlying proton-j sender. * @param handler The proton-j handler associated with the sender. * @param reactorProvider Provider to schedule work on the proton-j reactor. * @param tokenManager Token manager for authorising with the CBS node. 
Can be {@code null} if it is part of the * transaction manager. * @param messageSerializer Serializer to deserialise and serialize AMQP messages. * @param retryOptions Retry options. * @param scheduler Scheduler to schedule send timeout. */ ReactorSender(AmqpConnection amqpConnection, String entityPath, Sender sender, SendLinkHandler handler, ReactorProvider reactorProvider, TokenManager tokenManager, MessageSerializer messageSerializer, AmqpRetryOptions retryOptions, Scheduler scheduler, AmqpMetricsProvider metricsProvider) { this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.sender = Objects.requireNonNull(sender, "'sender' cannot be null."); this.handler = Objects.requireNonNull(handler, "'handler' cannot be null."); this.reactorProvider = Objects.requireNonNull(reactorProvider, "'reactorProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null."); this.scheduler = Objects.requireNonNull(scheduler, "'scheduler' cannot be null."); this.retry = RetryUtil.getRetryPolicy(retryOptions); this.tokenManager = tokenManager; this.metricsProvider = metricsProvider; String connectionId = handler.getConnectionId() == null ? NOT_APPLICABLE : handler.getConnectionId(); String linkName = getLinkName() == null ? 
NOT_APPLICABLE : getLinkName(); Map<String, Object> loggingContext = createContextWithConnectionId(connectionId); loggingContext.put(LINK_NAME_KEY, linkName); loggingContext.put(ENTITY_PATH_KEY, entityPath); this.logger = new ClientLogger(ReactorSender.class, loggingContext); this.activeTimeoutMessage = String.format( "ReactorSender connectionId[%s] linkName[%s]: Waiting for send and receive handler to be ACTIVE", handler.getConnectionId(), handler.getLinkName()); this.endpointStates = this.handler.getEndpointStates() .map(state -> { logger.verbose("State {}", state); this.hasConnected.set(state == EndpointState.ACTIVE); return AmqpEndpointStateUtil.getConnectionState(state); }) .doOnError(error -> { hasConnected.set(false); handleError(error); }) .doOnComplete(() -> { hasConnected.set(false); handleClose(); }) .cache(1); this.subscriptions = Disposables.composite( this.endpointStates.subscribe(), this.handler.getDeliveredMessages().subscribe(this::processDeliveredMessage), this.handler.getLinkCredits().subscribe(credit -> { logger.atVerbose().addKeyValue("credits", credit) .log("Credits on link."); this.scheduleWorkOnDispatcher(); }), amqpConnection.getShutdownSignals().flatMap(signal -> { logger.verbose("Shutdown signal received."); hasConnected.set(false); return closeAsync("Connection shutdown.", null); }).subscribe() ); if (tokenManager != null) { this.subscriptions.add(tokenManager.getAuthorizationResults().onErrorResume(error -> { final Mono<Void> operation = closeAsync(String.format("connectionId[%s] linkName[%s] Token renewal failure. Disposing send " + "link.", amqpConnection.getId(), getLinkName()), new ErrorCondition(Symbol.getSymbol(NOT_ALLOWED.getErrorCondition()), error.getMessage())); return operation.then(Mono.empty()); }).subscribe(response -> { logger.atVerbose().addKeyValue("response", response) .log("Token refreshed."); }, error -> { }, () -> { logger.verbose(" Authorization completed. Disposing."); closeAsync("Authorization completed. 
Disposing.", null).subscribe(); })); } } @Override public Flux<AmqpEndpointState> getEndpointStates() { return endpointStates; } @Override public Mono<Void> send(Message message) { return send(message, null); } @Override public Mono<Void> send(Message message, DeliveryState deliveryState) { if (isDisposed.get()) { return Mono.error(new IllegalStateException(String.format( "connectionId[%s] linkName[%s] Cannot publish message when disposed.", handler.getConnectionId(), getLinkName()))); } return getLinkSize() .flatMap(maxMessageSize -> { final int payloadSize = messageSerializer.getSize(message); final int allocationSize = Math.min(payloadSize + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize); final byte[] bytes = new byte[allocationSize]; int encodedSize; try { encodedSize = message.encode(bytes, 0, allocationSize); } catch (BufferOverflowException exception) { final String errorMessage = String.format(Locale.US, "Error sending. Size of the payload exceeded maximum message size: %s kb", maxMessageSize / 1024); final Throwable error = new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, errorMessage, exception, handler.getErrorContext(sender)); return Mono.error(error); } return send(bytes, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT, deliveryState); }).then(); } @Override public Mono<Void> send(List<Message> messageBatch) { return send(messageBatch, null); } @Override public Mono<Void> send(List<Message> messageBatch, DeliveryState deliveryState) { if (isDisposed.get()) { return Mono.error(new IllegalStateException(String.format( "connectionId[%s] linkName[%s] Cannot publish data batch when disposed.", handler.getConnectionId(), getLinkName()))); } if (messageBatch.size() == 1) { return send(messageBatch.get(0), deliveryState); } return getLinkSize() .flatMap(maxMessageSize -> { int totalEncodedSize = 0; final CompositeReadableBuffer buffer = new CompositeReadableBuffer(); final byte[] envelopBytes = batchEnvelopBytes(messageBatch.get(0), 
maxMessageSize); if (envelopBytes.length > 0) { totalEncodedSize += envelopBytes.length; if (totalEncodedSize > maxMessageSize) { return batchBufferOverflowError(maxMessageSize); } buffer.append(envelopBytes); } for (final Message message : messageBatch) { final byte[] sectionBytes = batchBinaryDataSectionBytes(message, maxMessageSize); if (sectionBytes.length > 0) { totalEncodedSize += sectionBytes.length; if (totalEncodedSize > maxMessageSize) { return batchBufferOverflowError(maxMessageSize); } buffer.append(sectionBytes); } } return send(buffer, AmqpConstants.AMQP_BATCH_MESSAGE_FORMAT, deliveryState); }).then(); } private byte[] batchEnvelopBytes(Message envelopMessage, int maxMessageSize) { final Message message = Proton.message(); message.setMessageAnnotations(envelopMessage.getMessageAnnotations()); final int size = messageSerializer.getSize(message); final int allocationSize = Math.min(size + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize); final byte[] encodedBytes = new byte[allocationSize]; final int encodedSize = message.encode(encodedBytes, 0, allocationSize); return Arrays.copyOf(encodedBytes, encodedSize); } private byte[] batchBinaryDataSectionBytes(Message sectionMessage, int maxMessageSize) { final int size = messageSerializer.getSize(sectionMessage); final int allocationSize = Math.min(size + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize); final byte[] encodedBytes = new byte[allocationSize]; final int encodedSize = sectionMessage.encode(encodedBytes, 0, allocationSize); final Message message = Proton.message(); final Data binaryData = new Data(new Binary(encodedBytes, 0, encodedSize)); message.setBody(binaryData); final int binaryRawSize = binaryData.getValue().getLength(); final int binaryEncodedSize = binaryEncodedSize(binaryRawSize); final byte[] binaryEncodedBytes = new byte[binaryEncodedSize]; message.encode(binaryEncodedBytes, 0, binaryEncodedSize); return binaryEncodedBytes; } private Mono<Void> batchBufferOverflowError(int maxMessageSize) { 
return Mono.error(new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, String.format(Locale.US, "Size of the payload exceeded maximum message size: %s kb", maxMessageSize / 1024), new BufferOverflowException(), handler.getErrorContext(sender))); } /** * Compute the encoded size when encoding a binary data of given size per Amqp 1.0 spec "amqp:data:binary" format. * * @param binaryRawSize the length of the binary data. * @return the encoded size. */ private int binaryEncodedSize(int binaryRawSize) { if (binaryRawSize <= 255) { return 5 + binaryRawSize; } else { return 8 + binaryRawSize; } } @Override public AmqpErrorContext getErrorContext() { return handler.getErrorContext(sender); } @Override public String getLinkName() { return sender.getName(); } @Override public String getEntityPath() { return entityPath; } @Override public String getHostname() { return handler.getHostname(); } @Override public Mono<Integer> getLinkSize() { if (linkSize > 0) { return Mono.defer(() -> Mono.just(this.linkSize)); } synchronized (this) { if (linkSize > 0) { return Mono.defer(() -> Mono.just(linkSize)); } return RetryUtil.withRetry(getEndpointStates().takeUntil(state -> state == AmqpEndpointState.ACTIVE), retryOptions, activeTimeoutMessage) .then(Mono.fromCallable(() -> { final UnsignedLong remoteMaxMessageSize = sender.getRemoteMaxMessageSize(); if (remoteMaxMessageSize != null) { linkSize = remoteMaxMessageSize.intValue(); } else { logger.warning("Could not get the getRemoteMaxMessageSize. Returning current link size: {}", linkSize); } return linkSize; })); } } @Override public boolean isDisposed() { return isDisposed.get(); } /** * Blocking call that disposes of the sender. * * @see */ @Override public void dispose() { close(); } /** * Blocking call that disposes of the sender. 
* * @see */ @Override public void close() { closeAsync().block(retryOptions.getTryTimeout()); } @Override public Mono<Void> closeAsync() { return closeAsync("User invoked close operation.", null); } /** * Disposes of the sender. * * @param errorCondition Error condition associated with close operation. * @param message Message associated with why the sender was closed. * * @return A mono that completes when the send link has closed. */ Mono<Void> closeAsync(String message, ErrorCondition errorCondition) { if (isDisposed.getAndSet(true)) { return isClosedMono.asMono(); } addErrorCondition(logger.atVerbose(), errorCondition) .log("Setting error condition and disposing. {}", message); final Runnable closeWork = () -> { if (errorCondition != null && sender.getCondition() == null) { sender.setCondition(errorCondition); } sender.close(); }; return Mono.fromRunnable(() -> { try { reactorProvider.getReactorDispatcher().invoke(closeWork); } catch (IOException e) { logger.warning("Could not schedule close work. Running manually. And completing close.", e); closeWork.run(); handleClose(); } catch (RejectedExecutionException e) { logger.info("RejectedExecutionException scheduling close work. And completing close."); closeWork.run(); handleClose(); } }).then(isClosedMono.asMono()) .publishOn(Schedulers.boundedElastic()); } /** * A mono that completes when the sender has completely closed. * * @return mono that completes when the sender has completely closed. 
*/ Mono<Void> isClosed() { return isClosedMono.asMono(); } @Override Mono<DeliveryState> send(ReadableBuffer buffer, int messageFormat, DeliveryState deliveryState) { final Flux<EndpointState> activeEndpointFlux = RetryUtil.withRetry( handler.getEndpointStates().takeUntil(state -> state == EndpointState.ACTIVE), retryOptions, activeTimeoutMessage); return activeEndpointFlux.then(Mono.create(sink -> { sendWork(new RetriableWorkItem(buffer, messageFormat, sink, retryOptions.getTryTimeout(), deliveryState, metricsProvider)); })); } /** * Add the work item in pending send to be processed on {@link ReactorDispatcher} thread. * * @param workItem to be processed. */ private void sendWork(RetriableWorkItem workItem) { final String deliveryTag = UUID.randomUUID().toString().replace("-", ""); synchronized (pendingSendLock) { this.pendingSendsMap.put(deliveryTag, workItem); this.pendingSendsQueue.offer(new WeightedDeliveryTag(deliveryTag, workItem.hasBeenRetried() ? 1 : 0)); } this.scheduleWorkOnDispatcher(); } /** * Invokes work on the Reactor. Should only be called from ReactorDispatcher.invoke() */ private void processSendWork() { if (!hasConnected.get()) { logger.warning("Not connected. Not processing send work."); return; } if (isDisposed.get()) { logger.info("Sender is closed. 
Not executing work."); return; } while (hasConnected.get() && sender.getCredit() > 0) { final WeightedDeliveryTag weightedDelivery; final RetriableWorkItem workItem; final String deliveryTag; synchronized (pendingSendLock) { weightedDelivery = this.pendingSendsQueue.poll(); if (weightedDelivery != null) { deliveryTag = weightedDelivery.getDeliveryTag(); workItem = this.pendingSendsMap.get(deliveryTag); } else { workItem = null; deliveryTag = null; } } if (workItem == null) { if (deliveryTag != null) { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("sendData not found for this delivery."); } break; } Delivery delivery = null; boolean linkAdvance = false; int sentMsgSize = 0; Exception sendException = null; try { workItem.beforeTry(); delivery = sender.delivery(deliveryTag.getBytes(UTF_8)); delivery.setMessageFormat(workItem.getMessageFormat()); if (workItem.isDeliveryStateProvided()) { delivery.disposition(workItem.getDeliveryState()); } final ReadableBuffer encodedBuffer = workItem.getEncodedBuffer(); encodedBuffer.rewind(); sentMsgSize = sender.send(encodedBuffer); assert sentMsgSize == workItem.getEncodedMessageSize() : "Contract of the ProtonJ library for Sender. 
Send API changed"; linkAdvance = sender.advance(); } catch (Exception exception) { sendException = exception; } if (linkAdvance) { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("Sent message."); workItem.setWaitingForAck(); scheduler.schedule(new SendTimeout(deliveryTag), retryOptions.getTryTimeout().toMillis(), TimeUnit.MILLISECONDS); } else { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .addKeyValue("sentMessageSize", sentMsgSize) .addKeyValue("payloadActualSize", workItem.getEncodedMessageSize()) .log("Sendlink advance failed."); DeliveryState outcome = null; if (delivery != null) { outcome = delivery.getRemoteState(); delivery.free(); } final AmqpErrorContext context = handler.getErrorContext(sender); final Throwable exception = sendException != null ? new OperationCancelledException(String.format(Locale.US, "Entity(%s): send operation failed. Please see cause for more details", entityPath), sendException, context) : new OperationCancelledException(String.format(Locale.US, "Entity(%s): send operation failed while advancing delivery(tag: %s).", entityPath, deliveryTag), context); workItem.error(exception, outcome); } } } private void processDeliveredMessage(Delivery delivery) { final DeliveryState outcome = delivery.getRemoteState(); final String deliveryTag = new String(delivery.getTag(), UTF_8); logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("Process delivered message."); final RetriableWorkItem workItem = pendingSendsMap.remove(deliveryTag); if (workItem == null) { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("Mismatch (or send timed out)."); return; } else if (workItem.isDeliveryStateProvided()) { workItem.success(outcome); return; } if (outcome instanceof Accepted) { synchronized (errorConditionLock) { lastKnownLinkError = null; lastKnownErrorReportedAt = null; retryAttempts.set(0); } workItem.success(outcome); } else if (outcome instanceof Rejected) { final Rejected 
rejected = (Rejected) outcome; final org.apache.qpid.proton.amqp.transport.ErrorCondition error = rejected.getError(); final Exception exception = ExceptionUtil.toException(error.getCondition().toString(), error.getDescription(), handler.getErrorContext(sender)); logger.atWarning() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .addKeyValue("rejected", rejected) .log("Delivery rejected."); final int retryAttempt; if (isGeneralSendError(error.getCondition())) { synchronized (errorConditionLock) { lastKnownLinkError = exception; lastKnownErrorReportedAt = Instant.now(); retryAttempt = retryAttempts.incrementAndGet(); } } else { retryAttempt = retryAttempts.get(); } final Duration retryInterval = retry.calculateRetryDelay(exception, retryAttempt); if (retryInterval == null || retryInterval.compareTo(workItem.getTimeoutTracker().remaining()) > 0) { cleanupFailedSend(workItem, exception, outcome); } else { workItem.setLastKnownException(exception); try { reactorProvider.getReactorDispatcher().invoke(() -> sendWork(workItem), retryInterval); } catch (IOException | RejectedExecutionException schedulerException) { exception.initCause(schedulerException); cleanupFailedSend( workItem, new AmqpException(false, String.format(Locale.US, "Entity(%s): send operation failed while scheduling a" + " retry on Reactor, see cause for more details.", entityPath), schedulerException, handler.getErrorContext(sender)), outcome); } } } else if (outcome instanceof Released) { cleanupFailedSend(workItem, new OperationCancelledException(outcome.toString(), handler.getErrorContext(sender)), outcome); } else if (outcome instanceof Declared) { final Declared declared = (Declared) outcome; workItem.success(declared); } else { cleanupFailedSend(workItem, new AmqpException(false, outcome.toString(), handler.getErrorContext(sender)), outcome); } } private void scheduleWorkOnDispatcher() { try { reactorProvider.getReactorDispatcher().invoke(this::processSendWork); } catch (IOException e) { 
logger.warning("Error scheduling work on reactor.", e); } catch (RejectedExecutionException e) { logger.info("Error scheduling work on reactor because of RejectedExecutionException."); } } private void cleanupFailedSend(final RetriableWorkItem workItem, final Exception exception, final DeliveryState deliveryState) { workItem.error(exception, deliveryState); } private void completeClose() { isClosedMono.emitEmpty((signalType, result) -> { addSignalTypeAndResult(logger.atWarning(), signalType, result).log("Unable to emit shutdown signal."); return false; }); subscriptions.dispose(); if (tokenManager != null) { tokenManager.close(); } } /** * Clears pending sends and puts an error in there. * * @param error Error to pass to pending sends. */ private void handleError(Throwable error) { synchronized (pendingSendLock) { if (isDisposed.getAndSet(true)) { logger.verbose("This was already disposed. Dropping error."); } else { logger.atVerbose() .addKeyValue(PENDING_SENDS_SIZE_KEY, () -> String.valueOf(pendingSendsMap.size())) .log("Disposing pending sends with error."); } pendingSendsMap.forEach((key, value) -> value.error(error, null)); pendingSendsMap.clear(); pendingSendsQueue.clear(); } completeClose(); } private void handleClose() { final String message = String.format("Could not complete sends because link '%s' for '%s' is closed.", getLinkName(), entityPath); final AmqpErrorContext context = handler.getErrorContext(sender); synchronized (pendingSendLock) { if (isDisposed.getAndSet(true)) { logger.verbose("This was already disposed."); } else { logger.atVerbose() .addKeyValue(PENDING_SENDS_SIZE_KEY, () -> String.valueOf(pendingSendsMap.size())) .log("Disposing pending sends."); } pendingSendsMap.forEach((key, value) -> value.error(new AmqpException(true, message, context), null)); pendingSendsMap.clear(); pendingSendsQueue.clear(); } completeClose(); } private static boolean isGeneralSendError(Symbol amqpError) { return (amqpError == AmqpErrorCode.SERVER_BUSY_ERROR || 
amqpError == AmqpErrorCode.TIMEOUT_ERROR || amqpError == AmqpErrorCode.RESOURCE_LIMIT_EXCEEDED); } private static class WeightedDeliveryTag { private final String deliveryTag; private final int priority; WeightedDeliveryTag(final String deliveryTag, final int priority) { this.deliveryTag = deliveryTag; this.priority = priority; } private String getDeliveryTag() { return this.deliveryTag; } private int getPriority() { return this.priority; } } private static class DeliveryTagComparator implements Comparator<WeightedDeliveryTag>, Serializable { private static final long serialVersionUID = -7057500582037295635L; @Override public int compare(WeightedDeliveryTag deliveryTag0, WeightedDeliveryTag deliveryTag1) { return deliveryTag1.getPriority() - deliveryTag0.getPriority(); } } /** * Keeps track of messages that have been sent, but may not have been acknowledged by the service. */ private class SendTimeout implements Runnable { private final String deliveryTag; SendTimeout(String deliveryTag) { this.deliveryTag = deliveryTag; } @Override public void run() { final RetriableWorkItem workItem = pendingSendsMap.remove(deliveryTag); if (workItem == null) { return; } Exception cause = lastKnownLinkError; final Exception lastError; final Instant lastErrorTime; synchronized (errorConditionLock) { lastError = lastKnownLinkError; lastErrorTime = lastKnownErrorReportedAt; } if (lastError != null && lastErrorTime != null) { final Instant now = Instant.now(); final boolean isLastErrorAfterSleepTime = lastErrorTime.isAfter(now.minusSeconds(SERVER_BUSY_BASE_SLEEP_TIME_IN_SECS)); final boolean isServerBusy = lastError instanceof AmqpException && isLastErrorAfterSleepTime; final boolean isLastErrorAfterOperationTimeout = lastErrorTime.isAfter(now.minus(retryOptions.getTryTimeout())); cause = isServerBusy || isLastErrorAfterOperationTimeout ? 
lastError : null; } final AmqpException exception; if (cause instanceof AmqpException) { exception = (AmqpException) cause; } else { exception = new AmqpException(true, AmqpErrorCondition.TIMEOUT_ERROR, String.format(Locale.US, "Entity(%s): Send operation timed out", entityPath), handler.getErrorContext(sender)); } workItem.error(exception, null); } } }
class ReactorSender implements AmqpSendLink, AsyncCloseable, AutoCloseable { private static final String DELIVERY_TAG_KEY = "deliveryTag"; private static final String PENDING_SENDS_SIZE_KEY = "pending_sends_size"; private final String entityPath; private final Sender sender; private final SendLinkHandler handler; private final ReactorProvider reactorProvider; private final Disposable.Composite subscriptions; private final AtomicBoolean hasConnected = new AtomicBoolean(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicInteger retryAttempts = new AtomicInteger(); private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final Object pendingSendLock = new Object(); private final ConcurrentHashMap<String, RetriableWorkItem> pendingSendsMap = new ConcurrentHashMap<>(); private final PriorityQueue<WeightedDeliveryTag> pendingSendsQueue = new PriorityQueue<>(1000, new DeliveryTagComparator()); private final ClientLogger logger; private final Flux<AmqpEndpointState> endpointStates; private final TokenManager tokenManager; private final MessageSerializer messageSerializer; private final AmqpRetryPolicy retry; private final AmqpRetryOptions retryOptions; private final String activeTimeoutMessage; private final Scheduler scheduler; private final AmqpMetricsProvider metricsProvider; private final Object errorConditionLock = new Object(); private volatile Exception lastKnownLinkError; private volatile Instant lastKnownErrorReportedAt; private volatile int linkSize; /** * Creates an instance of {@link ReactorSender}. * * @param amqpConnection The parent {@link AmqpConnection} that this sender lives in. * @param entityPath The message broker address for the sender. * @param sender The underlying proton-j sender. * @param handler The proton-j handler associated with the sender. * @param reactorProvider Provider to schedule work on the proton-j reactor. * @param tokenManager Token manager for authorising with the CBS node. 
Can be {@code null} if it is part of the * transaction manager. * @param messageSerializer Serializer to deserialise and serialize AMQP messages. * @param retryOptions Retry options. * @param scheduler Scheduler to schedule send timeout. */ ReactorSender(AmqpConnection amqpConnection, String entityPath, Sender sender, SendLinkHandler handler, ReactorProvider reactorProvider, TokenManager tokenManager, MessageSerializer messageSerializer, AmqpRetryOptions retryOptions, Scheduler scheduler, AmqpMetricsProvider metricsProvider) { this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.sender = Objects.requireNonNull(sender, "'sender' cannot be null."); this.handler = Objects.requireNonNull(handler, "'handler' cannot be null."); this.reactorProvider = Objects.requireNonNull(reactorProvider, "'reactorProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null."); this.scheduler = Objects.requireNonNull(scheduler, "'scheduler' cannot be null."); this.retry = RetryUtil.getRetryPolicy(retryOptions); this.tokenManager = tokenManager; this.metricsProvider = metricsProvider; String connectionId = handler.getConnectionId() == null ? NOT_APPLICABLE : handler.getConnectionId(); String linkName = getLinkName() == null ? 
NOT_APPLICABLE : getLinkName(); Map<String, Object> loggingContext = createContextWithConnectionId(connectionId); loggingContext.put(LINK_NAME_KEY, linkName); loggingContext.put(ENTITY_PATH_KEY, entityPath); this.logger = new ClientLogger(ReactorSender.class, loggingContext); this.activeTimeoutMessage = String.format( "ReactorSender connectionId[%s] linkName[%s]: Waiting for send and receive handler to be ACTIVE", handler.getConnectionId(), handler.getLinkName()); this.endpointStates = this.handler.getEndpointStates() .map(state -> { logger.verbose("State {}", state); this.hasConnected.set(state == EndpointState.ACTIVE); return AmqpEndpointStateUtil.getConnectionState(state); }) .doOnError(error -> { hasConnected.set(false); handleError(error); }) .doOnComplete(() -> { hasConnected.set(false); handleClose(); }) .cache(1); this.subscriptions = Disposables.composite( this.endpointStates.subscribe(), this.handler.getDeliveredMessages().subscribe(this::processDeliveredMessage), this.handler.getLinkCredits().subscribe(credit -> { logger.atVerbose().addKeyValue("credits", credit) .log("Credits on link."); this.scheduleWorkOnDispatcher(); }), amqpConnection.getShutdownSignals().flatMap(signal -> { logger.verbose("Shutdown signal received."); hasConnected.set(false); return closeAsync("Connection shutdown.", null); }).subscribe() ); if (tokenManager != null) { this.subscriptions.add(tokenManager.getAuthorizationResults().onErrorResume(error -> { final Mono<Void> operation = closeAsync(String.format("connectionId[%s] linkName[%s] Token renewal failure. Disposing send " + "link.", amqpConnection.getId(), getLinkName()), new ErrorCondition(Symbol.getSymbol(NOT_ALLOWED.getErrorCondition()), error.getMessage())); return operation.then(Mono.empty()); }).subscribe(response -> { logger.atVerbose().addKeyValue("response", response) .log("Token refreshed."); }, error -> { }, () -> { logger.verbose(" Authorization completed. Disposing."); closeAsync("Authorization completed. 
Disposing.", null).subscribe(); })); } } @Override public Flux<AmqpEndpointState> getEndpointStates() { return endpointStates; } @Override public Mono<Void> send(Message message) { return send(message, null); } @Override public Mono<Void> send(Message message, DeliveryState deliveryState) { if (isDisposed.get()) { return Mono.error(new IllegalStateException(String.format( "connectionId[%s] linkName[%s] Cannot publish message when disposed.", handler.getConnectionId(), getLinkName()))); } return getLinkSize() .flatMap(maxMessageSize -> { final int payloadSize = messageSerializer.getSize(message); final int allocationSize = Math.min(payloadSize + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize); final byte[] bytes = new byte[allocationSize]; int encodedSize; try { encodedSize = message.encode(bytes, 0, allocationSize); } catch (BufferOverflowException exception) { final String errorMessage = String.format(Locale.US, "Error sending. Size of the payload exceeded maximum message size: %s kb", maxMessageSize / 1024); final Throwable error = new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, errorMessage, exception, handler.getErrorContext(sender)); return Mono.error(error); } return send(bytes, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT, deliveryState); }).then(); } @Override public Mono<Void> send(List<Message> messageBatch) { return send(messageBatch, null); } @Override public Mono<Void> send(List<Message> messageBatch, DeliveryState deliveryState) { if (isDisposed.get()) { return Mono.error(new IllegalStateException(String.format( "connectionId[%s] linkName[%s] Cannot publish data batch when disposed.", handler.getConnectionId(), getLinkName()))); } if (messageBatch.size() == 1) { return send(messageBatch.get(0), deliveryState); } return getLinkSize() .flatMap(maxMessageSize -> { int totalEncodedSize = 0; final CompositeReadableBuffer buffer = new CompositeReadableBuffer(); final byte[] envelopBytes = batchEnvelopBytes(messageBatch.get(0), 
maxMessageSize); if (envelopBytes.length > 0) { totalEncodedSize += envelopBytes.length; if (totalEncodedSize > maxMessageSize) { return batchBufferOverflowError(maxMessageSize); } buffer.append(envelopBytes); } for (final Message message : messageBatch) { final byte[] sectionBytes = batchBinaryDataSectionBytes(message, maxMessageSize); if (sectionBytes.length > 0) { totalEncodedSize += sectionBytes.length; if (totalEncodedSize > maxMessageSize) { return batchBufferOverflowError(maxMessageSize); } buffer.append(sectionBytes); } else { logger.info("Ignoring the empty message org.apache.qpid.proton.message.message@{} in the batch.", Integer.toHexString(System.identityHashCode(message))); } } return send(buffer, AmqpConstants.AMQP_BATCH_MESSAGE_FORMAT, deliveryState); }).then(); } private byte[] batchEnvelopBytes(Message envelopMessage, int maxMessageSize) { final Message message = Proton.message(); message.setMessageAnnotations(envelopMessage.getMessageAnnotations()); if ((envelopMessage.getMessageId() instanceof String) && !CoreUtils.isNullOrEmpty((String) envelopMessage.getMessageId())) { message.setMessageId(envelopMessage.getMessageId()); } if (!CoreUtils.isNullOrEmpty(envelopMessage.getGroupId())) { message.setGroupId(envelopMessage.getGroupId()); } final int size = messageSerializer.getSize(message); final int allocationSize = Math.min(size + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize); final byte[] encodedBytes = new byte[allocationSize]; final int encodedSize = message.encode(encodedBytes, 0, allocationSize); return Arrays.copyOf(encodedBytes, encodedSize); } private byte[] batchBinaryDataSectionBytes(Message sectionMessage, int maxMessageSize) { final int size = messageSerializer.getSize(sectionMessage); final int allocationSize = Math.min(size + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize); final byte[] encodedBytes = new byte[allocationSize]; final int encodedSize = sectionMessage.encode(encodedBytes, 0, allocationSize); final Message message = 
Proton.message(); final Data binaryData = new Data(new Binary(encodedBytes, 0, encodedSize)); message.setBody(binaryData); final int binaryRawSize = binaryData.getValue().getLength(); final int binaryEncodedSize = binaryEncodedSize(binaryRawSize); final byte[] binaryEncodedBytes = new byte[binaryEncodedSize]; message.encode(binaryEncodedBytes, 0, binaryEncodedSize); return binaryEncodedBytes; } private Mono<Void> batchBufferOverflowError(int maxMessageSize) { return FluxUtil.monoError(logger, new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, String.format(Locale.US, "Size of the payload exceeded maximum message size: %s kb", maxMessageSize / 1024), new BufferOverflowException(), handler.getErrorContext(sender))); } /** * Compute the encoded size when encoding a binary data of given size per Amqp 1.0 spec "amqp:data:binary" format. * * @param binaryRawSize the length of the binary data. * @return the encoded size. */ private int binaryEncodedSize(int binaryRawSize) { if (binaryRawSize <= 255) { return 5 + binaryRawSize; } else { return 8 + binaryRawSize; } } @Override public AmqpErrorContext getErrorContext() { return handler.getErrorContext(sender); } @Override public String getLinkName() { return sender.getName(); } @Override public String getEntityPath() { return entityPath; } @Override public String getHostname() { return handler.getHostname(); } @Override public Mono<Integer> getLinkSize() { if (linkSize > 0) { return Mono.defer(() -> Mono.just(this.linkSize)); } synchronized (this) { if (linkSize > 0) { return Mono.defer(() -> Mono.just(linkSize)); } return RetryUtil.withRetry(getEndpointStates().takeUntil(state -> state == AmqpEndpointState.ACTIVE), retryOptions, activeTimeoutMessage) .then(Mono.fromCallable(() -> { final UnsignedLong remoteMaxMessageSize = sender.getRemoteMaxMessageSize(); if (remoteMaxMessageSize != null) { linkSize = remoteMaxMessageSize.intValue(); } else { logger.warning("Could not get the getRemoteMaxMessageSize. 
Returning current link size: {}", linkSize); } return linkSize; })); } } @Override public boolean isDisposed() { return isDisposed.get(); } /** * Blocking call that disposes of the sender. * * @see */ @Override public void dispose() { close(); } /** * Blocking call that disposes of the sender. * * @see */ @Override public void close() { closeAsync().block(retryOptions.getTryTimeout()); } @Override public Mono<Void> closeAsync() { return closeAsync("User invoked close operation.", null); } /** * Disposes of the sender. * * @param errorCondition Error condition associated with close operation. * @param message Message associated with why the sender was closed. * * @return A mono that completes when the send link has closed. */ Mono<Void> closeAsync(String message, ErrorCondition errorCondition) { if (isDisposed.getAndSet(true)) { return isClosedMono.asMono(); } addErrorCondition(logger.atVerbose(), errorCondition) .log("Setting error condition and disposing. {}", message); final Runnable closeWork = () -> { if (errorCondition != null && sender.getCondition() == null) { sender.setCondition(errorCondition); } sender.close(); }; return Mono.fromRunnable(() -> { try { reactorProvider.getReactorDispatcher().invoke(closeWork); } catch (IOException e) { logger.warning("Could not schedule close work. Running manually. And completing close.", e); closeWork.run(); handleClose(); } catch (RejectedExecutionException e) { logger.info("RejectedExecutionException scheduling close work. And completing close."); closeWork.run(); handleClose(); } }).then(isClosedMono.asMono()) .publishOn(Schedulers.boundedElastic()); } /** * A mono that completes when the sender has completely closed. * * @return mono that completes when the sender has completely closed. 
*/ Mono<Void> isClosed() { return isClosedMono.asMono(); } @Override Mono<DeliveryState> send(ReadableBuffer buffer, int messageFormat, DeliveryState deliveryState) { return onEndpointActive().then(Mono.create(sink -> { sendWork(new RetriableWorkItem(buffer, messageFormat, sink, retryOptions.getTryTimeout(), deliveryState, metricsProvider)); })); } private Flux<EndpointState> onEndpointActive() { return RetryUtil.withRetry(handler.getEndpointStates().takeUntil(state -> state == EndpointState.ACTIVE), retryOptions, activeTimeoutMessage); } /** * Add the work item in pending send to be processed on {@link ReactorDispatcher} thread. * * @param workItem to be processed. */ private void sendWork(RetriableWorkItem workItem) { final String deliveryTag = UUID.randomUUID().toString().replace("-", ""); synchronized (pendingSendLock) { this.pendingSendsMap.put(deliveryTag, workItem); this.pendingSendsQueue.offer(new WeightedDeliveryTag(deliveryTag, workItem.hasBeenRetried() ? 1 : 0)); } this.scheduleWorkOnDispatcher(); } /** * Invokes work on the Reactor. Should only be called from ReactorDispatcher.invoke() */ private void processSendWork() { if (!hasConnected.get()) { logger.warning("Not connected. Not processing send work."); return; } if (isDisposed.get()) { logger.info("Sender is closed. 
Not executing work."); return; } while (hasConnected.get() && sender.getCredit() > 0) { final WeightedDeliveryTag weightedDelivery; final RetriableWorkItem workItem; final String deliveryTag; synchronized (pendingSendLock) { weightedDelivery = this.pendingSendsQueue.poll(); if (weightedDelivery != null) { deliveryTag = weightedDelivery.getDeliveryTag(); workItem = this.pendingSendsMap.get(deliveryTag); } else { workItem = null; deliveryTag = null; } } if (workItem == null) { if (deliveryTag != null) { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("sendData not found for this delivery."); } break; } Delivery delivery = null; boolean linkAdvance = false; int sentMsgSize = 0; Exception sendException = null; try { workItem.beforeTry(); delivery = sender.delivery(deliveryTag.getBytes(UTF_8)); delivery.setMessageFormat(workItem.getMessageFormat()); if (workItem.isDeliveryStateProvided()) { delivery.disposition(workItem.getDeliveryState()); } workItem.send(sender); linkAdvance = sender.advance(); } catch (Exception exception) { sendException = exception; } if (linkAdvance) { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("Sent message."); workItem.setWaitingForAck(); scheduler.schedule(new SendTimeout(deliveryTag), retryOptions.getTryTimeout().toMillis(), TimeUnit.MILLISECONDS); } else { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .addKeyValue("sentMessageSize", sentMsgSize) .addKeyValue("payloadActualSize", workItem.getEncodedMessageSize()) .log("Sendlink advance failed."); DeliveryState outcome = null; if (delivery != null) { outcome = delivery.getRemoteState(); delivery.free(); } final AmqpErrorContext context = handler.getErrorContext(sender); final Throwable exception = sendException != null ? new OperationCancelledException(String.format(Locale.US, "Entity(%s): send operation failed. 
Please see cause for more details", entityPath), sendException, context) : new OperationCancelledException(String.format(Locale.US, "Entity(%s): send operation failed while advancing delivery(tag: %s).", entityPath, deliveryTag), context); workItem.error(exception, outcome); } } } private void processDeliveredMessage(Delivery delivery) { final DeliveryState outcome = delivery.getRemoteState(); final String deliveryTag = new String(delivery.getTag(), UTF_8); logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("Process delivered message."); final RetriableWorkItem workItem = pendingSendsMap.remove(deliveryTag); if (workItem == null) { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("Mismatch (or send timed out)."); return; } else if (workItem.isDeliveryStateProvided()) { workItem.success(outcome); return; } if (outcome instanceof Accepted) { synchronized (errorConditionLock) { lastKnownLinkError = null; lastKnownErrorReportedAt = null; retryAttempts.set(0); } workItem.success(outcome); } else if (outcome instanceof Rejected) { final Rejected rejected = (Rejected) outcome; final org.apache.qpid.proton.amqp.transport.ErrorCondition error = rejected.getError(); final Exception exception = ExceptionUtil.toException(error.getCondition().toString(), error.getDescription(), handler.getErrorContext(sender)); logger.atWarning() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .addKeyValue("rejected", rejected) .log("Delivery rejected."); final int retryAttempt; if (isGeneralSendError(error.getCondition())) { synchronized (errorConditionLock) { lastKnownLinkError = exception; lastKnownErrorReportedAt = Instant.now(); retryAttempt = retryAttempts.incrementAndGet(); } } else { retryAttempt = retryAttempts.get(); } final Duration retryInterval = retry.calculateRetryDelay(exception, retryAttempt); if (retryInterval == null || retryInterval.compareTo(workItem.getTimeoutTracker().remaining()) > 0) { cleanupFailedSend(workItem, exception, outcome); } 
else { workItem.setLastKnownException(exception); try { reactorProvider.getReactorDispatcher().invoke(() -> sendWork(workItem), retryInterval); } catch (IOException | RejectedExecutionException schedulerException) { exception.initCause(schedulerException); cleanupFailedSend( workItem, new AmqpException(false, String.format(Locale.US, "Entity(%s): send operation failed while scheduling a" + " retry on Reactor, see cause for more details.", entityPath), schedulerException, handler.getErrorContext(sender)), outcome); } } } else if (outcome instanceof Released) { cleanupFailedSend(workItem, new OperationCancelledException(outcome.toString(), handler.getErrorContext(sender)), outcome); } else if (outcome instanceof Declared) { final Declared declared = (Declared) outcome; workItem.success(declared); } else { cleanupFailedSend(workItem, new AmqpException(false, outcome.toString(), handler.getErrorContext(sender)), outcome); } } private void scheduleWorkOnDispatcher() { try { reactorProvider.getReactorDispatcher().invoke(this::processSendWork); } catch (IOException e) { logger.warning("Error scheduling work on reactor.", e); } catch (RejectedExecutionException e) { logger.info("Error scheduling work on reactor because of RejectedExecutionException."); } } private void cleanupFailedSend(final RetriableWorkItem workItem, final Exception exception, final DeliveryState deliveryState) { workItem.error(exception, deliveryState); } private void completeClose() { isClosedMono.emitEmpty((signalType, result) -> { addSignalTypeAndResult(logger.atWarning(), signalType, result).log("Unable to emit shutdown signal."); return false; }); subscriptions.dispose(); if (tokenManager != null) { tokenManager.close(); } } /** * Clears pending sends and puts an error in there. * * @param error Error to pass to pending sends. */ private void handleError(Throwable error) { synchronized (pendingSendLock) { if (isDisposed.getAndSet(true)) { logger.verbose("This was already disposed. 
Dropping error."); } else { logger.atVerbose() .addKeyValue(PENDING_SENDS_SIZE_KEY, () -> String.valueOf(pendingSendsMap.size())) .log("Disposing pending sends with error."); } pendingSendsMap.forEach((key, value) -> value.error(error, null)); pendingSendsMap.clear(); pendingSendsQueue.clear(); } completeClose(); } private void handleClose() { final String message = String.format("Could not complete sends because link '%s' for '%s' is closed.", getLinkName(), entityPath); final AmqpErrorContext context = handler.getErrorContext(sender); synchronized (pendingSendLock) { if (isDisposed.getAndSet(true)) { logger.verbose("This was already disposed."); } else { logger.atVerbose() .addKeyValue(PENDING_SENDS_SIZE_KEY, () -> String.valueOf(pendingSendsMap.size())) .log("Disposing pending sends."); } pendingSendsMap.forEach((key, value) -> value.error(new AmqpException(true, message, context), null)); pendingSendsMap.clear(); pendingSendsQueue.clear(); } completeClose(); } private static boolean isGeneralSendError(Symbol amqpError) { return (amqpError == AmqpErrorCode.SERVER_BUSY_ERROR || amqpError == AmqpErrorCode.TIMEOUT_ERROR || amqpError == AmqpErrorCode.RESOURCE_LIMIT_EXCEEDED); } private static class WeightedDeliveryTag { private final String deliveryTag; private final int priority; WeightedDeliveryTag(final String deliveryTag, final int priority) { this.deliveryTag = deliveryTag; this.priority = priority; } private String getDeliveryTag() { return this.deliveryTag; } private int getPriority() { return this.priority; } } private static class DeliveryTagComparator implements Comparator<WeightedDeliveryTag>, Serializable { private static final long serialVersionUID = -7057500582037295635L; @Override public int compare(WeightedDeliveryTag deliveryTag0, WeightedDeliveryTag deliveryTag1) { return deliveryTag1.getPriority() - deliveryTag0.getPriority(); } } /** * Keeps track of messages that have been sent, but may not have been acknowledged by the service. 
*/ private class SendTimeout implements Runnable { private final String deliveryTag; SendTimeout(String deliveryTag) { this.deliveryTag = deliveryTag; } @Override public void run() { final RetriableWorkItem workItem = pendingSendsMap.remove(deliveryTag); if (workItem == null) { return; } Exception cause = lastKnownLinkError; final Exception lastError; final Instant lastErrorTime; synchronized (errorConditionLock) { lastError = lastKnownLinkError; lastErrorTime = lastKnownErrorReportedAt; } if (lastError != null && lastErrorTime != null) { final Instant now = Instant.now(); final boolean isLastErrorAfterSleepTime = lastErrorTime.isAfter(now.minusSeconds(SERVER_BUSY_BASE_SLEEP_TIME_IN_SECS)); final boolean isServerBusy = lastError instanceof AmqpException && isLastErrorAfterSleepTime; final boolean isLastErrorAfterOperationTimeout = lastErrorTime.isAfter(now.minus(retryOptions.getTryTimeout())); cause = isServerBusy || isLastErrorAfterOperationTimeout ? lastError : null; } final AmqpException exception; if (cause instanceof AmqpException) { exception = (AmqpException) cause; } else { exception = new AmqpException(true, AmqpErrorCondition.TIMEOUT_ERROR, String.format(Locale.US, "Entity(%s): Send operation timed out", entityPath), handler.getErrorContext(sender)); } workItem.error(exception, null); } } }
Thinking a bit more — for this non-batch code path, which needs only a single byte[], I think we can avoid the wrapper ReadableBuffer instances. Although we don't see this wrapper type contributing to GC pressure (it has an optimized path for a single byte[], so GC counts are comparable), avoiding the wrapper is still a good idea, so I updated the logic accordingly.
/**
 * Sends the already-encoded AMQP payload on this link.
 *
 * @param bytes Buffer holding the encoded message.
 * @param arrayOffset Number of valid encoded bytes in {@code bytes} (callers pass the encoded size;
 *     despite the name this is used as the readable limit, not an offset).
 * @param messageFormat AMQP message format code for the delivery.
 * @param deliveryState Delivery state to pre-settle with, or {@code null}.
 * @return A {@link Mono} that completes with the remote {@link DeliveryState} once the send is acknowledged.
 */
public Mono<DeliveryState> send(byte[] bytes, int arrayOffset, int messageFormat, DeliveryState deliveryState) {
    final CompositeReadableBuffer buffer = new CompositeReadableBuffer();
    // append(...) returns the same buffer; cap the readable region at the encoded size.
    buffer.append(bytes);
    buffer.limit(arrayOffset);
    return send(buffer, messageFormat, deliveryState);
}
final CompositeReadableBuffer buffer = new CompositeReadableBuffer();
/**
 * Sends the already-encoded AMQP payload on this link without wrapping it in a ReadableBuffer.
 *
 * @param bytes Buffer holding the encoded message.
 * @param arrayOffset Number of valid encoded bytes in {@code bytes} (callers pass the encoded size).
 * @param messageFormat AMQP message format code for the delivery.
 * @param deliveryState Delivery state to pre-settle with, or {@code null}.
 * @return A {@link Mono} that completes with the remote {@link DeliveryState} once the send is acknowledged.
 */
public Mono<DeliveryState> send(byte[] bytes, int arrayOffset, int messageFormat, DeliveryState deliveryState) {
    // Wait for the link to become ACTIVE, then enqueue the pending send; the sink is
    // completed (or errored) when the delivery is settled by the remote peer.
    return onEndpointActive().then(Mono.create(sink -> {
        final RetriableWorkItem workItem = new RetriableWorkItem(bytes, arrayOffset, messageFormat, sink,
            retryOptions.getTryTimeout(), deliveryState, metricsProvider);
        sendWork(workItem);
    }));
}
class ReactorSender implements AmqpSendLink, AsyncCloseable, AutoCloseable { private static final String DELIVERY_TAG_KEY = "deliveryTag"; private static final String PENDING_SENDS_SIZE_KEY = "pending_sends_size"; private final String entityPath; private final Sender sender; private final SendLinkHandler handler; private final ReactorProvider reactorProvider; private final Disposable.Composite subscriptions; private final AtomicBoolean hasConnected = new AtomicBoolean(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicInteger retryAttempts = new AtomicInteger(); private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final Object pendingSendLock = new Object(); private final ConcurrentHashMap<String, RetriableWorkItem> pendingSendsMap = new ConcurrentHashMap<>(); private final PriorityQueue<WeightedDeliveryTag> pendingSendsQueue = new PriorityQueue<>(1000, new DeliveryTagComparator()); private final ClientLogger logger; private final Flux<AmqpEndpointState> endpointStates; private final TokenManager tokenManager; private final MessageSerializer messageSerializer; private final AmqpRetryPolicy retry; private final AmqpRetryOptions retryOptions; private final String activeTimeoutMessage; private final Scheduler scheduler; private final AmqpMetricsProvider metricsProvider; private final Object errorConditionLock = new Object(); private volatile Exception lastKnownLinkError; private volatile Instant lastKnownErrorReportedAt; private volatile int linkSize; /** * Creates an instance of {@link ReactorSender}. * * @param amqpConnection The parent {@link AmqpConnection} that this sender lives in. * @param entityPath The message broker address for the sender. * @param sender The underlying proton-j sender. * @param handler The proton-j handler associated with the sender. * @param reactorProvider Provider to schedule work on the proton-j reactor. * @param tokenManager Token manager for authorising with the CBS node. 
Can be {@code null} if it is part of the * transaction manager. * @param messageSerializer Serializer to deserialise and serialize AMQP messages. * @param retryOptions Retry options. * @param scheduler Scheduler to schedule send timeout. */ ReactorSender(AmqpConnection amqpConnection, String entityPath, Sender sender, SendLinkHandler handler, ReactorProvider reactorProvider, TokenManager tokenManager, MessageSerializer messageSerializer, AmqpRetryOptions retryOptions, Scheduler scheduler, AmqpMetricsProvider metricsProvider) { this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.sender = Objects.requireNonNull(sender, "'sender' cannot be null."); this.handler = Objects.requireNonNull(handler, "'handler' cannot be null."); this.reactorProvider = Objects.requireNonNull(reactorProvider, "'reactorProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null."); this.scheduler = Objects.requireNonNull(scheduler, "'scheduler' cannot be null."); this.retry = RetryUtil.getRetryPolicy(retryOptions); this.tokenManager = tokenManager; this.metricsProvider = metricsProvider; String connectionId = handler.getConnectionId() == null ? NOT_APPLICABLE : handler.getConnectionId(); String linkName = getLinkName() == null ? 
NOT_APPLICABLE : getLinkName(); Map<String, Object> loggingContext = createContextWithConnectionId(connectionId); loggingContext.put(LINK_NAME_KEY, linkName); loggingContext.put(ENTITY_PATH_KEY, entityPath); this.logger = new ClientLogger(ReactorSender.class, loggingContext); this.activeTimeoutMessage = String.format( "ReactorSender connectionId[%s] linkName[%s]: Waiting for send and receive handler to be ACTIVE", handler.getConnectionId(), handler.getLinkName()); this.endpointStates = this.handler.getEndpointStates() .map(state -> { logger.verbose("State {}", state); this.hasConnected.set(state == EndpointState.ACTIVE); return AmqpEndpointStateUtil.getConnectionState(state); }) .doOnError(error -> { hasConnected.set(false); handleError(error); }) .doOnComplete(() -> { hasConnected.set(false); handleClose(); }) .cache(1); this.subscriptions = Disposables.composite( this.endpointStates.subscribe(), this.handler.getDeliveredMessages().subscribe(this::processDeliveredMessage), this.handler.getLinkCredits().subscribe(credit -> { logger.atVerbose().addKeyValue("credits", credit) .log("Credits on link."); this.scheduleWorkOnDispatcher(); }), amqpConnection.getShutdownSignals().flatMap(signal -> { logger.verbose("Shutdown signal received."); hasConnected.set(false); return closeAsync("Connection shutdown.", null); }).subscribe() ); if (tokenManager != null) { this.subscriptions.add(tokenManager.getAuthorizationResults().onErrorResume(error -> { final Mono<Void> operation = closeAsync(String.format("connectionId[%s] linkName[%s] Token renewal failure. Disposing send " + "link.", amqpConnection.getId(), getLinkName()), new ErrorCondition(Symbol.getSymbol(NOT_ALLOWED.getErrorCondition()), error.getMessage())); return operation.then(Mono.empty()); }).subscribe(response -> { logger.atVerbose().addKeyValue("response", response) .log("Token refreshed."); }, error -> { }, () -> { logger.verbose(" Authorization completed. Disposing."); closeAsync("Authorization completed. 
Disposing.", null).subscribe(); })); } } @Override public Flux<AmqpEndpointState> getEndpointStates() { return endpointStates; } @Override public Mono<Void> send(Message message) { return send(message, null); } @Override public Mono<Void> send(Message message, DeliveryState deliveryState) { if (isDisposed.get()) { return Mono.error(new IllegalStateException(String.format( "connectionId[%s] linkName[%s] Cannot publish message when disposed.", handler.getConnectionId(), getLinkName()))); } return getLinkSize() .flatMap(maxMessageSize -> { final int payloadSize = messageSerializer.getSize(message); final int allocationSize = Math.min(payloadSize + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize); final byte[] bytes = new byte[allocationSize]; int encodedSize; try { encodedSize = message.encode(bytes, 0, allocationSize); } catch (BufferOverflowException exception) { final String errorMessage = String.format(Locale.US, "Error sending. Size of the payload exceeded maximum message size: %s kb", maxMessageSize / 1024); final Throwable error = new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, errorMessage, exception, handler.getErrorContext(sender)); return Mono.error(error); } return send(bytes, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT, deliveryState); }).then(); } @Override public Mono<Void> send(List<Message> messageBatch) { return send(messageBatch, null); } @Override public Mono<Void> send(List<Message> messageBatch, DeliveryState deliveryState) { if (isDisposed.get()) { return Mono.error(new IllegalStateException(String.format( "connectionId[%s] linkName[%s] Cannot publish data batch when disposed.", handler.getConnectionId(), getLinkName()))); } if (messageBatch.size() == 1) { return send(messageBatch.get(0), deliveryState); } return getLinkSize() .flatMap(maxMessageSize -> { int totalEncodedSize = 0; final CompositeReadableBuffer buffer = new CompositeReadableBuffer(); final byte[] envelopBytes = batchEnvelopBytes(messageBatch.get(0), 
maxMessageSize); if (envelopBytes.length > 0) { totalEncodedSize += envelopBytes.length; if (totalEncodedSize > maxMessageSize) { return batchBufferOverflowError(maxMessageSize); } buffer.append(envelopBytes); } for (final Message message : messageBatch) { final byte[] sectionBytes = batchBinaryDataSectionBytes(message, maxMessageSize); if (sectionBytes.length > 0) { totalEncodedSize += sectionBytes.length; if (totalEncodedSize > maxMessageSize) { return batchBufferOverflowError(maxMessageSize); } buffer.append(sectionBytes); } } return send(buffer, AmqpConstants.AMQP_BATCH_MESSAGE_FORMAT, deliveryState); }).then(); } private byte[] batchEnvelopBytes(Message envelopMessage, int maxMessageSize) { final Message message = Proton.message(); message.setMessageAnnotations(envelopMessage.getMessageAnnotations()); final int size = messageSerializer.getSize(message); final int allocationSize = Math.min(size + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize); final byte[] encodedBytes = new byte[allocationSize]; final int encodedSize = message.encode(encodedBytes, 0, allocationSize); return Arrays.copyOf(encodedBytes, encodedSize); } private byte[] batchBinaryDataSectionBytes(Message sectionMessage, int maxMessageSize) { final int size = messageSerializer.getSize(sectionMessage); final int allocationSize = Math.min(size + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize); final byte[] encodedBytes = new byte[allocationSize]; final int encodedSize = sectionMessage.encode(encodedBytes, 0, allocationSize); final Message message = Proton.message(); final Data binaryData = new Data(new Binary(encodedBytes, 0, encodedSize)); message.setBody(binaryData); final int binaryRawSize = binaryData.getValue().getLength(); final int binaryEncodedSize = binaryEncodedSize(binaryRawSize); final byte[] binaryEncodedBytes = new byte[binaryEncodedSize]; message.encode(binaryEncodedBytes, 0, binaryEncodedSize); return binaryEncodedBytes; } private Mono<Void> batchBufferOverflowError(int maxMessageSize) { 
return Mono.error(new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, String.format(Locale.US, "Size of the payload exceeded maximum message size: %s kb", maxMessageSize / 1024), new BufferOverflowException(), handler.getErrorContext(sender))); } /** * Compute the encoded size when encoding a binary data of given size per Amqp 1.0 spec "amqp:data:binary" format. * * @param binaryRawSize the length of the binary data. * @return the encoded size. */ private int binaryEncodedSize(int binaryRawSize) { if (binaryRawSize <= 255) { return 5 + binaryRawSize; } else { return 8 + binaryRawSize; } } @Override public AmqpErrorContext getErrorContext() { return handler.getErrorContext(sender); } @Override public String getLinkName() { return sender.getName(); } @Override public String getEntityPath() { return entityPath; } @Override public String getHostname() { return handler.getHostname(); } @Override public Mono<Integer> getLinkSize() { if (linkSize > 0) { return Mono.defer(() -> Mono.just(this.linkSize)); } synchronized (this) { if (linkSize > 0) { return Mono.defer(() -> Mono.just(linkSize)); } return RetryUtil.withRetry(getEndpointStates().takeUntil(state -> state == AmqpEndpointState.ACTIVE), retryOptions, activeTimeoutMessage) .then(Mono.fromCallable(() -> { final UnsignedLong remoteMaxMessageSize = sender.getRemoteMaxMessageSize(); if (remoteMaxMessageSize != null) { linkSize = remoteMaxMessageSize.intValue(); } else { logger.warning("Could not get the getRemoteMaxMessageSize. Returning current link size: {}", linkSize); } return linkSize; })); } } @Override public boolean isDisposed() { return isDisposed.get(); } /** * Blocking call that disposes of the sender. * * @see */ @Override public void dispose() { close(); } /** * Blocking call that disposes of the sender. 
* * @see */ @Override public void close() { closeAsync().block(retryOptions.getTryTimeout()); } @Override public Mono<Void> closeAsync() { return closeAsync("User invoked close operation.", null); } /** * Disposes of the sender. * * @param errorCondition Error condition associated with close operation. * @param message Message associated with why the sender was closed. * * @return A mono that completes when the send link has closed. */ Mono<Void> closeAsync(String message, ErrorCondition errorCondition) { if (isDisposed.getAndSet(true)) { return isClosedMono.asMono(); } addErrorCondition(logger.atVerbose(), errorCondition) .log("Setting error condition and disposing. {}", message); final Runnable closeWork = () -> { if (errorCondition != null && sender.getCondition() == null) { sender.setCondition(errorCondition); } sender.close(); }; return Mono.fromRunnable(() -> { try { reactorProvider.getReactorDispatcher().invoke(closeWork); } catch (IOException e) { logger.warning("Could not schedule close work. Running manually. And completing close.", e); closeWork.run(); handleClose(); } catch (RejectedExecutionException e) { logger.info("RejectedExecutionException scheduling close work. And completing close."); closeWork.run(); handleClose(); } }).then(isClosedMono.asMono()) .publishOn(Schedulers.boundedElastic()); } /** * A mono that completes when the sender has completely closed. * * @return mono that completes when the sender has completely closed. 
*/ Mono<Void> isClosed() { return isClosedMono.asMono(); } @Override Mono<DeliveryState> send(ReadableBuffer buffer, int messageFormat, DeliveryState deliveryState) { final Flux<EndpointState> activeEndpointFlux = RetryUtil.withRetry( handler.getEndpointStates().takeUntil(state -> state == EndpointState.ACTIVE), retryOptions, activeTimeoutMessage); return activeEndpointFlux.then(Mono.create(sink -> { sendWork(new RetriableWorkItem(buffer, messageFormat, sink, retryOptions.getTryTimeout(), deliveryState, metricsProvider)); })); } /** * Add the work item in pending send to be processed on {@link ReactorDispatcher} thread. * * @param workItem to be processed. */ private void sendWork(RetriableWorkItem workItem) { final String deliveryTag = UUID.randomUUID().toString().replace("-", ""); synchronized (pendingSendLock) { this.pendingSendsMap.put(deliveryTag, workItem); this.pendingSendsQueue.offer(new WeightedDeliveryTag(deliveryTag, workItem.hasBeenRetried() ? 1 : 0)); } this.scheduleWorkOnDispatcher(); } /** * Invokes work on the Reactor. Should only be called from ReactorDispatcher.invoke() */ private void processSendWork() { if (!hasConnected.get()) { logger.warning("Not connected. Not processing send work."); return; } if (isDisposed.get()) { logger.info("Sender is closed. 
Not executing work."); return; } while (hasConnected.get() && sender.getCredit() > 0) { final WeightedDeliveryTag weightedDelivery; final RetriableWorkItem workItem; final String deliveryTag; synchronized (pendingSendLock) { weightedDelivery = this.pendingSendsQueue.poll(); if (weightedDelivery != null) { deliveryTag = weightedDelivery.getDeliveryTag(); workItem = this.pendingSendsMap.get(deliveryTag); } else { workItem = null; deliveryTag = null; } } if (workItem == null) { if (deliveryTag != null) { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("sendData not found for this delivery."); } break; } Delivery delivery = null; boolean linkAdvance = false; int sentMsgSize = 0; Exception sendException = null; try { workItem.beforeTry(); delivery = sender.delivery(deliveryTag.getBytes(UTF_8)); delivery.setMessageFormat(workItem.getMessageFormat()); if (workItem.isDeliveryStateProvided()) { delivery.disposition(workItem.getDeliveryState()); } final ReadableBuffer encodedBuffer = workItem.getEncodedBuffer(); encodedBuffer.rewind(); sentMsgSize = sender.send(encodedBuffer); assert sentMsgSize == workItem.getEncodedMessageSize() : "Contract of the ProtonJ library for Sender. 
Send API changed"; linkAdvance = sender.advance(); } catch (Exception exception) { sendException = exception; } if (linkAdvance) { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("Sent message."); workItem.setWaitingForAck(); scheduler.schedule(new SendTimeout(deliveryTag), retryOptions.getTryTimeout().toMillis(), TimeUnit.MILLISECONDS); } else { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .addKeyValue("sentMessageSize", sentMsgSize) .addKeyValue("payloadActualSize", workItem.getEncodedMessageSize()) .log("Sendlink advance failed."); DeliveryState outcome = null; if (delivery != null) { outcome = delivery.getRemoteState(); delivery.free(); } final AmqpErrorContext context = handler.getErrorContext(sender); final Throwable exception = sendException != null ? new OperationCancelledException(String.format(Locale.US, "Entity(%s): send operation failed. Please see cause for more details", entityPath), sendException, context) : new OperationCancelledException(String.format(Locale.US, "Entity(%s): send operation failed while advancing delivery(tag: %s).", entityPath, deliveryTag), context); workItem.error(exception, outcome); } } } private void processDeliveredMessage(Delivery delivery) { final DeliveryState outcome = delivery.getRemoteState(); final String deliveryTag = new String(delivery.getTag(), UTF_8); logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("Process delivered message."); final RetriableWorkItem workItem = pendingSendsMap.remove(deliveryTag); if (workItem == null) { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("Mismatch (or send timed out)."); return; } else if (workItem.isDeliveryStateProvided()) { workItem.success(outcome); return; } if (outcome instanceof Accepted) { synchronized (errorConditionLock) { lastKnownLinkError = null; lastKnownErrorReportedAt = null; retryAttempts.set(0); } workItem.success(outcome); } else if (outcome instanceof Rejected) { final Rejected 
rejected = (Rejected) outcome; final org.apache.qpid.proton.amqp.transport.ErrorCondition error = rejected.getError(); final Exception exception = ExceptionUtil.toException(error.getCondition().toString(), error.getDescription(), handler.getErrorContext(sender)); logger.atWarning() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .addKeyValue("rejected", rejected) .log("Delivery rejected."); final int retryAttempt; if (isGeneralSendError(error.getCondition())) { synchronized (errorConditionLock) { lastKnownLinkError = exception; lastKnownErrorReportedAt = Instant.now(); retryAttempt = retryAttempts.incrementAndGet(); } } else { retryAttempt = retryAttempts.get(); } final Duration retryInterval = retry.calculateRetryDelay(exception, retryAttempt); if (retryInterval == null || retryInterval.compareTo(workItem.getTimeoutTracker().remaining()) > 0) { cleanupFailedSend(workItem, exception, outcome); } else { workItem.setLastKnownException(exception); try { reactorProvider.getReactorDispatcher().invoke(() -> sendWork(workItem), retryInterval); } catch (IOException | RejectedExecutionException schedulerException) { exception.initCause(schedulerException); cleanupFailedSend( workItem, new AmqpException(false, String.format(Locale.US, "Entity(%s): send operation failed while scheduling a" + " retry on Reactor, see cause for more details.", entityPath), schedulerException, handler.getErrorContext(sender)), outcome); } } } else if (outcome instanceof Released) { cleanupFailedSend(workItem, new OperationCancelledException(outcome.toString(), handler.getErrorContext(sender)), outcome); } else if (outcome instanceof Declared) { final Declared declared = (Declared) outcome; workItem.success(declared); } else { cleanupFailedSend(workItem, new AmqpException(false, outcome.toString(), handler.getErrorContext(sender)), outcome); } } private void scheduleWorkOnDispatcher() { try { reactorProvider.getReactorDispatcher().invoke(this::processSendWork); } catch (IOException e) { 
logger.warning("Error scheduling work on reactor.", e); } catch (RejectedExecutionException e) { logger.info("Error scheduling work on reactor because of RejectedExecutionException."); } } private void cleanupFailedSend(final RetriableWorkItem workItem, final Exception exception, final DeliveryState deliveryState) { workItem.error(exception, deliveryState); } private void completeClose() { isClosedMono.emitEmpty((signalType, result) -> { addSignalTypeAndResult(logger.atWarning(), signalType, result).log("Unable to emit shutdown signal."); return false; }); subscriptions.dispose(); if (tokenManager != null) { tokenManager.close(); } } /** * Clears pending sends and puts an error in there. * * @param error Error to pass to pending sends. */ private void handleError(Throwable error) { synchronized (pendingSendLock) { if (isDisposed.getAndSet(true)) { logger.verbose("This was already disposed. Dropping error."); } else { logger.atVerbose() .addKeyValue(PENDING_SENDS_SIZE_KEY, () -> String.valueOf(pendingSendsMap.size())) .log("Disposing pending sends with error."); } pendingSendsMap.forEach((key, value) -> value.error(error, null)); pendingSendsMap.clear(); pendingSendsQueue.clear(); } completeClose(); } private void handleClose() { final String message = String.format("Could not complete sends because link '%s' for '%s' is closed.", getLinkName(), entityPath); final AmqpErrorContext context = handler.getErrorContext(sender); synchronized (pendingSendLock) { if (isDisposed.getAndSet(true)) { logger.verbose("This was already disposed."); } else { logger.atVerbose() .addKeyValue(PENDING_SENDS_SIZE_KEY, () -> String.valueOf(pendingSendsMap.size())) .log("Disposing pending sends."); } pendingSendsMap.forEach((key, value) -> value.error(new AmqpException(true, message, context), null)); pendingSendsMap.clear(); pendingSendsQueue.clear(); } completeClose(); } private static boolean isGeneralSendError(Symbol amqpError) { return (amqpError == AmqpErrorCode.SERVER_BUSY_ERROR || 
amqpError == AmqpErrorCode.TIMEOUT_ERROR || amqpError == AmqpErrorCode.RESOURCE_LIMIT_EXCEEDED); } private static class WeightedDeliveryTag { private final String deliveryTag; private final int priority; WeightedDeliveryTag(final String deliveryTag, final int priority) { this.deliveryTag = deliveryTag; this.priority = priority; } private String getDeliveryTag() { return this.deliveryTag; } private int getPriority() { return this.priority; } } private static class DeliveryTagComparator implements Comparator<WeightedDeliveryTag>, Serializable { private static final long serialVersionUID = -7057500582037295635L; @Override public int compare(WeightedDeliveryTag deliveryTag0, WeightedDeliveryTag deliveryTag1) { return deliveryTag1.getPriority() - deliveryTag0.getPriority(); } } /** * Keeps track of messages that have been sent, but may not have been acknowledged by the service. */ private class SendTimeout implements Runnable { private final String deliveryTag; SendTimeout(String deliveryTag) { this.deliveryTag = deliveryTag; } @Override public void run() { final RetriableWorkItem workItem = pendingSendsMap.remove(deliveryTag); if (workItem == null) { return; } Exception cause = lastKnownLinkError; final Exception lastError; final Instant lastErrorTime; synchronized (errorConditionLock) { lastError = lastKnownLinkError; lastErrorTime = lastKnownErrorReportedAt; } if (lastError != null && lastErrorTime != null) { final Instant now = Instant.now(); final boolean isLastErrorAfterSleepTime = lastErrorTime.isAfter(now.minusSeconds(SERVER_BUSY_BASE_SLEEP_TIME_IN_SECS)); final boolean isServerBusy = lastError instanceof AmqpException && isLastErrorAfterSleepTime; final boolean isLastErrorAfterOperationTimeout = lastErrorTime.isAfter(now.minus(retryOptions.getTryTimeout())); cause = isServerBusy || isLastErrorAfterOperationTimeout ? 
lastError : null; } final AmqpException exception; if (cause instanceof AmqpException) { exception = (AmqpException) cause; } else { exception = new AmqpException(true, AmqpErrorCondition.TIMEOUT_ERROR, String.format(Locale.US, "Entity(%s): Send operation timed out", entityPath), handler.getErrorContext(sender)); } workItem.error(exception, null); } } }
class ReactorSender implements AmqpSendLink, AsyncCloseable, AutoCloseable { private static final String DELIVERY_TAG_KEY = "deliveryTag"; private static final String PENDING_SENDS_SIZE_KEY = "pending_sends_size"; private final String entityPath; private final Sender sender; private final SendLinkHandler handler; private final ReactorProvider reactorProvider; private final Disposable.Composite subscriptions; private final AtomicBoolean hasConnected = new AtomicBoolean(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicInteger retryAttempts = new AtomicInteger(); private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final Object pendingSendLock = new Object(); private final ConcurrentHashMap<String, RetriableWorkItem> pendingSendsMap = new ConcurrentHashMap<>(); private final PriorityQueue<WeightedDeliveryTag> pendingSendsQueue = new PriorityQueue<>(1000, new DeliveryTagComparator()); private final ClientLogger logger; private final Flux<AmqpEndpointState> endpointStates; private final TokenManager tokenManager; private final MessageSerializer messageSerializer; private final AmqpRetryPolicy retry; private final AmqpRetryOptions retryOptions; private final String activeTimeoutMessage; private final Scheduler scheduler; private final AmqpMetricsProvider metricsProvider; private final Object errorConditionLock = new Object(); private volatile Exception lastKnownLinkError; private volatile Instant lastKnownErrorReportedAt; private volatile int linkSize; /** * Creates an instance of {@link ReactorSender}. * * @param amqpConnection The parent {@link AmqpConnection} that this sender lives in. * @param entityPath The message broker address for the sender. * @param sender The underlying proton-j sender. * @param handler The proton-j handler associated with the sender. * @param reactorProvider Provider to schedule work on the proton-j reactor. * @param tokenManager Token manager for authorising with the CBS node. 
Can be {@code null} if it is part of the * transaction manager. * @param messageSerializer Serializer to deserialise and serialize AMQP messages. * @param retryOptions Retry options. * @param scheduler Scheduler to schedule send timeout. */ ReactorSender(AmqpConnection amqpConnection, String entityPath, Sender sender, SendLinkHandler handler, ReactorProvider reactorProvider, TokenManager tokenManager, MessageSerializer messageSerializer, AmqpRetryOptions retryOptions, Scheduler scheduler, AmqpMetricsProvider metricsProvider) { this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.sender = Objects.requireNonNull(sender, "'sender' cannot be null."); this.handler = Objects.requireNonNull(handler, "'handler' cannot be null."); this.reactorProvider = Objects.requireNonNull(reactorProvider, "'reactorProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null."); this.scheduler = Objects.requireNonNull(scheduler, "'scheduler' cannot be null."); this.retry = RetryUtil.getRetryPolicy(retryOptions); this.tokenManager = tokenManager; this.metricsProvider = metricsProvider; String connectionId = handler.getConnectionId() == null ? NOT_APPLICABLE : handler.getConnectionId(); String linkName = getLinkName() == null ? 
NOT_APPLICABLE : getLinkName(); Map<String, Object> loggingContext = createContextWithConnectionId(connectionId); loggingContext.put(LINK_NAME_KEY, linkName); loggingContext.put(ENTITY_PATH_KEY, entityPath); this.logger = new ClientLogger(ReactorSender.class, loggingContext); this.activeTimeoutMessage = String.format( "ReactorSender connectionId[%s] linkName[%s]: Waiting for send and receive handler to be ACTIVE", handler.getConnectionId(), handler.getLinkName()); this.endpointStates = this.handler.getEndpointStates() .map(state -> { logger.verbose("State {}", state); this.hasConnected.set(state == EndpointState.ACTIVE); return AmqpEndpointStateUtil.getConnectionState(state); }) .doOnError(error -> { hasConnected.set(false); handleError(error); }) .doOnComplete(() -> { hasConnected.set(false); handleClose(); }) .cache(1); this.subscriptions = Disposables.composite( this.endpointStates.subscribe(), this.handler.getDeliveredMessages().subscribe(this::processDeliveredMessage), this.handler.getLinkCredits().subscribe(credit -> { logger.atVerbose().addKeyValue("credits", credit) .log("Credits on link."); this.scheduleWorkOnDispatcher(); }), amqpConnection.getShutdownSignals().flatMap(signal -> { logger.verbose("Shutdown signal received."); hasConnected.set(false); return closeAsync("Connection shutdown.", null); }).subscribe() ); if (tokenManager != null) { this.subscriptions.add(tokenManager.getAuthorizationResults().onErrorResume(error -> { final Mono<Void> operation = closeAsync(String.format("connectionId[%s] linkName[%s] Token renewal failure. Disposing send " + "link.", amqpConnection.getId(), getLinkName()), new ErrorCondition(Symbol.getSymbol(NOT_ALLOWED.getErrorCondition()), error.getMessage())); return operation.then(Mono.empty()); }).subscribe(response -> { logger.atVerbose().addKeyValue("response", response) .log("Token refreshed."); }, error -> { }, () -> { logger.verbose(" Authorization completed. Disposing."); closeAsync("Authorization completed. 
Disposing.", null).subscribe(); })); } } @Override public Flux<AmqpEndpointState> getEndpointStates() { return endpointStates; } @Override public Mono<Void> send(Message message) { return send(message, null); } @Override public Mono<Void> send(Message message, DeliveryState deliveryState) { if (isDisposed.get()) { return Mono.error(new IllegalStateException(String.format( "connectionId[%s] linkName[%s] Cannot publish message when disposed.", handler.getConnectionId(), getLinkName()))); } return getLinkSize() .flatMap(maxMessageSize -> { final int payloadSize = messageSerializer.getSize(message); final int allocationSize = Math.min(payloadSize + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize); final byte[] bytes = new byte[allocationSize]; int encodedSize; try { encodedSize = message.encode(bytes, 0, allocationSize); } catch (BufferOverflowException exception) { final String errorMessage = String.format(Locale.US, "Error sending. Size of the payload exceeded maximum message size: %s kb", maxMessageSize / 1024); final Throwable error = new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, errorMessage, exception, handler.getErrorContext(sender)); return Mono.error(error); } return send(bytes, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT, deliveryState); }).then(); } @Override public Mono<Void> send(List<Message> messageBatch) { return send(messageBatch, null); } @Override public Mono<Void> send(List<Message> messageBatch, DeliveryState deliveryState) { if (isDisposed.get()) { return Mono.error(new IllegalStateException(String.format( "connectionId[%s] linkName[%s] Cannot publish data batch when disposed.", handler.getConnectionId(), getLinkName()))); } if (messageBatch.size() == 1) { return send(messageBatch.get(0), deliveryState); } return getLinkSize() .flatMap(maxMessageSize -> { int totalEncodedSize = 0; final CompositeReadableBuffer buffer = new CompositeReadableBuffer(); final byte[] envelopBytes = batchEnvelopBytes(messageBatch.get(0), 
maxMessageSize); if (envelopBytes.length > 0) { totalEncodedSize += envelopBytes.length; if (totalEncodedSize > maxMessageSize) { return batchBufferOverflowError(maxMessageSize); } buffer.append(envelopBytes); } for (final Message message : messageBatch) { final byte[] sectionBytes = batchBinaryDataSectionBytes(message, maxMessageSize); if (sectionBytes.length > 0) { totalEncodedSize += sectionBytes.length; if (totalEncodedSize > maxMessageSize) { return batchBufferOverflowError(maxMessageSize); } buffer.append(sectionBytes); } else { logger.info("Ignoring the empty message org.apache.qpid.proton.message.message@{} in the batch.", Integer.toHexString(System.identityHashCode(message))); } } return send(buffer, AmqpConstants.AMQP_BATCH_MESSAGE_FORMAT, deliveryState); }).then(); } private byte[] batchEnvelopBytes(Message envelopMessage, int maxMessageSize) { final Message message = Proton.message(); message.setMessageAnnotations(envelopMessage.getMessageAnnotations()); if ((envelopMessage.getMessageId() instanceof String) && !CoreUtils.isNullOrEmpty((String) envelopMessage.getMessageId())) { message.setMessageId(envelopMessage.getMessageId()); } if (!CoreUtils.isNullOrEmpty(envelopMessage.getGroupId())) { message.setGroupId(envelopMessage.getGroupId()); } final int size = messageSerializer.getSize(message); final int allocationSize = Math.min(size + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize); final byte[] encodedBytes = new byte[allocationSize]; final int encodedSize = message.encode(encodedBytes, 0, allocationSize); return Arrays.copyOf(encodedBytes, encodedSize); } private byte[] batchBinaryDataSectionBytes(Message sectionMessage, int maxMessageSize) { final int size = messageSerializer.getSize(sectionMessage); final int allocationSize = Math.min(size + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize); final byte[] encodedBytes = new byte[allocationSize]; final int encodedSize = sectionMessage.encode(encodedBytes, 0, allocationSize); final Message message = 
Proton.message(); final Data binaryData = new Data(new Binary(encodedBytes, 0, encodedSize)); message.setBody(binaryData); final int binaryRawSize = binaryData.getValue().getLength(); final int binaryEncodedSize = binaryEncodedSize(binaryRawSize); final byte[] binaryEncodedBytes = new byte[binaryEncodedSize]; message.encode(binaryEncodedBytes, 0, binaryEncodedSize); return binaryEncodedBytes; } private Mono<Void> batchBufferOverflowError(int maxMessageSize) { return FluxUtil.monoError(logger, new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, String.format(Locale.US, "Size of the payload exceeded maximum message size: %s kb", maxMessageSize / 1024), new BufferOverflowException(), handler.getErrorContext(sender))); } /** * Compute the encoded size when encoding a binary data of given size per Amqp 1.0 spec "amqp:data:binary" format. * * @param binaryRawSize the length of the binary data. * @return the encoded size. */ private int binaryEncodedSize(int binaryRawSize) { if (binaryRawSize <= 255) { return 5 + binaryRawSize; } else { return 8 + binaryRawSize; } } @Override public AmqpErrorContext getErrorContext() { return handler.getErrorContext(sender); } @Override public String getLinkName() { return sender.getName(); } @Override public String getEntityPath() { return entityPath; } @Override public String getHostname() { return handler.getHostname(); } @Override public Mono<Integer> getLinkSize() { if (linkSize > 0) { return Mono.defer(() -> Mono.just(this.linkSize)); } synchronized (this) { if (linkSize > 0) { return Mono.defer(() -> Mono.just(linkSize)); } return RetryUtil.withRetry(getEndpointStates().takeUntil(state -> state == AmqpEndpointState.ACTIVE), retryOptions, activeTimeoutMessage) .then(Mono.fromCallable(() -> { final UnsignedLong remoteMaxMessageSize = sender.getRemoteMaxMessageSize(); if (remoteMaxMessageSize != null) { linkSize = remoteMaxMessageSize.intValue(); } else { logger.warning("Could not get the getRemoteMaxMessageSize. 
Returning current link size: {}", linkSize); } return linkSize; })); } } @Override public boolean isDisposed() { return isDisposed.get(); } /** * Blocking call that disposes of the sender. * * @see */ @Override public void dispose() { close(); } /** * Blocking call that disposes of the sender. * * @see */ @Override public void close() { closeAsync().block(retryOptions.getTryTimeout()); } @Override public Mono<Void> closeAsync() { return closeAsync("User invoked close operation.", null); } /** * Disposes of the sender. * * @param errorCondition Error condition associated with close operation. * @param message Message associated with why the sender was closed. * * @return A mono that completes when the send link has closed. */ Mono<Void> closeAsync(String message, ErrorCondition errorCondition) { if (isDisposed.getAndSet(true)) { return isClosedMono.asMono(); } addErrorCondition(logger.atVerbose(), errorCondition) .log("Setting error condition and disposing. {}", message); final Runnable closeWork = () -> { if (errorCondition != null && sender.getCondition() == null) { sender.setCondition(errorCondition); } sender.close(); }; return Mono.fromRunnable(() -> { try { reactorProvider.getReactorDispatcher().invoke(closeWork); } catch (IOException e) { logger.warning("Could not schedule close work. Running manually. And completing close.", e); closeWork.run(); handleClose(); } catch (RejectedExecutionException e) { logger.info("RejectedExecutionException scheduling close work. And completing close."); closeWork.run(); handleClose(); } }).then(isClosedMono.asMono()) .publishOn(Schedulers.boundedElastic()); } /** * A mono that completes when the sender has completely closed. * * @return mono that completes when the sender has completely closed. 
*/ Mono<Void> isClosed() { return isClosedMono.asMono(); } @Override Mono<DeliveryState> send(ReadableBuffer buffer, int messageFormat, DeliveryState deliveryState) { return onEndpointActive().then(Mono.create(sink -> { sendWork(new RetriableWorkItem(buffer, messageFormat, sink, retryOptions.getTryTimeout(), deliveryState, metricsProvider)); })); } private Flux<EndpointState> onEndpointActive() { return RetryUtil.withRetry(handler.getEndpointStates().takeUntil(state -> state == EndpointState.ACTIVE), retryOptions, activeTimeoutMessage); } /** * Add the work item in pending send to be processed on {@link ReactorDispatcher} thread. * * @param workItem to be processed. */ private void sendWork(RetriableWorkItem workItem) { final String deliveryTag = UUID.randomUUID().toString().replace("-", ""); synchronized (pendingSendLock) { this.pendingSendsMap.put(deliveryTag, workItem); this.pendingSendsQueue.offer(new WeightedDeliveryTag(deliveryTag, workItem.hasBeenRetried() ? 1 : 0)); } this.scheduleWorkOnDispatcher(); } /** * Invokes work on the Reactor. Should only be called from ReactorDispatcher.invoke() */ private void processSendWork() { if (!hasConnected.get()) { logger.warning("Not connected. Not processing send work."); return; } if (isDisposed.get()) { logger.info("Sender is closed. 
Not executing work."); return; } while (hasConnected.get() && sender.getCredit() > 0) { final WeightedDeliveryTag weightedDelivery; final RetriableWorkItem workItem; final String deliveryTag; synchronized (pendingSendLock) { weightedDelivery = this.pendingSendsQueue.poll(); if (weightedDelivery != null) { deliveryTag = weightedDelivery.getDeliveryTag(); workItem = this.pendingSendsMap.get(deliveryTag); } else { workItem = null; deliveryTag = null; } } if (workItem == null) { if (deliveryTag != null) { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("sendData not found for this delivery."); } break; } Delivery delivery = null; boolean linkAdvance = false; int sentMsgSize = 0; Exception sendException = null; try { workItem.beforeTry(); delivery = sender.delivery(deliveryTag.getBytes(UTF_8)); delivery.setMessageFormat(workItem.getMessageFormat()); if (workItem.isDeliveryStateProvided()) { delivery.disposition(workItem.getDeliveryState()); } workItem.send(sender); linkAdvance = sender.advance(); } catch (Exception exception) { sendException = exception; } if (linkAdvance) { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("Sent message."); workItem.setWaitingForAck(); scheduler.schedule(new SendTimeout(deliveryTag), retryOptions.getTryTimeout().toMillis(), TimeUnit.MILLISECONDS); } else { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .addKeyValue("sentMessageSize", sentMsgSize) .addKeyValue("payloadActualSize", workItem.getEncodedMessageSize()) .log("Sendlink advance failed."); DeliveryState outcome = null; if (delivery != null) { outcome = delivery.getRemoteState(); delivery.free(); } final AmqpErrorContext context = handler.getErrorContext(sender); final Throwable exception = sendException != null ? new OperationCancelledException(String.format(Locale.US, "Entity(%s): send operation failed. 
Please see cause for more details", entityPath), sendException, context) : new OperationCancelledException(String.format(Locale.US, "Entity(%s): send operation failed while advancing delivery(tag: %s).", entityPath, deliveryTag), context); workItem.error(exception, outcome); } } } private void processDeliveredMessage(Delivery delivery) { final DeliveryState outcome = delivery.getRemoteState(); final String deliveryTag = new String(delivery.getTag(), UTF_8); logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("Process delivered message."); final RetriableWorkItem workItem = pendingSendsMap.remove(deliveryTag); if (workItem == null) { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("Mismatch (or send timed out)."); return; } else if (workItem.isDeliveryStateProvided()) { workItem.success(outcome); return; } if (outcome instanceof Accepted) { synchronized (errorConditionLock) { lastKnownLinkError = null; lastKnownErrorReportedAt = null; retryAttempts.set(0); } workItem.success(outcome); } else if (outcome instanceof Rejected) { final Rejected rejected = (Rejected) outcome; final org.apache.qpid.proton.amqp.transport.ErrorCondition error = rejected.getError(); final Exception exception = ExceptionUtil.toException(error.getCondition().toString(), error.getDescription(), handler.getErrorContext(sender)); logger.atWarning() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .addKeyValue("rejected", rejected) .log("Delivery rejected."); final int retryAttempt; if (isGeneralSendError(error.getCondition())) { synchronized (errorConditionLock) { lastKnownLinkError = exception; lastKnownErrorReportedAt = Instant.now(); retryAttempt = retryAttempts.incrementAndGet(); } } else { retryAttempt = retryAttempts.get(); } final Duration retryInterval = retry.calculateRetryDelay(exception, retryAttempt); if (retryInterval == null || retryInterval.compareTo(workItem.getTimeoutTracker().remaining()) > 0) { cleanupFailedSend(workItem, exception, outcome); } 
else { workItem.setLastKnownException(exception); try { reactorProvider.getReactorDispatcher().invoke(() -> sendWork(workItem), retryInterval); } catch (IOException | RejectedExecutionException schedulerException) { exception.initCause(schedulerException); cleanupFailedSend( workItem, new AmqpException(false, String.format(Locale.US, "Entity(%s): send operation failed while scheduling a" + " retry on Reactor, see cause for more details.", entityPath), schedulerException, handler.getErrorContext(sender)), outcome); } } } else if (outcome instanceof Released) { cleanupFailedSend(workItem, new OperationCancelledException(outcome.toString(), handler.getErrorContext(sender)), outcome); } else if (outcome instanceof Declared) { final Declared declared = (Declared) outcome; workItem.success(declared); } else { cleanupFailedSend(workItem, new AmqpException(false, outcome.toString(), handler.getErrorContext(sender)), outcome); } } private void scheduleWorkOnDispatcher() { try { reactorProvider.getReactorDispatcher().invoke(this::processSendWork); } catch (IOException e) { logger.warning("Error scheduling work on reactor.", e); } catch (RejectedExecutionException e) { logger.info("Error scheduling work on reactor because of RejectedExecutionException."); } } private void cleanupFailedSend(final RetriableWorkItem workItem, final Exception exception, final DeliveryState deliveryState) { workItem.error(exception, deliveryState); } private void completeClose() { isClosedMono.emitEmpty((signalType, result) -> { addSignalTypeAndResult(logger.atWarning(), signalType, result).log("Unable to emit shutdown signal."); return false; }); subscriptions.dispose(); if (tokenManager != null) { tokenManager.close(); } } /** * Clears pending sends and puts an error in there. * * @param error Error to pass to pending sends. */ private void handleError(Throwable error) { synchronized (pendingSendLock) { if (isDisposed.getAndSet(true)) { logger.verbose("This was already disposed. 
Dropping error."); } else { logger.atVerbose() .addKeyValue(PENDING_SENDS_SIZE_KEY, () -> String.valueOf(pendingSendsMap.size())) .log("Disposing pending sends with error."); } pendingSendsMap.forEach((key, value) -> value.error(error, null)); pendingSendsMap.clear(); pendingSendsQueue.clear(); } completeClose(); } private void handleClose() { final String message = String.format("Could not complete sends because link '%s' for '%s' is closed.", getLinkName(), entityPath); final AmqpErrorContext context = handler.getErrorContext(sender); synchronized (pendingSendLock) { if (isDisposed.getAndSet(true)) { logger.verbose("This was already disposed."); } else { logger.atVerbose() .addKeyValue(PENDING_SENDS_SIZE_KEY, () -> String.valueOf(pendingSendsMap.size())) .log("Disposing pending sends."); } pendingSendsMap.forEach((key, value) -> value.error(new AmqpException(true, message, context), null)); pendingSendsMap.clear(); pendingSendsQueue.clear(); } completeClose(); } private static boolean isGeneralSendError(Symbol amqpError) { return (amqpError == AmqpErrorCode.SERVER_BUSY_ERROR || amqpError == AmqpErrorCode.TIMEOUT_ERROR || amqpError == AmqpErrorCode.RESOURCE_LIMIT_EXCEEDED); } private static class WeightedDeliveryTag { private final String deliveryTag; private final int priority; WeightedDeliveryTag(final String deliveryTag, final int priority) { this.deliveryTag = deliveryTag; this.priority = priority; } private String getDeliveryTag() { return this.deliveryTag; } private int getPriority() { return this.priority; } } private static class DeliveryTagComparator implements Comparator<WeightedDeliveryTag>, Serializable { private static final long serialVersionUID = -7057500582037295635L; @Override public int compare(WeightedDeliveryTag deliveryTag0, WeightedDeliveryTag deliveryTag1) { return deliveryTag1.getPriority() - deliveryTag0.getPriority(); } } /** * Keeps track of messages that have been sent, but may not have been acknowledged by the service. 
*/ private class SendTimeout implements Runnable { private final String deliveryTag; SendTimeout(String deliveryTag) { this.deliveryTag = deliveryTag; } @Override public void run() { final RetriableWorkItem workItem = pendingSendsMap.remove(deliveryTag); if (workItem == null) { return; } Exception cause = lastKnownLinkError; final Exception lastError; final Instant lastErrorTime; synchronized (errorConditionLock) { lastError = lastKnownLinkError; lastErrorTime = lastKnownErrorReportedAt; } if (lastError != null && lastErrorTime != null) { final Instant now = Instant.now(); final boolean isLastErrorAfterSleepTime = lastErrorTime.isAfter(now.minusSeconds(SERVER_BUSY_BASE_SLEEP_TIME_IN_SECS)); final boolean isServerBusy = lastError instanceof AmqpException && isLastErrorAfterSleepTime; final boolean isLastErrorAfterOperationTimeout = lastErrorTime.isAfter(now.minus(retryOptions.getTryTimeout())); cause = isServerBusy || isLastErrorAfterOperationTimeout ? lastError : null; } final AmqpException exception; if (cause instanceof AmqpException) { exception = (AmqpException) cause; } else { exception = new AmqpException(true, AmqpErrorCondition.TIMEOUT_ERROR, String.format(Locale.US, "Entity(%s): Send operation timed out", entityPath), handler.getErrorContext(sender)); } workItem.error(exception, null); } } }
Nit: can `size` ever be non-positive here? If it can, is it worth logging when that happens?
/**
 * Sends the list of messages to the service as a single AMQP batch-format message
 * ({@code AmqpConstants.AMQP_BATCH_MESSAGE_FORMAT}). The first message supplies the batch
 * envelope section; every message in the list (including the first) is then encoded as an
 * "amqp:data:binary" section and appended to the batch.
 *
 * @param messageBatch The messages to send together; must not be null or empty.
 * @param deliveryState Delivery state to pre-settle the delivery with; may be {@code null}.
 * @return A mono that completes when the batch has been sent, or errors if the sender is
 *     disposed, the batch is empty, or the encoded payload exceeds the link's max message size.
 */
public Mono<Void> send(List<Message> messageBatch, DeliveryState deliveryState) {
    if (isDisposed.get()) {
        return Mono.error(new IllegalStateException(String.format(
            "connectionId[%s] linkName[%s] Cannot publish data batch when disposed.", handler.getConnectionId(),
            getLinkName())));
    }
    // Guard against a null/empty batch; otherwise messageBatch.get(0) below would surface as an
    // unhelpful IndexOutOfBoundsException inside the reactive chain.
    if (messageBatch == null || messageBatch.isEmpty()) {
        return Mono.error(new IllegalArgumentException("'messageBatch' cannot be null or empty."));
    }
    if (messageBatch.size() == 1) {
        // A single message needs no batch envelope; send it directly.
        return send(messageBatch.get(0), deliveryState);
    }
    return getLinkSize()
        .flatMap(maxMessageSize -> {
            int totalEncodedSize = 0;
            final CompositeReadableBuffer buffer = new CompositeReadableBuffer();
            // The first message provides the envelope (annotations, message-id, group-id).
            final byte[] envelopBytes = batchEnvelopBytes(messageBatch.get(0), maxMessageSize);
            if (envelopBytes.length > 0) {
                totalEncodedSize += envelopBytes.length;
                if (totalEncodedSize > maxMessageSize) {
                    return batchBufferOverflowError(maxMessageSize);
                }
                buffer.append(envelopBytes);
            }
            for (final Message message : messageBatch) {
                final byte[] sectionBytes = batchBinaryDataSectionBytes(message, maxMessageSize);
                if (sectionBytes.length > 0) {
                    totalEncodedSize += sectionBytes.length;
                    if (totalEncodedSize > maxMessageSize) {
                        return batchBufferOverflowError(maxMessageSize);
                    }
                    buffer.append(sectionBytes);
                } else {
                    // An empty encoding is unexpected for a well-formed message; log it so a
                    // silently dropped batch entry can be diagnosed.
                    logger.info("Ignoring the empty message org.apache.qpid.proton.message.message@{} in the batch.",
                        Integer.toHexString(System.identityHashCode(message)));
                }
            }
            return send(buffer, AmqpConstants.AMQP_BATCH_MESSAGE_FORMAT, deliveryState);
        }).then();
}
totalEncodedSize += sectionBytes.length;
/**
 * Sends the given messages together as one AMQP batch-format message. The first message in
 * the list contributes the envelope section; each message is appended to the batch as a
 * binary data section.
 *
 * @param messageBatch The messages to transmit as a batch.
 * @param deliveryState Delivery state used to pre-settle the delivery; may be {@code null}.
 * @return A mono that completes once the batch has been handed to the send link.
 */
public Mono<Void> send(List<Message> messageBatch, DeliveryState deliveryState) {
    if (isDisposed.get()) {
        return Mono.error(new IllegalStateException(String.format(
            "connectionId[%s] linkName[%s] Cannot publish data batch when disposed.", handler.getConnectionId(),
            getLinkName())));
    }
    if (messageBatch.size() == 1) {
        // No batching required for a single message.
        return send(messageBatch.get(0), deliveryState);
    }
    return getLinkSize()
        .flatMap(maxMessageSize -> {
            final CompositeReadableBuffer batchBuffer = new CompositeReadableBuffer();
            int encodedTotal = 0;
            // Envelope section comes from the first message of the batch.
            final byte[] envelopeBytes = batchEnvelopBytes(messageBatch.get(0), maxMessageSize);
            if (envelopeBytes.length > 0) {
                encodedTotal += envelopeBytes.length;
                if (encodedTotal > maxMessageSize) {
                    return batchBufferOverflowError(maxMessageSize);
                }
                batchBuffer.append(envelopeBytes);
            }
            for (final Message message : messageBatch) {
                final byte[] dataSectionBytes = batchBinaryDataSectionBytes(message, maxMessageSize);
                if (dataSectionBytes.length == 0) {
                    // Nothing was encoded for this entry; record it and move on.
                    logger.info("Ignoring the empty message org.apache.qpid.proton.message.message@{} in the batch.",
                        Integer.toHexString(System.identityHashCode(message)));
                    continue;
                }
                encodedTotal += dataSectionBytes.length;
                if (encodedTotal > maxMessageSize) {
                    return batchBufferOverflowError(maxMessageSize);
                }
                batchBuffer.append(dataSectionBytes);
            }
            return send(batchBuffer, AmqpConstants.AMQP_BATCH_MESSAGE_FORMAT, deliveryState);
        }).then();
}
class ReactorSender implements AmqpSendLink, AsyncCloseable, AutoCloseable { private static final String DELIVERY_TAG_KEY = "deliveryTag"; private static final String PENDING_SENDS_SIZE_KEY = "pending_sends_size"; private final String entityPath; private final Sender sender; private final SendLinkHandler handler; private final ReactorProvider reactorProvider; private final Disposable.Composite subscriptions; private final AtomicBoolean hasConnected = new AtomicBoolean(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicInteger retryAttempts = new AtomicInteger(); private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final Object pendingSendLock = new Object(); private final ConcurrentHashMap<String, RetriableWorkItem> pendingSendsMap = new ConcurrentHashMap<>(); private final PriorityQueue<WeightedDeliveryTag> pendingSendsQueue = new PriorityQueue<>(1000, new DeliveryTagComparator()); private final ClientLogger logger; private final Flux<AmqpEndpointState> endpointStates; private final TokenManager tokenManager; private final MessageSerializer messageSerializer; private final AmqpRetryPolicy retry; private final AmqpRetryOptions retryOptions; private final String activeTimeoutMessage; private final Scheduler scheduler; private final AmqpMetricsProvider metricsProvider; private final Object errorConditionLock = new Object(); private volatile Exception lastKnownLinkError; private volatile Instant lastKnownErrorReportedAt; private volatile int linkSize; /** * Creates an instance of {@link ReactorSender}. * * @param amqpConnection The parent {@link AmqpConnection} that this sender lives in. * @param entityPath The message broker address for the sender. * @param sender The underlying proton-j sender. * @param handler The proton-j handler associated with the sender. * @param reactorProvider Provider to schedule work on the proton-j reactor. * @param tokenManager Token manager for authorising with the CBS node. 
Can be {@code null} if it is part of the * transaction manager. * @param messageSerializer Serializer to deserialise and serialize AMQP messages. * @param retryOptions Retry options. * @param scheduler Scheduler to schedule send timeout. */ ReactorSender(AmqpConnection amqpConnection, String entityPath, Sender sender, SendLinkHandler handler, ReactorProvider reactorProvider, TokenManager tokenManager, MessageSerializer messageSerializer, AmqpRetryOptions retryOptions, Scheduler scheduler, AmqpMetricsProvider metricsProvider) { this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.sender = Objects.requireNonNull(sender, "'sender' cannot be null."); this.handler = Objects.requireNonNull(handler, "'handler' cannot be null."); this.reactorProvider = Objects.requireNonNull(reactorProvider, "'reactorProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null."); this.scheduler = Objects.requireNonNull(scheduler, "'scheduler' cannot be null."); this.retry = RetryUtil.getRetryPolicy(retryOptions); this.tokenManager = tokenManager; this.metricsProvider = metricsProvider; String connectionId = handler.getConnectionId() == null ? NOT_APPLICABLE : handler.getConnectionId(); String linkName = getLinkName() == null ? 
NOT_APPLICABLE : getLinkName(); Map<String, Object> loggingContext = createContextWithConnectionId(connectionId); loggingContext.put(LINK_NAME_KEY, linkName); loggingContext.put(ENTITY_PATH_KEY, entityPath); this.logger = new ClientLogger(ReactorSender.class, loggingContext); this.activeTimeoutMessage = String.format( "ReactorSender connectionId[%s] linkName[%s]: Waiting for send and receive handler to be ACTIVE", handler.getConnectionId(), handler.getLinkName()); this.endpointStates = this.handler.getEndpointStates() .map(state -> { logger.verbose("State {}", state); this.hasConnected.set(state == EndpointState.ACTIVE); return AmqpEndpointStateUtil.getConnectionState(state); }) .doOnError(error -> { hasConnected.set(false); handleError(error); }) .doOnComplete(() -> { hasConnected.set(false); handleClose(); }) .cache(1); this.subscriptions = Disposables.composite( this.endpointStates.subscribe(), this.handler.getDeliveredMessages().subscribe(this::processDeliveredMessage), this.handler.getLinkCredits().subscribe(credit -> { logger.atVerbose().addKeyValue("credits", credit) .log("Credits on link."); this.scheduleWorkOnDispatcher(); }), amqpConnection.getShutdownSignals().flatMap(signal -> { logger.verbose("Shutdown signal received."); hasConnected.set(false); return closeAsync("Connection shutdown.", null); }).subscribe() ); if (tokenManager != null) { this.subscriptions.add(tokenManager.getAuthorizationResults().onErrorResume(error -> { final Mono<Void> operation = closeAsync(String.format("connectionId[%s] linkName[%s] Token renewal failure. Disposing send " + "link.", amqpConnection.getId(), getLinkName()), new ErrorCondition(Symbol.getSymbol(NOT_ALLOWED.getErrorCondition()), error.getMessage())); return operation.then(Mono.empty()); }).subscribe(response -> { logger.atVerbose().addKeyValue("response", response) .log("Token refreshed."); }, error -> { }, () -> { logger.verbose(" Authorization completed. Disposing."); closeAsync("Authorization completed. 
Disposing.", null).subscribe(); })); } } @Override public Flux<AmqpEndpointState> getEndpointStates() { return endpointStates; } @Override public Mono<Void> send(Message message) { return send(message, null); } @Override public Mono<Void> send(Message message, DeliveryState deliveryState) { if (isDisposed.get()) { return Mono.error(new IllegalStateException(String.format( "connectionId[%s] linkName[%s] Cannot publish message when disposed.", handler.getConnectionId(), getLinkName()))); } return getLinkSize() .flatMap(maxMessageSize -> { final int payloadSize = messageSerializer.getSize(message); final int allocationSize = Math.min(payloadSize + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize); final byte[] bytes = new byte[allocationSize]; int encodedSize; try { encodedSize = message.encode(bytes, 0, allocationSize); } catch (BufferOverflowException exception) { final String errorMessage = String.format(Locale.US, "Error sending. Size of the payload exceeded maximum message size: %s kb", maxMessageSize / 1024); final Throwable error = new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, errorMessage, exception, handler.getErrorContext(sender)); return Mono.error(error); } return send(bytes, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT, deliveryState); }).then(); } @Override public Mono<Void> send(List<Message> messageBatch) { return send(messageBatch, null); } @Override private byte[] batchEnvelopBytes(Message envelopMessage, int maxMessageSize) { final Message message = Proton.message(); message.setMessageAnnotations(envelopMessage.getMessageAnnotations()); final int size = messageSerializer.getSize(message); final int allocationSize = Math.min(size + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize); final byte[] encodedBytes = new byte[allocationSize]; final int encodedSize = message.encode(encodedBytes, 0, allocationSize); return Arrays.copyOf(encodedBytes, encodedSize); } private byte[] batchBinaryDataSectionBytes(Message sectionMessage, int 
maxMessageSize) { final int size = messageSerializer.getSize(sectionMessage); final int allocationSize = Math.min(size + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize); final byte[] encodedBytes = new byte[allocationSize]; final int encodedSize = sectionMessage.encode(encodedBytes, 0, allocationSize); final Message message = Proton.message(); final Data binaryData = new Data(new Binary(encodedBytes, 0, encodedSize)); message.setBody(binaryData); final int binaryRawSize = binaryData.getValue().getLength(); final int binaryEncodedSize = binaryEncodedSize(binaryRawSize); final byte[] binaryEncodedBytes = new byte[binaryEncodedSize]; message.encode(binaryEncodedBytes, 0, binaryEncodedSize); return binaryEncodedBytes; } private Mono<Void> batchBufferOverflowError(int maxMessageSize) { return Mono.error(new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, String.format(Locale.US, "Size of the payload exceeded maximum message size: %s kb", maxMessageSize / 1024), new BufferOverflowException(), handler.getErrorContext(sender))); } /** * Compute the encoded size when encoding a binary data of given size per Amqp 1.0 spec "amqp:data:binary" format. * * @param binaryRawSize the length of the binary data. * @return the encoded size. 
*/ private int binaryEncodedSize(int binaryRawSize) { if (binaryRawSize <= 255) { return 5 + binaryRawSize; } else { return 8 + binaryRawSize; } } @Override public AmqpErrorContext getErrorContext() { return handler.getErrorContext(sender); } @Override public String getLinkName() { return sender.getName(); } @Override public String getEntityPath() { return entityPath; } @Override public String getHostname() { return handler.getHostname(); } @Override public Mono<Integer> getLinkSize() { if (linkSize > 0) { return Mono.defer(() -> Mono.just(this.linkSize)); } synchronized (this) { if (linkSize > 0) { return Mono.defer(() -> Mono.just(linkSize)); } return RetryUtil.withRetry(getEndpointStates().takeUntil(state -> state == AmqpEndpointState.ACTIVE), retryOptions, activeTimeoutMessage) .then(Mono.fromCallable(() -> { final UnsignedLong remoteMaxMessageSize = sender.getRemoteMaxMessageSize(); if (remoteMaxMessageSize != null) { linkSize = remoteMaxMessageSize.intValue(); } else { logger.warning("Could not get the getRemoteMaxMessageSize. Returning current link size: {}", linkSize); } return linkSize; })); } } @Override public boolean isDisposed() { return isDisposed.get(); } /** * Blocking call that disposes of the sender. * * @see */ @Override public void dispose() { close(); } /** * Blocking call that disposes of the sender. * * @see */ @Override public void close() { closeAsync().block(retryOptions.getTryTimeout()); } @Override public Mono<Void> closeAsync() { return closeAsync("User invoked close operation.", null); } /** * Disposes of the sender. * * @param errorCondition Error condition associated with close operation. * @param message Message associated with why the sender was closed. * * @return A mono that completes when the send link has closed. 
*/ Mono<Void> closeAsync(String message, ErrorCondition errorCondition) { if (isDisposed.getAndSet(true)) { return isClosedMono.asMono(); } addErrorCondition(logger.atVerbose(), errorCondition) .log("Setting error condition and disposing. {}", message); final Runnable closeWork = () -> { if (errorCondition != null && sender.getCondition() == null) { sender.setCondition(errorCondition); } sender.close(); }; return Mono.fromRunnable(() -> { try { reactorProvider.getReactorDispatcher().invoke(closeWork); } catch (IOException e) { logger.warning("Could not schedule close work. Running manually. And completing close.", e); closeWork.run(); handleClose(); } catch (RejectedExecutionException e) { logger.info("RejectedExecutionException scheduling close work. And completing close."); closeWork.run(); handleClose(); } }).then(isClosedMono.asMono()) .publishOn(Schedulers.boundedElastic()); } /** * A mono that completes when the sender has completely closed. * * @return mono that completes when the sender has completely closed. */ Mono<Void> isClosed() { return isClosedMono.asMono(); } @Override public Mono<DeliveryState> send(byte[] bytes, int arrayOffset, int messageFormat, DeliveryState deliveryState) { return onEndpointActive().then(Mono.create(sink -> { sendWork(new RetriableWorkItem(bytes, arrayOffset, messageFormat, sink, retryOptions.getTryTimeout(), deliveryState, metricsProvider)); })); } Mono<DeliveryState> send(ReadableBuffer buffer, int messageFormat, DeliveryState deliveryState) { return onEndpointActive().then(Mono.create(sink -> { sendWork(new RetriableWorkItem(buffer, messageFormat, sink, retryOptions.getTryTimeout(), deliveryState, metricsProvider)); })); } private Flux<EndpointState> onEndpointActive() { return RetryUtil.withRetry(handler.getEndpointStates().takeUntil(state -> state == EndpointState.ACTIVE), retryOptions, activeTimeoutMessage); } /** * Add the work item in pending send to be processed on {@link ReactorDispatcher} thread. 
* * @param workItem to be processed. */ private void sendWork(RetriableWorkItem workItem) { final String deliveryTag = UUID.randomUUID().toString().replace("-", ""); synchronized (pendingSendLock) { this.pendingSendsMap.put(deliveryTag, workItem); this.pendingSendsQueue.offer(new WeightedDeliveryTag(deliveryTag, workItem.hasBeenRetried() ? 1 : 0)); } this.scheduleWorkOnDispatcher(); } /** * Invokes work on the Reactor. Should only be called from ReactorDispatcher.invoke() */ private void processSendWork() { if (!hasConnected.get()) { logger.warning("Not connected. Not processing send work."); return; } if (isDisposed.get()) { logger.info("Sender is closed. Not executing work."); return; } while (hasConnected.get() && sender.getCredit() > 0) { final WeightedDeliveryTag weightedDelivery; final RetriableWorkItem workItem; final String deliveryTag; synchronized (pendingSendLock) { weightedDelivery = this.pendingSendsQueue.poll(); if (weightedDelivery != null) { deliveryTag = weightedDelivery.getDeliveryTag(); workItem = this.pendingSendsMap.get(deliveryTag); } else { workItem = null; deliveryTag = null; } } if (workItem == null) { if (deliveryTag != null) { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("sendData not found for this delivery."); } break; } Delivery delivery = null; boolean linkAdvance = false; int sentMsgSize = 0; Exception sendException = null; try { workItem.beforeTry(); delivery = sender.delivery(deliveryTag.getBytes(UTF_8)); delivery.setMessageFormat(workItem.getMessageFormat()); if (workItem.isDeliveryStateProvided()) { delivery.disposition(workItem.getDeliveryState()); } workItem.send(sender); linkAdvance = sender.advance(); } catch (Exception exception) { sendException = exception; } if (linkAdvance) { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("Sent message."); workItem.setWaitingForAck(); scheduler.schedule(new SendTimeout(deliveryTag), retryOptions.getTryTimeout().toMillis(), 
TimeUnit.MILLISECONDS); } else { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .addKeyValue("sentMessageSize", sentMsgSize) .addKeyValue("payloadActualSize", workItem.getEncodedMessageSize()) .log("Sendlink advance failed."); DeliveryState outcome = null; if (delivery != null) { outcome = delivery.getRemoteState(); delivery.free(); } final AmqpErrorContext context = handler.getErrorContext(sender); final Throwable exception = sendException != null ? new OperationCancelledException(String.format(Locale.US, "Entity(%s): send operation failed. Please see cause for more details", entityPath), sendException, context) : new OperationCancelledException(String.format(Locale.US, "Entity(%s): send operation failed while advancing delivery(tag: %s).", entityPath, deliveryTag), context); workItem.error(exception, outcome); } } } private void processDeliveredMessage(Delivery delivery) { final DeliveryState outcome = delivery.getRemoteState(); final String deliveryTag = new String(delivery.getTag(), UTF_8); logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("Process delivered message."); final RetriableWorkItem workItem = pendingSendsMap.remove(deliveryTag); if (workItem == null) { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("Mismatch (or send timed out)."); return; } else if (workItem.isDeliveryStateProvided()) { workItem.success(outcome); return; } if (outcome instanceof Accepted) { synchronized (errorConditionLock) { lastKnownLinkError = null; lastKnownErrorReportedAt = null; retryAttempts.set(0); } workItem.success(outcome); } else if (outcome instanceof Rejected) { final Rejected rejected = (Rejected) outcome; final org.apache.qpid.proton.amqp.transport.ErrorCondition error = rejected.getError(); final Exception exception = ExceptionUtil.toException(error.getCondition().toString(), error.getDescription(), handler.getErrorContext(sender)); logger.atWarning() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) 
.addKeyValue("rejected", rejected) .log("Delivery rejected."); final int retryAttempt; if (isGeneralSendError(error.getCondition())) { synchronized (errorConditionLock) { lastKnownLinkError = exception; lastKnownErrorReportedAt = Instant.now(); retryAttempt = retryAttempts.incrementAndGet(); } } else { retryAttempt = retryAttempts.get(); } final Duration retryInterval = retry.calculateRetryDelay(exception, retryAttempt); if (retryInterval == null || retryInterval.compareTo(workItem.getTimeoutTracker().remaining()) > 0) { cleanupFailedSend(workItem, exception, outcome); } else { workItem.setLastKnownException(exception); try { reactorProvider.getReactorDispatcher().invoke(() -> sendWork(workItem), retryInterval); } catch (IOException | RejectedExecutionException schedulerException) { exception.initCause(schedulerException); cleanupFailedSend( workItem, new AmqpException(false, String.format(Locale.US, "Entity(%s): send operation failed while scheduling a" + " retry on Reactor, see cause for more details.", entityPath), schedulerException, handler.getErrorContext(sender)), outcome); } } } else if (outcome instanceof Released) { cleanupFailedSend(workItem, new OperationCancelledException(outcome.toString(), handler.getErrorContext(sender)), outcome); } else if (outcome instanceof Declared) { final Declared declared = (Declared) outcome; workItem.success(declared); } else { cleanupFailedSend(workItem, new AmqpException(false, outcome.toString(), handler.getErrorContext(sender)), outcome); } } private void scheduleWorkOnDispatcher() { try { reactorProvider.getReactorDispatcher().invoke(this::processSendWork); } catch (IOException e) { logger.warning("Error scheduling work on reactor.", e); } catch (RejectedExecutionException e) { logger.info("Error scheduling work on reactor because of RejectedExecutionException."); } } private void cleanupFailedSend(final RetriableWorkItem workItem, final Exception exception, final DeliveryState deliveryState) { 
workItem.error(exception, deliveryState); } private void completeClose() { isClosedMono.emitEmpty((signalType, result) -> { addSignalTypeAndResult(logger.atWarning(), signalType, result).log("Unable to emit shutdown signal."); return false; }); subscriptions.dispose(); if (tokenManager != null) { tokenManager.close(); } } /** * Clears pending sends and puts an error in there. * * @param error Error to pass to pending sends. */ private void handleError(Throwable error) { synchronized (pendingSendLock) { if (isDisposed.getAndSet(true)) { logger.verbose("This was already disposed. Dropping error."); } else { logger.atVerbose() .addKeyValue(PENDING_SENDS_SIZE_KEY, () -> String.valueOf(pendingSendsMap.size())) .log("Disposing pending sends with error."); } pendingSendsMap.forEach((key, value) -> value.error(error, null)); pendingSendsMap.clear(); pendingSendsQueue.clear(); } completeClose(); } private void handleClose() { final String message = String.format("Could not complete sends because link '%s' for '%s' is closed.", getLinkName(), entityPath); final AmqpErrorContext context = handler.getErrorContext(sender); synchronized (pendingSendLock) { if (isDisposed.getAndSet(true)) { logger.verbose("This was already disposed."); } else { logger.atVerbose() .addKeyValue(PENDING_SENDS_SIZE_KEY, () -> String.valueOf(pendingSendsMap.size())) .log("Disposing pending sends."); } pendingSendsMap.forEach((key, value) -> value.error(new AmqpException(true, message, context), null)); pendingSendsMap.clear(); pendingSendsQueue.clear(); } completeClose(); } private static boolean isGeneralSendError(Symbol amqpError) { return (amqpError == AmqpErrorCode.SERVER_BUSY_ERROR || amqpError == AmqpErrorCode.TIMEOUT_ERROR || amqpError == AmqpErrorCode.RESOURCE_LIMIT_EXCEEDED); } private static class WeightedDeliveryTag { private final String deliveryTag; private final int priority; WeightedDeliveryTag(final String deliveryTag, final int priority) { this.deliveryTag = deliveryTag; this.priority 
= priority; } private String getDeliveryTag() { return this.deliveryTag; } private int getPriority() { return this.priority; } } private static class DeliveryTagComparator implements Comparator<WeightedDeliveryTag>, Serializable { private static final long serialVersionUID = -7057500582037295635L; @Override public int compare(WeightedDeliveryTag deliveryTag0, WeightedDeliveryTag deliveryTag1) { return deliveryTag1.getPriority() - deliveryTag0.getPriority(); } } /** * Keeps track of messages that have been sent, but may not have been acknowledged by the service. */ private class SendTimeout implements Runnable { private final String deliveryTag; SendTimeout(String deliveryTag) { this.deliveryTag = deliveryTag; } @Override public void run() { final RetriableWorkItem workItem = pendingSendsMap.remove(deliveryTag); if (workItem == null) { return; } Exception cause = lastKnownLinkError; final Exception lastError; final Instant lastErrorTime; synchronized (errorConditionLock) { lastError = lastKnownLinkError; lastErrorTime = lastKnownErrorReportedAt; } if (lastError != null && lastErrorTime != null) { final Instant now = Instant.now(); final boolean isLastErrorAfterSleepTime = lastErrorTime.isAfter(now.minusSeconds(SERVER_BUSY_BASE_SLEEP_TIME_IN_SECS)); final boolean isServerBusy = lastError instanceof AmqpException && isLastErrorAfterSleepTime; final boolean isLastErrorAfterOperationTimeout = lastErrorTime.isAfter(now.minus(retryOptions.getTryTimeout())); cause = isServerBusy || isLastErrorAfterOperationTimeout ? lastError : null; } final AmqpException exception; if (cause instanceof AmqpException) { exception = (AmqpException) cause; } else { exception = new AmqpException(true, AmqpErrorCondition.TIMEOUT_ERROR, String.format(Locale.US, "Entity(%s): Send operation timed out", entityPath), handler.getErrorContext(sender)); } workItem.error(exception, null); } } }
class ReactorSender implements AmqpSendLink, AsyncCloseable, AutoCloseable { private static final String DELIVERY_TAG_KEY = "deliveryTag"; private static final String PENDING_SENDS_SIZE_KEY = "pending_sends_size"; private final String entityPath; private final Sender sender; private final SendLinkHandler handler; private final ReactorProvider reactorProvider; private final Disposable.Composite subscriptions; private final AtomicBoolean hasConnected = new AtomicBoolean(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicInteger retryAttempts = new AtomicInteger(); private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final Object pendingSendLock = new Object(); private final ConcurrentHashMap<String, RetriableWorkItem> pendingSendsMap = new ConcurrentHashMap<>(); private final PriorityQueue<WeightedDeliveryTag> pendingSendsQueue = new PriorityQueue<>(1000, new DeliveryTagComparator()); private final ClientLogger logger; private final Flux<AmqpEndpointState> endpointStates; private final TokenManager tokenManager; private final MessageSerializer messageSerializer; private final AmqpRetryPolicy retry; private final AmqpRetryOptions retryOptions; private final String activeTimeoutMessage; private final Scheduler scheduler; private final AmqpMetricsProvider metricsProvider; private final Object errorConditionLock = new Object(); private volatile Exception lastKnownLinkError; private volatile Instant lastKnownErrorReportedAt; private volatile int linkSize; /** * Creates an instance of {@link ReactorSender}. * * @param amqpConnection The parent {@link AmqpConnection} that this sender lives in. * @param entityPath The message broker address for the sender. * @param sender The underlying proton-j sender. * @param handler The proton-j handler associated with the sender. * @param reactorProvider Provider to schedule work on the proton-j reactor. * @param tokenManager Token manager for authorising with the CBS node. 
Can be {@code null} if it is part of the * transaction manager. * @param messageSerializer Serializer to deserialise and serialize AMQP messages. * @param retryOptions Retry options. * @param scheduler Scheduler to schedule send timeout. */ ReactorSender(AmqpConnection amqpConnection, String entityPath, Sender sender, SendLinkHandler handler, ReactorProvider reactorProvider, TokenManager tokenManager, MessageSerializer messageSerializer, AmqpRetryOptions retryOptions, Scheduler scheduler, AmqpMetricsProvider metricsProvider) { this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.sender = Objects.requireNonNull(sender, "'sender' cannot be null."); this.handler = Objects.requireNonNull(handler, "'handler' cannot be null."); this.reactorProvider = Objects.requireNonNull(reactorProvider, "'reactorProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null."); this.scheduler = Objects.requireNonNull(scheduler, "'scheduler' cannot be null."); this.retry = RetryUtil.getRetryPolicy(retryOptions); this.tokenManager = tokenManager; this.metricsProvider = metricsProvider; String connectionId = handler.getConnectionId() == null ? NOT_APPLICABLE : handler.getConnectionId(); String linkName = getLinkName() == null ? 
NOT_APPLICABLE : getLinkName(); Map<String, Object> loggingContext = createContextWithConnectionId(connectionId); loggingContext.put(LINK_NAME_KEY, linkName); loggingContext.put(ENTITY_PATH_KEY, entityPath); this.logger = new ClientLogger(ReactorSender.class, loggingContext); this.activeTimeoutMessage = String.format( "ReactorSender connectionId[%s] linkName[%s]: Waiting for send and receive handler to be ACTIVE", handler.getConnectionId(), handler.getLinkName()); this.endpointStates = this.handler.getEndpointStates() .map(state -> { logger.verbose("State {}", state); this.hasConnected.set(state == EndpointState.ACTIVE); return AmqpEndpointStateUtil.getConnectionState(state); }) .doOnError(error -> { hasConnected.set(false); handleError(error); }) .doOnComplete(() -> { hasConnected.set(false); handleClose(); }) .cache(1); this.subscriptions = Disposables.composite( this.endpointStates.subscribe(), this.handler.getDeliveredMessages().subscribe(this::processDeliveredMessage), this.handler.getLinkCredits().subscribe(credit -> { logger.atVerbose().addKeyValue("credits", credit) .log("Credits on link."); this.scheduleWorkOnDispatcher(); }), amqpConnection.getShutdownSignals().flatMap(signal -> { logger.verbose("Shutdown signal received."); hasConnected.set(false); return closeAsync("Connection shutdown.", null); }).subscribe() ); if (tokenManager != null) { this.subscriptions.add(tokenManager.getAuthorizationResults().onErrorResume(error -> { final Mono<Void> operation = closeAsync(String.format("connectionId[%s] linkName[%s] Token renewal failure. Disposing send " + "link.", amqpConnection.getId(), getLinkName()), new ErrorCondition(Symbol.getSymbol(NOT_ALLOWED.getErrorCondition()), error.getMessage())); return operation.then(Mono.empty()); }).subscribe(response -> { logger.atVerbose().addKeyValue("response", response) .log("Token refreshed."); }, error -> { }, () -> { logger.verbose(" Authorization completed. Disposing."); closeAsync("Authorization completed. 
Disposing.", null).subscribe(); })); } } @Override public Flux<AmqpEndpointState> getEndpointStates() { return endpointStates; } @Override public Mono<Void> send(Message message) { return send(message, null); } @Override public Mono<Void> send(Message message, DeliveryState deliveryState) { if (isDisposed.get()) { return Mono.error(new IllegalStateException(String.format( "connectionId[%s] linkName[%s] Cannot publish message when disposed.", handler.getConnectionId(), getLinkName()))); } return getLinkSize() .flatMap(maxMessageSize -> { final int payloadSize = messageSerializer.getSize(message); final int allocationSize = Math.min(payloadSize + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize); final byte[] bytes = new byte[allocationSize]; int encodedSize; try { encodedSize = message.encode(bytes, 0, allocationSize); } catch (BufferOverflowException exception) { final String errorMessage = String.format(Locale.US, "Error sending. Size of the payload exceeded maximum message size: %s kb", maxMessageSize / 1024); final Throwable error = new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, errorMessage, exception, handler.getErrorContext(sender)); return Mono.error(error); } return send(bytes, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT, deliveryState); }).then(); } @Override public Mono<Void> send(List<Message> messageBatch) { return send(messageBatch, null); } @Override private byte[] batchEnvelopBytes(Message envelopMessage, int maxMessageSize) { final Message message = Proton.message(); message.setMessageAnnotations(envelopMessage.getMessageAnnotations()); if ((envelopMessage.getMessageId() instanceof String) && !CoreUtils.isNullOrEmpty((String) envelopMessage.getMessageId())) { message.setMessageId(envelopMessage.getMessageId()); } if (!CoreUtils.isNullOrEmpty(envelopMessage.getGroupId())) { message.setGroupId(envelopMessage.getGroupId()); } final int size = messageSerializer.getSize(message); final int allocationSize = Math.min(size + 
MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize); final byte[] encodedBytes = new byte[allocationSize]; final int encodedSize = message.encode(encodedBytes, 0, allocationSize); return Arrays.copyOf(encodedBytes, encodedSize); } private byte[] batchBinaryDataSectionBytes(Message sectionMessage, int maxMessageSize) { final int size = messageSerializer.getSize(sectionMessage); final int allocationSize = Math.min(size + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize); final byte[] encodedBytes = new byte[allocationSize]; final int encodedSize = sectionMessage.encode(encodedBytes, 0, allocationSize); final Message message = Proton.message(); final Data binaryData = new Data(new Binary(encodedBytes, 0, encodedSize)); message.setBody(binaryData); final int binaryRawSize = binaryData.getValue().getLength(); final int binaryEncodedSize = binaryEncodedSize(binaryRawSize); final byte[] binaryEncodedBytes = new byte[binaryEncodedSize]; message.encode(binaryEncodedBytes, 0, binaryEncodedSize); return binaryEncodedBytes; } private Mono<Void> batchBufferOverflowError(int maxMessageSize) { return FluxUtil.monoError(logger, new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, String.format(Locale.US, "Size of the payload exceeded maximum message size: %s kb", maxMessageSize / 1024), new BufferOverflowException(), handler.getErrorContext(sender))); } /** * Compute the encoded size when encoding a binary data of given size per Amqp 1.0 spec "amqp:data:binary" format. * * @param binaryRawSize the length of the binary data. * @return the encoded size. 
*/ private int binaryEncodedSize(int binaryRawSize) { if (binaryRawSize <= 255) { return 5 + binaryRawSize; } else { return 8 + binaryRawSize; } } @Override public AmqpErrorContext getErrorContext() { return handler.getErrorContext(sender); } @Override public String getLinkName() { return sender.getName(); } @Override public String getEntityPath() { return entityPath; } @Override public String getHostname() { return handler.getHostname(); } @Override public Mono<Integer> getLinkSize() { if (linkSize > 0) { return Mono.defer(() -> Mono.just(this.linkSize)); } synchronized (this) { if (linkSize > 0) { return Mono.defer(() -> Mono.just(linkSize)); } return RetryUtil.withRetry(getEndpointStates().takeUntil(state -> state == AmqpEndpointState.ACTIVE), retryOptions, activeTimeoutMessage) .then(Mono.fromCallable(() -> { final UnsignedLong remoteMaxMessageSize = sender.getRemoteMaxMessageSize(); if (remoteMaxMessageSize != null) { linkSize = remoteMaxMessageSize.intValue(); } else { logger.warning("Could not get the getRemoteMaxMessageSize. Returning current link size: {}", linkSize); } return linkSize; })); } } @Override public boolean isDisposed() { return isDisposed.get(); } /** * Blocking call that disposes of the sender. * * @see */ @Override public void dispose() { close(); } /** * Blocking call that disposes of the sender. * * @see */ @Override public void close() { closeAsync().block(retryOptions.getTryTimeout()); } @Override public Mono<Void> closeAsync() { return closeAsync("User invoked close operation.", null); } /** * Disposes of the sender. * * @param errorCondition Error condition associated with close operation. * @param message Message associated with why the sender was closed. * * @return A mono that completes when the send link has closed. 
*/ Mono<Void> closeAsync(String message, ErrorCondition errorCondition) { if (isDisposed.getAndSet(true)) { return isClosedMono.asMono(); } addErrorCondition(logger.atVerbose(), errorCondition) .log("Setting error condition and disposing. {}", message); final Runnable closeWork = () -> { if (errorCondition != null && sender.getCondition() == null) { sender.setCondition(errorCondition); } sender.close(); }; return Mono.fromRunnable(() -> { try { reactorProvider.getReactorDispatcher().invoke(closeWork); } catch (IOException e) { logger.warning("Could not schedule close work. Running manually. And completing close.", e); closeWork.run(); handleClose(); } catch (RejectedExecutionException e) { logger.info("RejectedExecutionException scheduling close work. And completing close."); closeWork.run(); handleClose(); } }).then(isClosedMono.asMono()) .publishOn(Schedulers.boundedElastic()); } /** * A mono that completes when the sender has completely closed. * * @return mono that completes when the sender has completely closed. */ Mono<Void> isClosed() { return isClosedMono.asMono(); } @Override public Mono<DeliveryState> send(byte[] bytes, int arrayOffset, int messageFormat, DeliveryState deliveryState) { return onEndpointActive().then(Mono.create(sink -> { sendWork(new RetriableWorkItem(bytes, arrayOffset, messageFormat, sink, retryOptions.getTryTimeout(), deliveryState, metricsProvider)); })); } Mono<DeliveryState> send(ReadableBuffer buffer, int messageFormat, DeliveryState deliveryState) { return onEndpointActive().then(Mono.create(sink -> { sendWork(new RetriableWorkItem(buffer, messageFormat, sink, retryOptions.getTryTimeout(), deliveryState, metricsProvider)); })); } private Flux<EndpointState> onEndpointActive() { return RetryUtil.withRetry(handler.getEndpointStates().takeUntil(state -> state == EndpointState.ACTIVE), retryOptions, activeTimeoutMessage); } /** * Add the work item in pending send to be processed on {@link ReactorDispatcher} thread. 
* * @param workItem to be processed. */ private void sendWork(RetriableWorkItem workItem) { final String deliveryTag = UUID.randomUUID().toString().replace("-", ""); synchronized (pendingSendLock) { this.pendingSendsMap.put(deliveryTag, workItem); this.pendingSendsQueue.offer(new WeightedDeliveryTag(deliveryTag, workItem.hasBeenRetried() ? 1 : 0)); } this.scheduleWorkOnDispatcher(); } /** * Invokes work on the Reactor. Should only be called from ReactorDispatcher.invoke() */ private void processSendWork() { if (!hasConnected.get()) { logger.warning("Not connected. Not processing send work."); return; } if (isDisposed.get()) { logger.info("Sender is closed. Not executing work."); return; } while (hasConnected.get() && sender.getCredit() > 0) { final WeightedDeliveryTag weightedDelivery; final RetriableWorkItem workItem; final String deliveryTag; synchronized (pendingSendLock) { weightedDelivery = this.pendingSendsQueue.poll(); if (weightedDelivery != null) { deliveryTag = weightedDelivery.getDeliveryTag(); workItem = this.pendingSendsMap.get(deliveryTag); } else { workItem = null; deliveryTag = null; } } if (workItem == null) { if (deliveryTag != null) { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("sendData not found for this delivery."); } break; } Delivery delivery = null; boolean linkAdvance = false; int sentMsgSize = 0; Exception sendException = null; try { workItem.beforeTry(); delivery = sender.delivery(deliveryTag.getBytes(UTF_8)); delivery.setMessageFormat(workItem.getMessageFormat()); if (workItem.isDeliveryStateProvided()) { delivery.disposition(workItem.getDeliveryState()); } workItem.send(sender); linkAdvance = sender.advance(); } catch (Exception exception) { sendException = exception; } if (linkAdvance) { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("Sent message."); workItem.setWaitingForAck(); scheduler.schedule(new SendTimeout(deliveryTag), retryOptions.getTryTimeout().toMillis(), 
TimeUnit.MILLISECONDS); } else { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .addKeyValue("sentMessageSize", sentMsgSize) .addKeyValue("payloadActualSize", workItem.getEncodedMessageSize()) .log("Sendlink advance failed."); DeliveryState outcome = null; if (delivery != null) { outcome = delivery.getRemoteState(); delivery.free(); } final AmqpErrorContext context = handler.getErrorContext(sender); final Throwable exception = sendException != null ? new OperationCancelledException(String.format(Locale.US, "Entity(%s): send operation failed. Please see cause for more details", entityPath), sendException, context) : new OperationCancelledException(String.format(Locale.US, "Entity(%s): send operation failed while advancing delivery(tag: %s).", entityPath, deliveryTag), context); workItem.error(exception, outcome); } } } private void processDeliveredMessage(Delivery delivery) { final DeliveryState outcome = delivery.getRemoteState(); final String deliveryTag = new String(delivery.getTag(), UTF_8); logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("Process delivered message."); final RetriableWorkItem workItem = pendingSendsMap.remove(deliveryTag); if (workItem == null) { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("Mismatch (or send timed out)."); return; } else if (workItem.isDeliveryStateProvided()) { workItem.success(outcome); return; } if (outcome instanceof Accepted) { synchronized (errorConditionLock) { lastKnownLinkError = null; lastKnownErrorReportedAt = null; retryAttempts.set(0); } workItem.success(outcome); } else if (outcome instanceof Rejected) { final Rejected rejected = (Rejected) outcome; final org.apache.qpid.proton.amqp.transport.ErrorCondition error = rejected.getError(); final Exception exception = ExceptionUtil.toException(error.getCondition().toString(), error.getDescription(), handler.getErrorContext(sender)); logger.atWarning() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) 
.addKeyValue("rejected", rejected) .log("Delivery rejected."); final int retryAttempt; if (isGeneralSendError(error.getCondition())) { synchronized (errorConditionLock) { lastKnownLinkError = exception; lastKnownErrorReportedAt = Instant.now(); retryAttempt = retryAttempts.incrementAndGet(); } } else { retryAttempt = retryAttempts.get(); } final Duration retryInterval = retry.calculateRetryDelay(exception, retryAttempt); if (retryInterval == null || retryInterval.compareTo(workItem.getTimeoutTracker().remaining()) > 0) { cleanupFailedSend(workItem, exception, outcome); } else { workItem.setLastKnownException(exception); try { reactorProvider.getReactorDispatcher().invoke(() -> sendWork(workItem), retryInterval); } catch (IOException | RejectedExecutionException schedulerException) { exception.initCause(schedulerException); cleanupFailedSend( workItem, new AmqpException(false, String.format(Locale.US, "Entity(%s): send operation failed while scheduling a" + " retry on Reactor, see cause for more details.", entityPath), schedulerException, handler.getErrorContext(sender)), outcome); } } } else if (outcome instanceof Released) { cleanupFailedSend(workItem, new OperationCancelledException(outcome.toString(), handler.getErrorContext(sender)), outcome); } else if (outcome instanceof Declared) { final Declared declared = (Declared) outcome; workItem.success(declared); } else { cleanupFailedSend(workItem, new AmqpException(false, outcome.toString(), handler.getErrorContext(sender)), outcome); } } private void scheduleWorkOnDispatcher() { try { reactorProvider.getReactorDispatcher().invoke(this::processSendWork); } catch (IOException e) { logger.warning("Error scheduling work on reactor.", e); } catch (RejectedExecutionException e) { logger.info("Error scheduling work on reactor because of RejectedExecutionException."); } } private void cleanupFailedSend(final RetriableWorkItem workItem, final Exception exception, final DeliveryState deliveryState) { 
workItem.error(exception, deliveryState); } private void completeClose() { isClosedMono.emitEmpty((signalType, result) -> { addSignalTypeAndResult(logger.atWarning(), signalType, result).log("Unable to emit shutdown signal."); return false; }); subscriptions.dispose(); if (tokenManager != null) { tokenManager.close(); } } /** * Clears pending sends and puts an error in there. * * @param error Error to pass to pending sends. */ private void handleError(Throwable error) { synchronized (pendingSendLock) { if (isDisposed.getAndSet(true)) { logger.verbose("This was already disposed. Dropping error."); } else { logger.atVerbose() .addKeyValue(PENDING_SENDS_SIZE_KEY, () -> String.valueOf(pendingSendsMap.size())) .log("Disposing pending sends with error."); } pendingSendsMap.forEach((key, value) -> value.error(error, null)); pendingSendsMap.clear(); pendingSendsQueue.clear(); } completeClose(); } private void handleClose() { final String message = String.format("Could not complete sends because link '%s' for '%s' is closed.", getLinkName(), entityPath); final AmqpErrorContext context = handler.getErrorContext(sender); synchronized (pendingSendLock) { if (isDisposed.getAndSet(true)) { logger.verbose("This was already disposed."); } else { logger.atVerbose() .addKeyValue(PENDING_SENDS_SIZE_KEY, () -> String.valueOf(pendingSendsMap.size())) .log("Disposing pending sends."); } pendingSendsMap.forEach((key, value) -> value.error(new AmqpException(true, message, context), null)); pendingSendsMap.clear(); pendingSendsQueue.clear(); } completeClose(); } private static boolean isGeneralSendError(Symbol amqpError) { return (amqpError == AmqpErrorCode.SERVER_BUSY_ERROR || amqpError == AmqpErrorCode.TIMEOUT_ERROR || amqpError == AmqpErrorCode.RESOURCE_LIMIT_EXCEEDED); } private static class WeightedDeliveryTag { private final String deliveryTag; private final int priority; WeightedDeliveryTag(final String deliveryTag, final int priority) { this.deliveryTag = deliveryTag; this.priority 
= priority; } private String getDeliveryTag() { return this.deliveryTag; } private int getPriority() { return this.priority; } } private static class DeliveryTagComparator implements Comparator<WeightedDeliveryTag>, Serializable { private static final long serialVersionUID = -7057500582037295635L; @Override public int compare(WeightedDeliveryTag deliveryTag0, WeightedDeliveryTag deliveryTag1) { return deliveryTag1.getPriority() - deliveryTag0.getPriority(); } } /** * Keeps track of messages that have been sent, but may not have been acknowledged by the service. */ private class SendTimeout implements Runnable { private final String deliveryTag; SendTimeout(String deliveryTag) { this.deliveryTag = deliveryTag; } @Override public void run() { final RetriableWorkItem workItem = pendingSendsMap.remove(deliveryTag); if (workItem == null) { return; } Exception cause = lastKnownLinkError; final Exception lastError; final Instant lastErrorTime; synchronized (errorConditionLock) { lastError = lastKnownLinkError; lastErrorTime = lastKnownErrorReportedAt; } if (lastError != null && lastErrorTime != null) { final Instant now = Instant.now(); final boolean isLastErrorAfterSleepTime = lastErrorTime.isAfter(now.minusSeconds(SERVER_BUSY_BASE_SLEEP_TIME_IN_SECS)); final boolean isServerBusy = lastError instanceof AmqpException && isLastErrorAfterSleepTime; final boolean isLastErrorAfterOperationTimeout = lastErrorTime.isAfter(now.minus(retryOptions.getTryTimeout())); cause = isServerBusy || isLastErrorAfterOperationTimeout ? lastError : null; } final AmqpException exception; if (cause instanceof AmqpException) { exception = (AmqpException) cause; } else { exception = new AmqpException(true, AmqpErrorCondition.TIMEOUT_ERROR, String.format(Locale.US, "Entity(%s): Send operation timed out", entityPath), handler.getErrorContext(sender)); } workItem.error(exception, null); } } }
nit: could `ByteBuffer` with the ability to set limit instead of copying arrays have any benefits?
private byte[] batchBinaryDataSectionBytes(Message sectionMessage, int maxMessageSize) { final int size = messageSerializer.getSize(sectionMessage); final int allocationSize = Math.min(size + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize); final byte[] encodedBytes = new byte[allocationSize]; final int encodedSize = sectionMessage.encode(encodedBytes, 0, allocationSize); final Message message = Proton.message(); final Data binaryData = new Data(new Binary(encodedBytes, 0, encodedSize)); message.setBody(binaryData); final int binaryRawSize = binaryData.getValue().getLength(); final int binaryEncodedSize = binaryEncodedSize(binaryRawSize); final byte[] binaryEncodedBytes = new byte[binaryEncodedSize]; message.encode(binaryEncodedBytes, 0, binaryEncodedSize); return binaryEncodedBytes; }
private byte[] batchBinaryDataSectionBytes(Message sectionMessage, int maxMessageSize) { final int size = messageSerializer.getSize(sectionMessage); final int allocationSize = Math.min(size + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize); final byte[] encodedBytes = new byte[allocationSize]; final int encodedSize = sectionMessage.encode(encodedBytes, 0, allocationSize); final Message message = Proton.message(); final Data binaryData = new Data(new Binary(encodedBytes, 0, encodedSize)); message.setBody(binaryData); final int binaryRawSize = binaryData.getValue().getLength(); final int binaryEncodedSize = binaryEncodedSize(binaryRawSize); final byte[] binaryEncodedBytes = new byte[binaryEncodedSize]; message.encode(binaryEncodedBytes, 0, binaryEncodedSize); return binaryEncodedBytes; }
class ReactorSender implements AmqpSendLink, AsyncCloseable, AutoCloseable { private static final String DELIVERY_TAG_KEY = "deliveryTag"; private static final String PENDING_SENDS_SIZE_KEY = "pending_sends_size"; private final String entityPath; private final Sender sender; private final SendLinkHandler handler; private final ReactorProvider reactorProvider; private final Disposable.Composite subscriptions; private final AtomicBoolean hasConnected = new AtomicBoolean(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicInteger retryAttempts = new AtomicInteger(); private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final Object pendingSendLock = new Object(); private final ConcurrentHashMap<String, RetriableWorkItem> pendingSendsMap = new ConcurrentHashMap<>(); private final PriorityQueue<WeightedDeliveryTag> pendingSendsQueue = new PriorityQueue<>(1000, new DeliveryTagComparator()); private final ClientLogger logger; private final Flux<AmqpEndpointState> endpointStates; private final TokenManager tokenManager; private final MessageSerializer messageSerializer; private final AmqpRetryPolicy retry; private final AmqpRetryOptions retryOptions; private final String activeTimeoutMessage; private final Scheduler scheduler; private final AmqpMetricsProvider metricsProvider; private final Object errorConditionLock = new Object(); private volatile Exception lastKnownLinkError; private volatile Instant lastKnownErrorReportedAt; private volatile int linkSize; /** * Creates an instance of {@link ReactorSender}. * * @param amqpConnection The parent {@link AmqpConnection} that this sender lives in. * @param entityPath The message broker address for the sender. * @param sender The underlying proton-j sender. * @param handler The proton-j handler associated with the sender. * @param reactorProvider Provider to schedule work on the proton-j reactor. * @param tokenManager Token manager for authorising with the CBS node. 
Can be {@code null} if it is part of the * transaction manager. * @param messageSerializer Serializer to deserialise and serialize AMQP messages. * @param retryOptions Retry options. * @param scheduler Scheduler to schedule send timeout. */ ReactorSender(AmqpConnection amqpConnection, String entityPath, Sender sender, SendLinkHandler handler, ReactorProvider reactorProvider, TokenManager tokenManager, MessageSerializer messageSerializer, AmqpRetryOptions retryOptions, Scheduler scheduler, AmqpMetricsProvider metricsProvider) { this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.sender = Objects.requireNonNull(sender, "'sender' cannot be null."); this.handler = Objects.requireNonNull(handler, "'handler' cannot be null."); this.reactorProvider = Objects.requireNonNull(reactorProvider, "'reactorProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null."); this.scheduler = Objects.requireNonNull(scheduler, "'scheduler' cannot be null."); this.retry = RetryUtil.getRetryPolicy(retryOptions); this.tokenManager = tokenManager; this.metricsProvider = metricsProvider; String connectionId = handler.getConnectionId() == null ? NOT_APPLICABLE : handler.getConnectionId(); String linkName = getLinkName() == null ? 
NOT_APPLICABLE : getLinkName(); Map<String, Object> loggingContext = createContextWithConnectionId(connectionId); loggingContext.put(LINK_NAME_KEY, linkName); loggingContext.put(ENTITY_PATH_KEY, entityPath); this.logger = new ClientLogger(ReactorSender.class, loggingContext); this.activeTimeoutMessage = String.format( "ReactorSender connectionId[%s] linkName[%s]: Waiting for send and receive handler to be ACTIVE", handler.getConnectionId(), handler.getLinkName()); this.endpointStates = this.handler.getEndpointStates() .map(state -> { logger.verbose("State {}", state); this.hasConnected.set(state == EndpointState.ACTIVE); return AmqpEndpointStateUtil.getConnectionState(state); }) .doOnError(error -> { hasConnected.set(false); handleError(error); }) .doOnComplete(() -> { hasConnected.set(false); handleClose(); }) .cache(1); this.subscriptions = Disposables.composite( this.endpointStates.subscribe(), this.handler.getDeliveredMessages().subscribe(this::processDeliveredMessage), this.handler.getLinkCredits().subscribe(credit -> { logger.atVerbose().addKeyValue("credits", credit) .log("Credits on link."); this.scheduleWorkOnDispatcher(); }), amqpConnection.getShutdownSignals().flatMap(signal -> { logger.verbose("Shutdown signal received."); hasConnected.set(false); return closeAsync("Connection shutdown.", null); }).subscribe() ); if (tokenManager != null) { this.subscriptions.add(tokenManager.getAuthorizationResults().onErrorResume(error -> { final Mono<Void> operation = closeAsync(String.format("connectionId[%s] linkName[%s] Token renewal failure. Disposing send " + "link.", amqpConnection.getId(), getLinkName()), new ErrorCondition(Symbol.getSymbol(NOT_ALLOWED.getErrorCondition()), error.getMessage())); return operation.then(Mono.empty()); }).subscribe(response -> { logger.atVerbose().addKeyValue("response", response) .log("Token refreshed."); }, error -> { }, () -> { logger.verbose(" Authorization completed. Disposing."); closeAsync("Authorization completed. 
Disposing.", null).subscribe(); })); } } @Override public Flux<AmqpEndpointState> getEndpointStates() { return endpointStates; } @Override public Mono<Void> send(Message message) { return send(message, null); } @Override public Mono<Void> send(Message message, DeliveryState deliveryState) { if (isDisposed.get()) { return Mono.error(new IllegalStateException(String.format( "connectionId[%s] linkName[%s] Cannot publish message when disposed.", handler.getConnectionId(), getLinkName()))); } return getLinkSize() .flatMap(maxMessageSize -> { final int payloadSize = messageSerializer.getSize(message); final int allocationSize = Math.min(payloadSize + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize); final byte[] bytes = new byte[allocationSize]; int encodedSize; try { encodedSize = message.encode(bytes, 0, allocationSize); } catch (BufferOverflowException exception) { final String errorMessage = String.format(Locale.US, "Error sending. Size of the payload exceeded maximum message size: %s kb", maxMessageSize / 1024); final Throwable error = new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, errorMessage, exception, handler.getErrorContext(sender)); return Mono.error(error); } return send(bytes, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT, deliveryState); }).then(); } @Override public Mono<Void> send(List<Message> messageBatch) { return send(messageBatch, null); } @Override public Mono<Void> send(List<Message> messageBatch, DeliveryState deliveryState) { if (isDisposed.get()) { return Mono.error(new IllegalStateException(String.format( "connectionId[%s] linkName[%s] Cannot publish data batch when disposed.", handler.getConnectionId(), getLinkName()))); } if (messageBatch.size() == 1) { return send(messageBatch.get(0), deliveryState); } return getLinkSize() .flatMap(maxMessageSize -> { int totalEncodedSize = 0; final CompositeReadableBuffer buffer = new CompositeReadableBuffer(); final byte[] envelopBytes = batchEnvelopBytes(messageBatch.get(0), 
maxMessageSize); if (envelopBytes.length > 0) { totalEncodedSize += envelopBytes.length; if (totalEncodedSize > maxMessageSize) { return batchBufferOverflowError(maxMessageSize); } buffer.append(envelopBytes); } for (final Message message : messageBatch) { final byte[] sectionBytes = batchBinaryDataSectionBytes(message, maxMessageSize); if (sectionBytes.length > 0) { totalEncodedSize += sectionBytes.length; if (totalEncodedSize > maxMessageSize) { return batchBufferOverflowError(maxMessageSize); } buffer.append(sectionBytes); } } return send(buffer, AmqpConstants.AMQP_BATCH_MESSAGE_FORMAT, deliveryState); }).then(); } private byte[] batchEnvelopBytes(Message envelopMessage, int maxMessageSize) { final Message message = Proton.message(); message.setMessageAnnotations(envelopMessage.getMessageAnnotations()); final int size = messageSerializer.getSize(message); final int allocationSize = Math.min(size + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize); final byte[] encodedBytes = new byte[allocationSize]; final int encodedSize = message.encode(encodedBytes, 0, allocationSize); return Arrays.copyOf(encodedBytes, encodedSize); } private Mono<Void> batchBufferOverflowError(int maxMessageSize) { return Mono.error(new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, String.format(Locale.US, "Size of the payload exceeded maximum message size: %s kb", maxMessageSize / 1024), new BufferOverflowException(), handler.getErrorContext(sender))); } /** * Compute the encoded size when encoding a binary data of given size per Amqp 1.0 spec "amqp:data:binary" format. * * @param binaryRawSize the length of the binary data. * @return the encoded size. 
*/ private int binaryEncodedSize(int binaryRawSize) { if (binaryRawSize <= 255) { return 5 + binaryRawSize; } else { return 8 + binaryRawSize; } } @Override public AmqpErrorContext getErrorContext() { return handler.getErrorContext(sender); } @Override public String getLinkName() { return sender.getName(); } @Override public String getEntityPath() { return entityPath; } @Override public String getHostname() { return handler.getHostname(); } @Override public Mono<Integer> getLinkSize() { if (linkSize > 0) { return Mono.defer(() -> Mono.just(this.linkSize)); } synchronized (this) { if (linkSize > 0) { return Mono.defer(() -> Mono.just(linkSize)); } return RetryUtil.withRetry(getEndpointStates().takeUntil(state -> state == AmqpEndpointState.ACTIVE), retryOptions, activeTimeoutMessage) .then(Mono.fromCallable(() -> { final UnsignedLong remoteMaxMessageSize = sender.getRemoteMaxMessageSize(); if (remoteMaxMessageSize != null) { linkSize = remoteMaxMessageSize.intValue(); } else { logger.warning("Could not get the getRemoteMaxMessageSize. Returning current link size: {}", linkSize); } return linkSize; })); } } @Override public boolean isDisposed() { return isDisposed.get(); } /** * Blocking call that disposes of the sender. * * @see */ @Override public void dispose() { close(); } /** * Blocking call that disposes of the sender. * * @see */ @Override public void close() { closeAsync().block(retryOptions.getTryTimeout()); } @Override public Mono<Void> closeAsync() { return closeAsync("User invoked close operation.", null); } /** * Disposes of the sender. * * @param errorCondition Error condition associated with close operation. * @param message Message associated with why the sender was closed. * * @return A mono that completes when the send link has closed. 
*/ Mono<Void> closeAsync(String message, ErrorCondition errorCondition) { if (isDisposed.getAndSet(true)) { return isClosedMono.asMono(); } addErrorCondition(logger.atVerbose(), errorCondition) .log("Setting error condition and disposing. {}", message); final Runnable closeWork = () -> { if (errorCondition != null && sender.getCondition() == null) { sender.setCondition(errorCondition); } sender.close(); }; return Mono.fromRunnable(() -> { try { reactorProvider.getReactorDispatcher().invoke(closeWork); } catch (IOException e) { logger.warning("Could not schedule close work. Running manually. And completing close.", e); closeWork.run(); handleClose(); } catch (RejectedExecutionException e) { logger.info("RejectedExecutionException scheduling close work. And completing close."); closeWork.run(); handleClose(); } }).then(isClosedMono.asMono()) .publishOn(Schedulers.boundedElastic()); } /** * A mono that completes when the sender has completely closed. * * @return mono that completes when the sender has completely closed. */ Mono<Void> isClosed() { return isClosedMono.asMono(); } @Override public Mono<DeliveryState> send(byte[] bytes, int arrayOffset, int messageFormat, DeliveryState deliveryState) { return onEndpointActive().then(Mono.create(sink -> { sendWork(new RetriableWorkItem(bytes, arrayOffset, messageFormat, sink, retryOptions.getTryTimeout(), deliveryState, metricsProvider)); })); } Mono<DeliveryState> send(ReadableBuffer buffer, int messageFormat, DeliveryState deliveryState) { return onEndpointActive().then(Mono.create(sink -> { sendWork(new RetriableWorkItem(buffer, messageFormat, sink, retryOptions.getTryTimeout(), deliveryState, metricsProvider)); })); } private Flux<EndpointState> onEndpointActive() { return RetryUtil.withRetry(handler.getEndpointStates().takeUntil(state -> state == EndpointState.ACTIVE), retryOptions, activeTimeoutMessage); } /** * Add the work item in pending send to be processed on {@link ReactorDispatcher} thread. 
* * @param workItem to be processed. */ private void sendWork(RetriableWorkItem workItem) { final String deliveryTag = UUID.randomUUID().toString().replace("-", ""); synchronized (pendingSendLock) { this.pendingSendsMap.put(deliveryTag, workItem); this.pendingSendsQueue.offer(new WeightedDeliveryTag(deliveryTag, workItem.hasBeenRetried() ? 1 : 0)); } this.scheduleWorkOnDispatcher(); } /** * Invokes work on the Reactor. Should only be called from ReactorDispatcher.invoke() */ private void processSendWork() { if (!hasConnected.get()) { logger.warning("Not connected. Not processing send work."); return; } if (isDisposed.get()) { logger.info("Sender is closed. Not executing work."); return; } while (hasConnected.get() && sender.getCredit() > 0) { final WeightedDeliveryTag weightedDelivery; final RetriableWorkItem workItem; final String deliveryTag; synchronized (pendingSendLock) { weightedDelivery = this.pendingSendsQueue.poll(); if (weightedDelivery != null) { deliveryTag = weightedDelivery.getDeliveryTag(); workItem = this.pendingSendsMap.get(deliveryTag); } else { workItem = null; deliveryTag = null; } } if (workItem == null) { if (deliveryTag != null) { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("sendData not found for this delivery."); } break; } Delivery delivery = null; boolean linkAdvance = false; int sentMsgSize = 0; Exception sendException = null; try { workItem.beforeTry(); delivery = sender.delivery(deliveryTag.getBytes(UTF_8)); delivery.setMessageFormat(workItem.getMessageFormat()); if (workItem.isDeliveryStateProvided()) { delivery.disposition(workItem.getDeliveryState()); } workItem.send(sender); linkAdvance = sender.advance(); } catch (Exception exception) { sendException = exception; } if (linkAdvance) { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("Sent message."); workItem.setWaitingForAck(); scheduler.schedule(new SendTimeout(deliveryTag), retryOptions.getTryTimeout().toMillis(), 
TimeUnit.MILLISECONDS); } else { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .addKeyValue("sentMessageSize", sentMsgSize) .addKeyValue("payloadActualSize", workItem.getEncodedMessageSize()) .log("Sendlink advance failed."); DeliveryState outcome = null; if (delivery != null) { outcome = delivery.getRemoteState(); delivery.free(); } final AmqpErrorContext context = handler.getErrorContext(sender); final Throwable exception = sendException != null ? new OperationCancelledException(String.format(Locale.US, "Entity(%s): send operation failed. Please see cause for more details", entityPath), sendException, context) : new OperationCancelledException(String.format(Locale.US, "Entity(%s): send operation failed while advancing delivery(tag: %s).", entityPath, deliveryTag), context); workItem.error(exception, outcome); } } } private void processDeliveredMessage(Delivery delivery) { final DeliveryState outcome = delivery.getRemoteState(); final String deliveryTag = new String(delivery.getTag(), UTF_8); logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("Process delivered message."); final RetriableWorkItem workItem = pendingSendsMap.remove(deliveryTag); if (workItem == null) { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("Mismatch (or send timed out)."); return; } else if (workItem.isDeliveryStateProvided()) { workItem.success(outcome); return; } if (outcome instanceof Accepted) { synchronized (errorConditionLock) { lastKnownLinkError = null; lastKnownErrorReportedAt = null; retryAttempts.set(0); } workItem.success(outcome); } else if (outcome instanceof Rejected) { final Rejected rejected = (Rejected) outcome; final org.apache.qpid.proton.amqp.transport.ErrorCondition error = rejected.getError(); final Exception exception = ExceptionUtil.toException(error.getCondition().toString(), error.getDescription(), handler.getErrorContext(sender)); logger.atWarning() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) 
.addKeyValue("rejected", rejected) .log("Delivery rejected."); final int retryAttempt; if (isGeneralSendError(error.getCondition())) { synchronized (errorConditionLock) { lastKnownLinkError = exception; lastKnownErrorReportedAt = Instant.now(); retryAttempt = retryAttempts.incrementAndGet(); } } else { retryAttempt = retryAttempts.get(); } final Duration retryInterval = retry.calculateRetryDelay(exception, retryAttempt); if (retryInterval == null || retryInterval.compareTo(workItem.getTimeoutTracker().remaining()) > 0) { cleanupFailedSend(workItem, exception, outcome); } else { workItem.setLastKnownException(exception); try { reactorProvider.getReactorDispatcher().invoke(() -> sendWork(workItem), retryInterval); } catch (IOException | RejectedExecutionException schedulerException) { exception.initCause(schedulerException); cleanupFailedSend( workItem, new AmqpException(false, String.format(Locale.US, "Entity(%s): send operation failed while scheduling a" + " retry on Reactor, see cause for more details.", entityPath), schedulerException, handler.getErrorContext(sender)), outcome); } } } else if (outcome instanceof Released) { cleanupFailedSend(workItem, new OperationCancelledException(outcome.toString(), handler.getErrorContext(sender)), outcome); } else if (outcome instanceof Declared) { final Declared declared = (Declared) outcome; workItem.success(declared); } else { cleanupFailedSend(workItem, new AmqpException(false, outcome.toString(), handler.getErrorContext(sender)), outcome); } } private void scheduleWorkOnDispatcher() { try { reactorProvider.getReactorDispatcher().invoke(this::processSendWork); } catch (IOException e) { logger.warning("Error scheduling work on reactor.", e); } catch (RejectedExecutionException e) { logger.info("Error scheduling work on reactor because of RejectedExecutionException."); } } private void cleanupFailedSend(final RetriableWorkItem workItem, final Exception exception, final DeliveryState deliveryState) { 
workItem.error(exception, deliveryState); } private void completeClose() { isClosedMono.emitEmpty((signalType, result) -> { addSignalTypeAndResult(logger.atWarning(), signalType, result).log("Unable to emit shutdown signal."); return false; }); subscriptions.dispose(); if (tokenManager != null) { tokenManager.close(); } } /** * Clears pending sends and puts an error in there. * * @param error Error to pass to pending sends. */ private void handleError(Throwable error) { synchronized (pendingSendLock) { if (isDisposed.getAndSet(true)) { logger.verbose("This was already disposed. Dropping error."); } else { logger.atVerbose() .addKeyValue(PENDING_SENDS_SIZE_KEY, () -> String.valueOf(pendingSendsMap.size())) .log("Disposing pending sends with error."); } pendingSendsMap.forEach((key, value) -> value.error(error, null)); pendingSendsMap.clear(); pendingSendsQueue.clear(); } completeClose(); } private void handleClose() { final String message = String.format("Could not complete sends because link '%s' for '%s' is closed.", getLinkName(), entityPath); final AmqpErrorContext context = handler.getErrorContext(sender); synchronized (pendingSendLock) { if (isDisposed.getAndSet(true)) { logger.verbose("This was already disposed."); } else { logger.atVerbose() .addKeyValue(PENDING_SENDS_SIZE_KEY, () -> String.valueOf(pendingSendsMap.size())) .log("Disposing pending sends."); } pendingSendsMap.forEach((key, value) -> value.error(new AmqpException(true, message, context), null)); pendingSendsMap.clear(); pendingSendsQueue.clear(); } completeClose(); } private static boolean isGeneralSendError(Symbol amqpError) { return (amqpError == AmqpErrorCode.SERVER_BUSY_ERROR || amqpError == AmqpErrorCode.TIMEOUT_ERROR || amqpError == AmqpErrorCode.RESOURCE_LIMIT_EXCEEDED); } private static class WeightedDeliveryTag { private final String deliveryTag; private final int priority; WeightedDeliveryTag(final String deliveryTag, final int priority) { this.deliveryTag = deliveryTag; this.priority 
= priority; } private String getDeliveryTag() { return this.deliveryTag; } private int getPriority() { return this.priority; } } private static class DeliveryTagComparator implements Comparator<WeightedDeliveryTag>, Serializable { private static final long serialVersionUID = -7057500582037295635L; @Override public int compare(WeightedDeliveryTag deliveryTag0, WeightedDeliveryTag deliveryTag1) { return deliveryTag1.getPriority() - deliveryTag0.getPriority(); } } /** * Keeps track of messages that have been sent, but may not have been acknowledged by the service. */ private class SendTimeout implements Runnable { private final String deliveryTag; SendTimeout(String deliveryTag) { this.deliveryTag = deliveryTag; } @Override public void run() { final RetriableWorkItem workItem = pendingSendsMap.remove(deliveryTag); if (workItem == null) { return; } Exception cause = lastKnownLinkError; final Exception lastError; final Instant lastErrorTime; synchronized (errorConditionLock) { lastError = lastKnownLinkError; lastErrorTime = lastKnownErrorReportedAt; } if (lastError != null && lastErrorTime != null) { final Instant now = Instant.now(); final boolean isLastErrorAfterSleepTime = lastErrorTime.isAfter(now.minusSeconds(SERVER_BUSY_BASE_SLEEP_TIME_IN_SECS)); final boolean isServerBusy = lastError instanceof AmqpException && isLastErrorAfterSleepTime; final boolean isLastErrorAfterOperationTimeout = lastErrorTime.isAfter(now.minus(retryOptions.getTryTimeout())); cause = isServerBusy || isLastErrorAfterOperationTimeout ? lastError : null; } final AmqpException exception; if (cause instanceof AmqpException) { exception = (AmqpException) cause; } else { exception = new AmqpException(true, AmqpErrorCondition.TIMEOUT_ERROR, String.format(Locale.US, "Entity(%s): Send operation timed out", entityPath), handler.getErrorContext(sender)); } workItem.error(exception, null); } } }
class ReactorSender implements AmqpSendLink, AsyncCloseable, AutoCloseable { private static final String DELIVERY_TAG_KEY = "deliveryTag"; private static final String PENDING_SENDS_SIZE_KEY = "pending_sends_size"; private final String entityPath; private final Sender sender; private final SendLinkHandler handler; private final ReactorProvider reactorProvider; private final Disposable.Composite subscriptions; private final AtomicBoolean hasConnected = new AtomicBoolean(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicInteger retryAttempts = new AtomicInteger(); private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final Object pendingSendLock = new Object(); private final ConcurrentHashMap<String, RetriableWorkItem> pendingSendsMap = new ConcurrentHashMap<>(); private final PriorityQueue<WeightedDeliveryTag> pendingSendsQueue = new PriorityQueue<>(1000, new DeliveryTagComparator()); private final ClientLogger logger; private final Flux<AmqpEndpointState> endpointStates; private final TokenManager tokenManager; private final MessageSerializer messageSerializer; private final AmqpRetryPolicy retry; private final AmqpRetryOptions retryOptions; private final String activeTimeoutMessage; private final Scheduler scheduler; private final AmqpMetricsProvider metricsProvider; private final Object errorConditionLock = new Object(); private volatile Exception lastKnownLinkError; private volatile Instant lastKnownErrorReportedAt; private volatile int linkSize; /** * Creates an instance of {@link ReactorSender}. * * @param amqpConnection The parent {@link AmqpConnection} that this sender lives in. * @param entityPath The message broker address for the sender. * @param sender The underlying proton-j sender. * @param handler The proton-j handler associated with the sender. * @param reactorProvider Provider to schedule work on the proton-j reactor. * @param tokenManager Token manager for authorising with the CBS node. 
Can be {@code null} if it is part of the * transaction manager. * @param messageSerializer Serializer to deserialise and serialize AMQP messages. * @param retryOptions Retry options. * @param scheduler Scheduler to schedule send timeout. */ ReactorSender(AmqpConnection amqpConnection, String entityPath, Sender sender, SendLinkHandler handler, ReactorProvider reactorProvider, TokenManager tokenManager, MessageSerializer messageSerializer, AmqpRetryOptions retryOptions, Scheduler scheduler, AmqpMetricsProvider metricsProvider) { this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.sender = Objects.requireNonNull(sender, "'sender' cannot be null."); this.handler = Objects.requireNonNull(handler, "'handler' cannot be null."); this.reactorProvider = Objects.requireNonNull(reactorProvider, "'reactorProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null."); this.scheduler = Objects.requireNonNull(scheduler, "'scheduler' cannot be null."); this.retry = RetryUtil.getRetryPolicy(retryOptions); this.tokenManager = tokenManager; this.metricsProvider = metricsProvider; String connectionId = handler.getConnectionId() == null ? NOT_APPLICABLE : handler.getConnectionId(); String linkName = getLinkName() == null ? 
NOT_APPLICABLE : getLinkName(); Map<String, Object> loggingContext = createContextWithConnectionId(connectionId); loggingContext.put(LINK_NAME_KEY, linkName); loggingContext.put(ENTITY_PATH_KEY, entityPath); this.logger = new ClientLogger(ReactorSender.class, loggingContext); this.activeTimeoutMessage = String.format( "ReactorSender connectionId[%s] linkName[%s]: Waiting for send and receive handler to be ACTIVE", handler.getConnectionId(), handler.getLinkName()); this.endpointStates = this.handler.getEndpointStates() .map(state -> { logger.verbose("State {}", state); this.hasConnected.set(state == EndpointState.ACTIVE); return AmqpEndpointStateUtil.getConnectionState(state); }) .doOnError(error -> { hasConnected.set(false); handleError(error); }) .doOnComplete(() -> { hasConnected.set(false); handleClose(); }) .cache(1); this.subscriptions = Disposables.composite( this.endpointStates.subscribe(), this.handler.getDeliveredMessages().subscribe(this::processDeliveredMessage), this.handler.getLinkCredits().subscribe(credit -> { logger.atVerbose().addKeyValue("credits", credit) .log("Credits on link."); this.scheduleWorkOnDispatcher(); }), amqpConnection.getShutdownSignals().flatMap(signal -> { logger.verbose("Shutdown signal received."); hasConnected.set(false); return closeAsync("Connection shutdown.", null); }).subscribe() ); if (tokenManager != null) { this.subscriptions.add(tokenManager.getAuthorizationResults().onErrorResume(error -> { final Mono<Void> operation = closeAsync(String.format("connectionId[%s] linkName[%s] Token renewal failure. Disposing send " + "link.", amqpConnection.getId(), getLinkName()), new ErrorCondition(Symbol.getSymbol(NOT_ALLOWED.getErrorCondition()), error.getMessage())); return operation.then(Mono.empty()); }).subscribe(response -> { logger.atVerbose().addKeyValue("response", response) .log("Token refreshed."); }, error -> { }, () -> { logger.verbose(" Authorization completed. Disposing."); closeAsync("Authorization completed. 
Disposing.", null).subscribe(); })); } } @Override public Flux<AmqpEndpointState> getEndpointStates() { return endpointStates; } @Override public Mono<Void> send(Message message) { return send(message, null); } @Override public Mono<Void> send(Message message, DeliveryState deliveryState) { if (isDisposed.get()) { return Mono.error(new IllegalStateException(String.format( "connectionId[%s] linkName[%s] Cannot publish message when disposed.", handler.getConnectionId(), getLinkName()))); } return getLinkSize() .flatMap(maxMessageSize -> { final int payloadSize = messageSerializer.getSize(message); final int allocationSize = Math.min(payloadSize + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize); final byte[] bytes = new byte[allocationSize]; int encodedSize; try { encodedSize = message.encode(bytes, 0, allocationSize); } catch (BufferOverflowException exception) { final String errorMessage = String.format(Locale.US, "Error sending. Size of the payload exceeded maximum message size: %s kb", maxMessageSize / 1024); final Throwable error = new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, errorMessage, exception, handler.getErrorContext(sender)); return Mono.error(error); } return send(bytes, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT, deliveryState); }).then(); } @Override public Mono<Void> send(List<Message> messageBatch) { return send(messageBatch, null); } @Override public Mono<Void> send(List<Message> messageBatch, DeliveryState deliveryState) { if (isDisposed.get()) { return Mono.error(new IllegalStateException(String.format( "connectionId[%s] linkName[%s] Cannot publish data batch when disposed.", handler.getConnectionId(), getLinkName()))); } if (messageBatch.size() == 1) { return send(messageBatch.get(0), deliveryState); } return getLinkSize() .flatMap(maxMessageSize -> { int totalEncodedSize = 0; final CompositeReadableBuffer buffer = new CompositeReadableBuffer(); final byte[] envelopBytes = batchEnvelopBytes(messageBatch.get(0), 
maxMessageSize); if (envelopBytes.length > 0) { totalEncodedSize += envelopBytes.length; if (totalEncodedSize > maxMessageSize) { return batchBufferOverflowError(maxMessageSize); } buffer.append(envelopBytes); } for (final Message message : messageBatch) { final byte[] sectionBytes = batchBinaryDataSectionBytes(message, maxMessageSize); if (sectionBytes.length > 0) { totalEncodedSize += sectionBytes.length; if (totalEncodedSize > maxMessageSize) { return batchBufferOverflowError(maxMessageSize); } buffer.append(sectionBytes); } else { logger.info("Ignoring the empty message org.apache.qpid.proton.message.message@{} in the batch.", Integer.toHexString(System.identityHashCode(message))); } } return send(buffer, AmqpConstants.AMQP_BATCH_MESSAGE_FORMAT, deliveryState); }).then(); } private byte[] batchEnvelopBytes(Message envelopMessage, int maxMessageSize) { final Message message = Proton.message(); message.setMessageAnnotations(envelopMessage.getMessageAnnotations()); if ((envelopMessage.getMessageId() instanceof String) && !CoreUtils.isNullOrEmpty((String) envelopMessage.getMessageId())) { message.setMessageId(envelopMessage.getMessageId()); } if (!CoreUtils.isNullOrEmpty(envelopMessage.getGroupId())) { message.setGroupId(envelopMessage.getGroupId()); } final int size = messageSerializer.getSize(message); final int allocationSize = Math.min(size + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize); final byte[] encodedBytes = new byte[allocationSize]; final int encodedSize = message.encode(encodedBytes, 0, allocationSize); return Arrays.copyOf(encodedBytes, encodedSize); } private Mono<Void> batchBufferOverflowError(int maxMessageSize) { return FluxUtil.monoError(logger, new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, String.format(Locale.US, "Size of the payload exceeded maximum message size: %s kb", maxMessageSize / 1024), new BufferOverflowException(), handler.getErrorContext(sender))); } /** * Compute the encoded size when encoding a binary data 
of given size per Amqp 1.0 spec "amqp:data:binary" format. * * @param binaryRawSize the length of the binary data. * @return the encoded size. */ private int binaryEncodedSize(int binaryRawSize) { if (binaryRawSize <= 255) { return 5 + binaryRawSize; } else { return 8 + binaryRawSize; } } @Override public AmqpErrorContext getErrorContext() { return handler.getErrorContext(sender); } @Override public String getLinkName() { return sender.getName(); } @Override public String getEntityPath() { return entityPath; } @Override public String getHostname() { return handler.getHostname(); } @Override public Mono<Integer> getLinkSize() { if (linkSize > 0) { return Mono.defer(() -> Mono.just(this.linkSize)); } synchronized (this) { if (linkSize > 0) { return Mono.defer(() -> Mono.just(linkSize)); } return RetryUtil.withRetry(getEndpointStates().takeUntil(state -> state == AmqpEndpointState.ACTIVE), retryOptions, activeTimeoutMessage) .then(Mono.fromCallable(() -> { final UnsignedLong remoteMaxMessageSize = sender.getRemoteMaxMessageSize(); if (remoteMaxMessageSize != null) { linkSize = remoteMaxMessageSize.intValue(); } else { logger.warning("Could not get the getRemoteMaxMessageSize. Returning current link size: {}", linkSize); } return linkSize; })); } } @Override public boolean isDisposed() { return isDisposed.get(); } /** * Blocking call that disposes of the sender. * * @see */ @Override public void dispose() { close(); } /** * Blocking call that disposes of the sender. * * @see */ @Override public void close() { closeAsync().block(retryOptions.getTryTimeout()); } @Override public Mono<Void> closeAsync() { return closeAsync("User invoked close operation.", null); } /** * Disposes of the sender. * * @param errorCondition Error condition associated with close operation. * @param message Message associated with why the sender was closed. * * @return A mono that completes when the send link has closed. 
*/ Mono<Void> closeAsync(String message, ErrorCondition errorCondition) { if (isDisposed.getAndSet(true)) { return isClosedMono.asMono(); } addErrorCondition(logger.atVerbose(), errorCondition) .log("Setting error condition and disposing. {}", message); final Runnable closeWork = () -> { if (errorCondition != null && sender.getCondition() == null) { sender.setCondition(errorCondition); } sender.close(); }; return Mono.fromRunnable(() -> { try { reactorProvider.getReactorDispatcher().invoke(closeWork); } catch (IOException e) { logger.warning("Could not schedule close work. Running manually. And completing close.", e); closeWork.run(); handleClose(); } catch (RejectedExecutionException e) { logger.info("RejectedExecutionException scheduling close work. And completing close."); closeWork.run(); handleClose(); } }).then(isClosedMono.asMono()) .publishOn(Schedulers.boundedElastic()); } /** * A mono that completes when the sender has completely closed. * * @return mono that completes when the sender has completely closed. */ Mono<Void> isClosed() { return isClosedMono.asMono(); } @Override public Mono<DeliveryState> send(byte[] bytes, int arrayOffset, int messageFormat, DeliveryState deliveryState) { return onEndpointActive().then(Mono.create(sink -> { sendWork(new RetriableWorkItem(bytes, arrayOffset, messageFormat, sink, retryOptions.getTryTimeout(), deliveryState, metricsProvider)); })); } Mono<DeliveryState> send(ReadableBuffer buffer, int messageFormat, DeliveryState deliveryState) { return onEndpointActive().then(Mono.create(sink -> { sendWork(new RetriableWorkItem(buffer, messageFormat, sink, retryOptions.getTryTimeout(), deliveryState, metricsProvider)); })); } private Flux<EndpointState> onEndpointActive() { return RetryUtil.withRetry(handler.getEndpointStates().takeUntil(state -> state == EndpointState.ACTIVE), retryOptions, activeTimeoutMessage); } /** * Add the work item in pending send to be processed on {@link ReactorDispatcher} thread. 
* * @param workItem to be processed. */ private void sendWork(RetriableWorkItem workItem) { final String deliveryTag = UUID.randomUUID().toString().replace("-", ""); synchronized (pendingSendLock) { this.pendingSendsMap.put(deliveryTag, workItem); this.pendingSendsQueue.offer(new WeightedDeliveryTag(deliveryTag, workItem.hasBeenRetried() ? 1 : 0)); } this.scheduleWorkOnDispatcher(); } /** * Invokes work on the Reactor. Should only be called from ReactorDispatcher.invoke() */ private void processSendWork() { if (!hasConnected.get()) { logger.warning("Not connected. Not processing send work."); return; } if (isDisposed.get()) { logger.info("Sender is closed. Not executing work."); return; } while (hasConnected.get() && sender.getCredit() > 0) { final WeightedDeliveryTag weightedDelivery; final RetriableWorkItem workItem; final String deliveryTag; synchronized (pendingSendLock) { weightedDelivery = this.pendingSendsQueue.poll(); if (weightedDelivery != null) { deliveryTag = weightedDelivery.getDeliveryTag(); workItem = this.pendingSendsMap.get(deliveryTag); } else { workItem = null; deliveryTag = null; } } if (workItem == null) { if (deliveryTag != null) { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("sendData not found for this delivery."); } break; } Delivery delivery = null; boolean linkAdvance = false; int sentMsgSize = 0; Exception sendException = null; try { workItem.beforeTry(); delivery = sender.delivery(deliveryTag.getBytes(UTF_8)); delivery.setMessageFormat(workItem.getMessageFormat()); if (workItem.isDeliveryStateProvided()) { delivery.disposition(workItem.getDeliveryState()); } workItem.send(sender); linkAdvance = sender.advance(); } catch (Exception exception) { sendException = exception; } if (linkAdvance) { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("Sent message."); workItem.setWaitingForAck(); scheduler.schedule(new SendTimeout(deliveryTag), retryOptions.getTryTimeout().toMillis(), 
TimeUnit.MILLISECONDS); } else { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .addKeyValue("sentMessageSize", sentMsgSize) .addKeyValue("payloadActualSize", workItem.getEncodedMessageSize()) .log("Sendlink advance failed."); DeliveryState outcome = null; if (delivery != null) { outcome = delivery.getRemoteState(); delivery.free(); } final AmqpErrorContext context = handler.getErrorContext(sender); final Throwable exception = sendException != null ? new OperationCancelledException(String.format(Locale.US, "Entity(%s): send operation failed. Please see cause for more details", entityPath), sendException, context) : new OperationCancelledException(String.format(Locale.US, "Entity(%s): send operation failed while advancing delivery(tag: %s).", entityPath, deliveryTag), context); workItem.error(exception, outcome); } } } private void processDeliveredMessage(Delivery delivery) { final DeliveryState outcome = delivery.getRemoteState(); final String deliveryTag = new String(delivery.getTag(), UTF_8); logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("Process delivered message."); final RetriableWorkItem workItem = pendingSendsMap.remove(deliveryTag); if (workItem == null) { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("Mismatch (or send timed out)."); return; } else if (workItem.isDeliveryStateProvided()) { workItem.success(outcome); return; } if (outcome instanceof Accepted) { synchronized (errorConditionLock) { lastKnownLinkError = null; lastKnownErrorReportedAt = null; retryAttempts.set(0); } workItem.success(outcome); } else if (outcome instanceof Rejected) { final Rejected rejected = (Rejected) outcome; final org.apache.qpid.proton.amqp.transport.ErrorCondition error = rejected.getError(); final Exception exception = ExceptionUtil.toException(error.getCondition().toString(), error.getDescription(), handler.getErrorContext(sender)); logger.atWarning() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) 
.addKeyValue("rejected", rejected) .log("Delivery rejected."); final int retryAttempt; if (isGeneralSendError(error.getCondition())) { synchronized (errorConditionLock) { lastKnownLinkError = exception; lastKnownErrorReportedAt = Instant.now(); retryAttempt = retryAttempts.incrementAndGet(); } } else { retryAttempt = retryAttempts.get(); } final Duration retryInterval = retry.calculateRetryDelay(exception, retryAttempt); if (retryInterval == null || retryInterval.compareTo(workItem.getTimeoutTracker().remaining()) > 0) { cleanupFailedSend(workItem, exception, outcome); } else { workItem.setLastKnownException(exception); try { reactorProvider.getReactorDispatcher().invoke(() -> sendWork(workItem), retryInterval); } catch (IOException | RejectedExecutionException schedulerException) { exception.initCause(schedulerException); cleanupFailedSend( workItem, new AmqpException(false, String.format(Locale.US, "Entity(%s): send operation failed while scheduling a" + " retry on Reactor, see cause for more details.", entityPath), schedulerException, handler.getErrorContext(sender)), outcome); } } } else if (outcome instanceof Released) { cleanupFailedSend(workItem, new OperationCancelledException(outcome.toString(), handler.getErrorContext(sender)), outcome); } else if (outcome instanceof Declared) { final Declared declared = (Declared) outcome; workItem.success(declared); } else { cleanupFailedSend(workItem, new AmqpException(false, outcome.toString(), handler.getErrorContext(sender)), outcome); } } private void scheduleWorkOnDispatcher() { try { reactorProvider.getReactorDispatcher().invoke(this::processSendWork); } catch (IOException e) { logger.warning("Error scheduling work on reactor.", e); } catch (RejectedExecutionException e) { logger.info("Error scheduling work on reactor because of RejectedExecutionException."); } } private void cleanupFailedSend(final RetriableWorkItem workItem, final Exception exception, final DeliveryState deliveryState) { 
workItem.error(exception, deliveryState); } private void completeClose() { isClosedMono.emitEmpty((signalType, result) -> { addSignalTypeAndResult(logger.atWarning(), signalType, result).log("Unable to emit shutdown signal."); return false; }); subscriptions.dispose(); if (tokenManager != null) { tokenManager.close(); } } /** * Clears pending sends and puts an error in there. * * @param error Error to pass to pending sends. */ private void handleError(Throwable error) { synchronized (pendingSendLock) { if (isDisposed.getAndSet(true)) { logger.verbose("This was already disposed. Dropping error."); } else { logger.atVerbose() .addKeyValue(PENDING_SENDS_SIZE_KEY, () -> String.valueOf(pendingSendsMap.size())) .log("Disposing pending sends with error."); } pendingSendsMap.forEach((key, value) -> value.error(error, null)); pendingSendsMap.clear(); pendingSendsQueue.clear(); } completeClose(); } private void handleClose() { final String message = String.format("Could not complete sends because link '%s' for '%s' is closed.", getLinkName(), entityPath); final AmqpErrorContext context = handler.getErrorContext(sender); synchronized (pendingSendLock) { if (isDisposed.getAndSet(true)) { logger.verbose("This was already disposed."); } else { logger.atVerbose() .addKeyValue(PENDING_SENDS_SIZE_KEY, () -> String.valueOf(pendingSendsMap.size())) .log("Disposing pending sends."); } pendingSendsMap.forEach((key, value) -> value.error(new AmqpException(true, message, context), null)); pendingSendsMap.clear(); pendingSendsQueue.clear(); } completeClose(); } private static boolean isGeneralSendError(Symbol amqpError) { return (amqpError == AmqpErrorCode.SERVER_BUSY_ERROR || amqpError == AmqpErrorCode.TIMEOUT_ERROR || amqpError == AmqpErrorCode.RESOURCE_LIMIT_EXCEEDED); } private static class WeightedDeliveryTag { private final String deliveryTag; private final int priority; WeightedDeliveryTag(final String deliveryTag, final int priority) { this.deliveryTag = deliveryTag; this.priority 
= priority; } private String getDeliveryTag() { return this.deliveryTag; } private int getPriority() { return this.priority; } } private static class DeliveryTagComparator implements Comparator<WeightedDeliveryTag>, Serializable { private static final long serialVersionUID = -7057500582037295635L; @Override public int compare(WeightedDeliveryTag deliveryTag0, WeightedDeliveryTag deliveryTag1) { return deliveryTag1.getPriority() - deliveryTag0.getPriority(); } } /** * Keeps track of messages that have been sent, but may not have been acknowledged by the service. */ private class SendTimeout implements Runnable { private final String deliveryTag; SendTimeout(String deliveryTag) { this.deliveryTag = deliveryTag; } @Override public void run() { final RetriableWorkItem workItem = pendingSendsMap.remove(deliveryTag); if (workItem == null) { return; } Exception cause = lastKnownLinkError; final Exception lastError; final Instant lastErrorTime; synchronized (errorConditionLock) { lastError = lastKnownLinkError; lastErrorTime = lastKnownErrorReportedAt; } if (lastError != null && lastErrorTime != null) { final Instant now = Instant.now(); final boolean isLastErrorAfterSleepTime = lastErrorTime.isAfter(now.minusSeconds(SERVER_BUSY_BASE_SLEEP_TIME_IN_SECS)); final boolean isServerBusy = lastError instanceof AmqpException && isLastErrorAfterSleepTime; final boolean isLastErrorAfterOperationTimeout = lastErrorTime.isAfter(now.minus(retryOptions.getTryTimeout())); cause = isServerBusy || isLastErrorAfterOperationTimeout ? lastError : null; } final AmqpException exception; if (cause instanceof AmqpException) { exception = (AmqpException) cause; } else { exception = new AmqpException(true, AmqpErrorCondition.TIMEOUT_ERROR, String.format(Locale.US, "Entity(%s): Send operation timed out", entityPath), handler.getErrorContext(sender)); } workItem.error(exception, null); } } }
Nit: should we also emit a log record here — e.g. by using [monoError](https://github.com/Azure/azure-sdk-for-java/blob/527b28b969a1bea28970e7d3b54a10c2cdca8503/sdk/core/azure-core/src/main/java/com/azure/core/util/FluxUtil.java#L397) together with the error context — so the failure is logged as well as returned?
/**
 * Computes the total encoded size of a binary payload when it is wrapped in an
 * AMQP 1.0 "amqp:data:binary" section.
 *
 * @param binaryRawSize the length of the raw binary data.
 * @return the raw size plus the section's encoding overhead.
 */
private int binaryEncodedSize(int binaryRawSize) {
    // Per the AMQP 1.0 spec, a data section whose length fits in one byte uses the
    // short (vbin8) form with 5 bytes of overhead; larger payloads use the long
    // (vbin32) form with 8 bytes of overhead.
    final int sectionOverhead = (binaryRawSize <= 255) ? 5 : 8;
    return sectionOverhead + binaryRawSize;
}
/**
 * Returns the size of a binary payload after AMQP 1.0 "amqp:data:binary"
 * section encoding is applied.
 *
 * @param binaryRawSize the length of the raw binary data.
 * @return the encoded size, i.e. raw length plus encoding overhead.
 */
private int binaryEncodedSize(int binaryRawSize) {
    if (binaryRawSize > 255) {
        // Length no longer fits in a single byte: the long (vbin32) encoding
        // carries 8 bytes of overhead.
        return binaryRawSize + 8;
    }
    // Short (vbin8) encoding: 5 bytes of overhead.
    return binaryRawSize + 5;
}
class ReactorSender implements AmqpSendLink, AsyncCloseable, AutoCloseable { private static final String DELIVERY_TAG_KEY = "deliveryTag"; private static final String PENDING_SENDS_SIZE_KEY = "pending_sends_size"; private final String entityPath; private final Sender sender; private final SendLinkHandler handler; private final ReactorProvider reactorProvider; private final Disposable.Composite subscriptions; private final AtomicBoolean hasConnected = new AtomicBoolean(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicInteger retryAttempts = new AtomicInteger(); private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final Object pendingSendLock = new Object(); private final ConcurrentHashMap<String, RetriableWorkItem> pendingSendsMap = new ConcurrentHashMap<>(); private final PriorityQueue<WeightedDeliveryTag> pendingSendsQueue = new PriorityQueue<>(1000, new DeliveryTagComparator()); private final ClientLogger logger; private final Flux<AmqpEndpointState> endpointStates; private final TokenManager tokenManager; private final MessageSerializer messageSerializer; private final AmqpRetryPolicy retry; private final AmqpRetryOptions retryOptions; private final String activeTimeoutMessage; private final Scheduler scheduler; private final AmqpMetricsProvider metricsProvider; private final Object errorConditionLock = new Object(); private volatile Exception lastKnownLinkError; private volatile Instant lastKnownErrorReportedAt; private volatile int linkSize; /** * Creates an instance of {@link ReactorSender}. * * @param amqpConnection The parent {@link AmqpConnection} that this sender lives in. * @param entityPath The message broker address for the sender. * @param sender The underlying proton-j sender. * @param handler The proton-j handler associated with the sender. * @param reactorProvider Provider to schedule work on the proton-j reactor. * @param tokenManager Token manager for authorising with the CBS node. 
Can be {@code null} if it is part of the * transaction manager. * @param messageSerializer Serializer to deserialise and serialize AMQP messages. * @param retryOptions Retry options. * @param scheduler Scheduler to schedule send timeout. */ ReactorSender(AmqpConnection amqpConnection, String entityPath, Sender sender, SendLinkHandler handler, ReactorProvider reactorProvider, TokenManager tokenManager, MessageSerializer messageSerializer, AmqpRetryOptions retryOptions, Scheduler scheduler, AmqpMetricsProvider metricsProvider) { this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.sender = Objects.requireNonNull(sender, "'sender' cannot be null."); this.handler = Objects.requireNonNull(handler, "'handler' cannot be null."); this.reactorProvider = Objects.requireNonNull(reactorProvider, "'reactorProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null."); this.scheduler = Objects.requireNonNull(scheduler, "'scheduler' cannot be null."); this.retry = RetryUtil.getRetryPolicy(retryOptions); this.tokenManager = tokenManager; this.metricsProvider = metricsProvider; String connectionId = handler.getConnectionId() == null ? NOT_APPLICABLE : handler.getConnectionId(); String linkName = getLinkName() == null ? 
NOT_APPLICABLE : getLinkName(); Map<String, Object> loggingContext = createContextWithConnectionId(connectionId); loggingContext.put(LINK_NAME_KEY, linkName); loggingContext.put(ENTITY_PATH_KEY, entityPath); this.logger = new ClientLogger(ReactorSender.class, loggingContext); this.activeTimeoutMessage = String.format( "ReactorSender connectionId[%s] linkName[%s]: Waiting for send and receive handler to be ACTIVE", handler.getConnectionId(), handler.getLinkName()); this.endpointStates = this.handler.getEndpointStates() .map(state -> { logger.verbose("State {}", state); this.hasConnected.set(state == EndpointState.ACTIVE); return AmqpEndpointStateUtil.getConnectionState(state); }) .doOnError(error -> { hasConnected.set(false); handleError(error); }) .doOnComplete(() -> { hasConnected.set(false); handleClose(); }) .cache(1); this.subscriptions = Disposables.composite( this.endpointStates.subscribe(), this.handler.getDeliveredMessages().subscribe(this::processDeliveredMessage), this.handler.getLinkCredits().subscribe(credit -> { logger.atVerbose().addKeyValue("credits", credit) .log("Credits on link."); this.scheduleWorkOnDispatcher(); }), amqpConnection.getShutdownSignals().flatMap(signal -> { logger.verbose("Shutdown signal received."); hasConnected.set(false); return closeAsync("Connection shutdown.", null); }).subscribe() ); if (tokenManager != null) { this.subscriptions.add(tokenManager.getAuthorizationResults().onErrorResume(error -> { final Mono<Void> operation = closeAsync(String.format("connectionId[%s] linkName[%s] Token renewal failure. Disposing send " + "link.", amqpConnection.getId(), getLinkName()), new ErrorCondition(Symbol.getSymbol(NOT_ALLOWED.getErrorCondition()), error.getMessage())); return operation.then(Mono.empty()); }).subscribe(response -> { logger.atVerbose().addKeyValue("response", response) .log("Token refreshed."); }, error -> { }, () -> { logger.verbose(" Authorization completed. Disposing."); closeAsync("Authorization completed. 
Disposing.", null).subscribe(); })); } } @Override public Flux<AmqpEndpointState> getEndpointStates() { return endpointStates; } @Override public Mono<Void> send(Message message) { return send(message, null); } @Override public Mono<Void> send(Message message, DeliveryState deliveryState) { if (isDisposed.get()) { return Mono.error(new IllegalStateException(String.format( "connectionId[%s] linkName[%s] Cannot publish message when disposed.", handler.getConnectionId(), getLinkName()))); } return getLinkSize() .flatMap(maxMessageSize -> { final int payloadSize = messageSerializer.getSize(message); final int allocationSize = Math.min(payloadSize + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize); final byte[] bytes = new byte[allocationSize]; int encodedSize; try { encodedSize = message.encode(bytes, 0, allocationSize); } catch (BufferOverflowException exception) { final String errorMessage = String.format(Locale.US, "Error sending. Size of the payload exceeded maximum message size: %s kb", maxMessageSize / 1024); final Throwable error = new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, errorMessage, exception, handler.getErrorContext(sender)); return Mono.error(error); } return send(bytes, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT, deliveryState); }).then(); } @Override public Mono<Void> send(List<Message> messageBatch) { return send(messageBatch, null); } @Override public Mono<Void> send(List<Message> messageBatch, DeliveryState deliveryState) { if (isDisposed.get()) { return Mono.error(new IllegalStateException(String.format( "connectionId[%s] linkName[%s] Cannot publish data batch when disposed.", handler.getConnectionId(), getLinkName()))); } if (messageBatch.size() == 1) { return send(messageBatch.get(0), deliveryState); } return getLinkSize() .flatMap(maxMessageSize -> { int totalEncodedSize = 0; final CompositeReadableBuffer buffer = new CompositeReadableBuffer(); final byte[] envelopBytes = batchEnvelopBytes(messageBatch.get(0), 
maxMessageSize); if (envelopBytes.length > 0) { totalEncodedSize += envelopBytes.length; if (totalEncodedSize > maxMessageSize) { return batchBufferOverflowError(maxMessageSize); } buffer.append(envelopBytes); } for (final Message message : messageBatch) { final byte[] sectionBytes = batchBinaryDataSectionBytes(message, maxMessageSize); if (sectionBytes.length > 0) { totalEncodedSize += sectionBytes.length; if (totalEncodedSize > maxMessageSize) { return batchBufferOverflowError(maxMessageSize); } buffer.append(sectionBytes); } } return send(buffer, AmqpConstants.AMQP_BATCH_MESSAGE_FORMAT, deliveryState); }).then(); } private byte[] batchEnvelopBytes(Message envelopMessage, int maxMessageSize) { final Message message = Proton.message(); message.setMessageAnnotations(envelopMessage.getMessageAnnotations()); final int size = messageSerializer.getSize(message); final int allocationSize = Math.min(size + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize); final byte[] encodedBytes = new byte[allocationSize]; final int encodedSize = message.encode(encodedBytes, 0, allocationSize); return Arrays.copyOf(encodedBytes, encodedSize); } private byte[] batchBinaryDataSectionBytes(Message sectionMessage, int maxMessageSize) { final int size = messageSerializer.getSize(sectionMessage); final int allocationSize = Math.min(size + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize); final byte[] encodedBytes = new byte[allocationSize]; final int encodedSize = sectionMessage.encode(encodedBytes, 0, allocationSize); final Message message = Proton.message(); final Data binaryData = new Data(new Binary(encodedBytes, 0, encodedSize)); message.setBody(binaryData); final int binaryRawSize = binaryData.getValue().getLength(); final int binaryEncodedSize = binaryEncodedSize(binaryRawSize); final byte[] binaryEncodedBytes = new byte[binaryEncodedSize]; message.encode(binaryEncodedBytes, 0, binaryEncodedSize); return binaryEncodedBytes; } private Mono<Void> batchBufferOverflowError(int maxMessageSize) { 
return Mono.error(new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, String.format(Locale.US, "Size of the payload exceeded maximum message size: %s kb", maxMessageSize / 1024), new BufferOverflowException(), handler.getErrorContext(sender))); } /** * Compute the encoded size when encoding a binary data of given size per Amqp 1.0 spec "amqp:data:binary" format. * * @param binaryRawSize the length of the binary data. * @return the encoded size. */ @Override public AmqpErrorContext getErrorContext() { return handler.getErrorContext(sender); } @Override public String getLinkName() { return sender.getName(); } @Override public String getEntityPath() { return entityPath; } @Override public String getHostname() { return handler.getHostname(); } @Override public Mono<Integer> getLinkSize() { if (linkSize > 0) { return Mono.defer(() -> Mono.just(this.linkSize)); } synchronized (this) { if (linkSize > 0) { return Mono.defer(() -> Mono.just(linkSize)); } return RetryUtil.withRetry(getEndpointStates().takeUntil(state -> state == AmqpEndpointState.ACTIVE), retryOptions, activeTimeoutMessage) .then(Mono.fromCallable(() -> { final UnsignedLong remoteMaxMessageSize = sender.getRemoteMaxMessageSize(); if (remoteMaxMessageSize != null) { linkSize = remoteMaxMessageSize.intValue(); } else { logger.warning("Could not get the getRemoteMaxMessageSize. Returning current link size: {}", linkSize); } return linkSize; })); } } @Override public boolean isDisposed() { return isDisposed.get(); } /** * Blocking call that disposes of the sender. * * @see */ @Override public void dispose() { close(); } /** * Blocking call that disposes of the sender. * * @see */ @Override public void close() { closeAsync().block(retryOptions.getTryTimeout()); } @Override public Mono<Void> closeAsync() { return closeAsync("User invoked close operation.", null); } /** * Disposes of the sender. * * @param errorCondition Error condition associated with close operation. 
* @param message Message associated with why the sender was closed. * * @return A mono that completes when the send link has closed. */ Mono<Void> closeAsync(String message, ErrorCondition errorCondition) { if (isDisposed.getAndSet(true)) { return isClosedMono.asMono(); } addErrorCondition(logger.atVerbose(), errorCondition) .log("Setting error condition and disposing. {}", message); final Runnable closeWork = () -> { if (errorCondition != null && sender.getCondition() == null) { sender.setCondition(errorCondition); } sender.close(); }; return Mono.fromRunnable(() -> { try { reactorProvider.getReactorDispatcher().invoke(closeWork); } catch (IOException e) { logger.warning("Could not schedule close work. Running manually. And completing close.", e); closeWork.run(); handleClose(); } catch (RejectedExecutionException e) { logger.info("RejectedExecutionException scheduling close work. And completing close."); closeWork.run(); handleClose(); } }).then(isClosedMono.asMono()) .publishOn(Schedulers.boundedElastic()); } /** * A mono that completes when the sender has completely closed. * * @return mono that completes when the sender has completely closed. 
*/ Mono<Void> isClosed() { return isClosedMono.asMono(); } @Override public Mono<DeliveryState> send(byte[] bytes, int arrayOffset, int messageFormat, DeliveryState deliveryState) { return onEndpointActive().then(Mono.create(sink -> { sendWork(new RetriableWorkItem(bytes, arrayOffset, messageFormat, sink, retryOptions.getTryTimeout(), deliveryState, metricsProvider)); })); } Mono<DeliveryState> send(ReadableBuffer buffer, int messageFormat, DeliveryState deliveryState) { return onEndpointActive().then(Mono.create(sink -> { sendWork(new RetriableWorkItem(buffer, messageFormat, sink, retryOptions.getTryTimeout(), deliveryState, metricsProvider)); })); } private Flux<EndpointState> onEndpointActive() { return RetryUtil.withRetry(handler.getEndpointStates().takeUntil(state -> state == EndpointState.ACTIVE), retryOptions, activeTimeoutMessage); } /** * Add the work item in pending send to be processed on {@link ReactorDispatcher} thread. * * @param workItem to be processed. */ private void sendWork(RetriableWorkItem workItem) { final String deliveryTag = UUID.randomUUID().toString().replace("-", ""); synchronized (pendingSendLock) { this.pendingSendsMap.put(deliveryTag, workItem); this.pendingSendsQueue.offer(new WeightedDeliveryTag(deliveryTag, workItem.hasBeenRetried() ? 1 : 0)); } this.scheduleWorkOnDispatcher(); } /** * Invokes work on the Reactor. Should only be called from ReactorDispatcher.invoke() */ private void processSendWork() { if (!hasConnected.get()) { logger.warning("Not connected. Not processing send work."); return; } if (isDisposed.get()) { logger.info("Sender is closed. 
Not executing work."); return; } while (hasConnected.get() && sender.getCredit() > 0) { final WeightedDeliveryTag weightedDelivery; final RetriableWorkItem workItem; final String deliveryTag; synchronized (pendingSendLock) { weightedDelivery = this.pendingSendsQueue.poll(); if (weightedDelivery != null) { deliveryTag = weightedDelivery.getDeliveryTag(); workItem = this.pendingSendsMap.get(deliveryTag); } else { workItem = null; deliveryTag = null; } } if (workItem == null) { if (deliveryTag != null) { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("sendData not found for this delivery."); } break; } Delivery delivery = null; boolean linkAdvance = false; int sentMsgSize = 0; Exception sendException = null; try { workItem.beforeTry(); delivery = sender.delivery(deliveryTag.getBytes(UTF_8)); delivery.setMessageFormat(workItem.getMessageFormat()); if (workItem.isDeliveryStateProvided()) { delivery.disposition(workItem.getDeliveryState()); } workItem.send(sender); linkAdvance = sender.advance(); } catch (Exception exception) { sendException = exception; } if (linkAdvance) { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("Sent message."); workItem.setWaitingForAck(); scheduler.schedule(new SendTimeout(deliveryTag), retryOptions.getTryTimeout().toMillis(), TimeUnit.MILLISECONDS); } else { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .addKeyValue("sentMessageSize", sentMsgSize) .addKeyValue("payloadActualSize", workItem.getEncodedMessageSize()) .log("Sendlink advance failed."); DeliveryState outcome = null; if (delivery != null) { outcome = delivery.getRemoteState(); delivery.free(); } final AmqpErrorContext context = handler.getErrorContext(sender); final Throwable exception = sendException != null ? new OperationCancelledException(String.format(Locale.US, "Entity(%s): send operation failed. 
Please see cause for more details", entityPath), sendException, context) : new OperationCancelledException(String.format(Locale.US, "Entity(%s): send operation failed while advancing delivery(tag: %s).", entityPath, deliveryTag), context); workItem.error(exception, outcome); } } } private void processDeliveredMessage(Delivery delivery) { final DeliveryState outcome = delivery.getRemoteState(); final String deliveryTag = new String(delivery.getTag(), UTF_8); logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("Process delivered message."); final RetriableWorkItem workItem = pendingSendsMap.remove(deliveryTag); if (workItem == null) { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("Mismatch (or send timed out)."); return; } else if (workItem.isDeliveryStateProvided()) { workItem.success(outcome); return; } if (outcome instanceof Accepted) { synchronized (errorConditionLock) { lastKnownLinkError = null; lastKnownErrorReportedAt = null; retryAttempts.set(0); } workItem.success(outcome); } else if (outcome instanceof Rejected) { final Rejected rejected = (Rejected) outcome; final org.apache.qpid.proton.amqp.transport.ErrorCondition error = rejected.getError(); final Exception exception = ExceptionUtil.toException(error.getCondition().toString(), error.getDescription(), handler.getErrorContext(sender)); logger.atWarning() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .addKeyValue("rejected", rejected) .log("Delivery rejected."); final int retryAttempt; if (isGeneralSendError(error.getCondition())) { synchronized (errorConditionLock) { lastKnownLinkError = exception; lastKnownErrorReportedAt = Instant.now(); retryAttempt = retryAttempts.incrementAndGet(); } } else { retryAttempt = retryAttempts.get(); } final Duration retryInterval = retry.calculateRetryDelay(exception, retryAttempt); if (retryInterval == null || retryInterval.compareTo(workItem.getTimeoutTracker().remaining()) > 0) { cleanupFailedSend(workItem, exception, outcome); } 
else { workItem.setLastKnownException(exception); try { reactorProvider.getReactorDispatcher().invoke(() -> sendWork(workItem), retryInterval); } catch (IOException | RejectedExecutionException schedulerException) { exception.initCause(schedulerException); cleanupFailedSend( workItem, new AmqpException(false, String.format(Locale.US, "Entity(%s): send operation failed while scheduling a" + " retry on Reactor, see cause for more details.", entityPath), schedulerException, handler.getErrorContext(sender)), outcome); } } } else if (outcome instanceof Released) { cleanupFailedSend(workItem, new OperationCancelledException(outcome.toString(), handler.getErrorContext(sender)), outcome); } else if (outcome instanceof Declared) { final Declared declared = (Declared) outcome; workItem.success(declared); } else { cleanupFailedSend(workItem, new AmqpException(false, outcome.toString(), handler.getErrorContext(sender)), outcome); } } private void scheduleWorkOnDispatcher() { try { reactorProvider.getReactorDispatcher().invoke(this::processSendWork); } catch (IOException e) { logger.warning("Error scheduling work on reactor.", e); } catch (RejectedExecutionException e) { logger.info("Error scheduling work on reactor because of RejectedExecutionException."); } } private void cleanupFailedSend(final RetriableWorkItem workItem, final Exception exception, final DeliveryState deliveryState) { workItem.error(exception, deliveryState); } private void completeClose() { isClosedMono.emitEmpty((signalType, result) -> { addSignalTypeAndResult(logger.atWarning(), signalType, result).log("Unable to emit shutdown signal."); return false; }); subscriptions.dispose(); if (tokenManager != null) { tokenManager.close(); } } /** * Clears pending sends and puts an error in there. * * @param error Error to pass to pending sends. */ private void handleError(Throwable error) { synchronized (pendingSendLock) { if (isDisposed.getAndSet(true)) { logger.verbose("This was already disposed. 
Dropping error."); } else { logger.atVerbose() .addKeyValue(PENDING_SENDS_SIZE_KEY, () -> String.valueOf(pendingSendsMap.size())) .log("Disposing pending sends with error."); } pendingSendsMap.forEach((key, value) -> value.error(error, null)); pendingSendsMap.clear(); pendingSendsQueue.clear(); } completeClose(); } private void handleClose() { final String message = String.format("Could not complete sends because link '%s' for '%s' is closed.", getLinkName(), entityPath); final AmqpErrorContext context = handler.getErrorContext(sender); synchronized (pendingSendLock) { if (isDisposed.getAndSet(true)) { logger.verbose("This was already disposed."); } else { logger.atVerbose() .addKeyValue(PENDING_SENDS_SIZE_KEY, () -> String.valueOf(pendingSendsMap.size())) .log("Disposing pending sends."); } pendingSendsMap.forEach((key, value) -> value.error(new AmqpException(true, message, context), null)); pendingSendsMap.clear(); pendingSendsQueue.clear(); } completeClose(); } private static boolean isGeneralSendError(Symbol amqpError) { return (amqpError == AmqpErrorCode.SERVER_BUSY_ERROR || amqpError == AmqpErrorCode.TIMEOUT_ERROR || amqpError == AmqpErrorCode.RESOURCE_LIMIT_EXCEEDED); } private static class WeightedDeliveryTag { private final String deliveryTag; private final int priority; WeightedDeliveryTag(final String deliveryTag, final int priority) { this.deliveryTag = deliveryTag; this.priority = priority; } private String getDeliveryTag() { return this.deliveryTag; } private int getPriority() { return this.priority; } } private static class DeliveryTagComparator implements Comparator<WeightedDeliveryTag>, Serializable { private static final long serialVersionUID = -7057500582037295635L; @Override public int compare(WeightedDeliveryTag deliveryTag0, WeightedDeliveryTag deliveryTag1) { return deliveryTag1.getPriority() - deliveryTag0.getPriority(); } } /** * Keeps track of messages that have been sent, but may not have been acknowledged by the service. 
*/ private class SendTimeout implements Runnable { private final String deliveryTag; SendTimeout(String deliveryTag) { this.deliveryTag = deliveryTag; } @Override public void run() { final RetriableWorkItem workItem = pendingSendsMap.remove(deliveryTag); if (workItem == null) { return; } Exception cause = lastKnownLinkError; final Exception lastError; final Instant lastErrorTime; synchronized (errorConditionLock) { lastError = lastKnownLinkError; lastErrorTime = lastKnownErrorReportedAt; } if (lastError != null && lastErrorTime != null) { final Instant now = Instant.now(); final boolean isLastErrorAfterSleepTime = lastErrorTime.isAfter(now.minusSeconds(SERVER_BUSY_BASE_SLEEP_TIME_IN_SECS)); final boolean isServerBusy = lastError instanceof AmqpException && isLastErrorAfterSleepTime; final boolean isLastErrorAfterOperationTimeout = lastErrorTime.isAfter(now.minus(retryOptions.getTryTimeout())); cause = isServerBusy || isLastErrorAfterOperationTimeout ? lastError : null; } final AmqpException exception; if (cause instanceof AmqpException) { exception = (AmqpException) cause; } else { exception = new AmqpException(true, AmqpErrorCondition.TIMEOUT_ERROR, String.format(Locale.US, "Entity(%s): Send operation timed out", entityPath), handler.getErrorContext(sender)); } workItem.error(exception, null); } } }
class ReactorSender implements AmqpSendLink, AsyncCloseable, AutoCloseable { private static final String DELIVERY_TAG_KEY = "deliveryTag"; private static final String PENDING_SENDS_SIZE_KEY = "pending_sends_size"; private final String entityPath; private final Sender sender; private final SendLinkHandler handler; private final ReactorProvider reactorProvider; private final Disposable.Composite subscriptions; private final AtomicBoolean hasConnected = new AtomicBoolean(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicInteger retryAttempts = new AtomicInteger(); private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final Object pendingSendLock = new Object(); private final ConcurrentHashMap<String, RetriableWorkItem> pendingSendsMap = new ConcurrentHashMap<>(); private final PriorityQueue<WeightedDeliveryTag> pendingSendsQueue = new PriorityQueue<>(1000, new DeliveryTagComparator()); private final ClientLogger logger; private final Flux<AmqpEndpointState> endpointStates; private final TokenManager tokenManager; private final MessageSerializer messageSerializer; private final AmqpRetryPolicy retry; private final AmqpRetryOptions retryOptions; private final String activeTimeoutMessage; private final Scheduler scheduler; private final AmqpMetricsProvider metricsProvider; private final Object errorConditionLock = new Object(); private volatile Exception lastKnownLinkError; private volatile Instant lastKnownErrorReportedAt; private volatile int linkSize; /** * Creates an instance of {@link ReactorSender}. * * @param amqpConnection The parent {@link AmqpConnection} that this sender lives in. * @param entityPath The message broker address for the sender. * @param sender The underlying proton-j sender. * @param handler The proton-j handler associated with the sender. * @param reactorProvider Provider to schedule work on the proton-j reactor. * @param tokenManager Token manager for authorising with the CBS node. 
Can be {@code null} if it is part of the * transaction manager. * @param messageSerializer Serializer to deserialise and serialize AMQP messages. * @param retryOptions Retry options. * @param scheduler Scheduler to schedule send timeout. */ ReactorSender(AmqpConnection amqpConnection, String entityPath, Sender sender, SendLinkHandler handler, ReactorProvider reactorProvider, TokenManager tokenManager, MessageSerializer messageSerializer, AmqpRetryOptions retryOptions, Scheduler scheduler, AmqpMetricsProvider metricsProvider) { this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.sender = Objects.requireNonNull(sender, "'sender' cannot be null."); this.handler = Objects.requireNonNull(handler, "'handler' cannot be null."); this.reactorProvider = Objects.requireNonNull(reactorProvider, "'reactorProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null."); this.scheduler = Objects.requireNonNull(scheduler, "'scheduler' cannot be null."); this.retry = RetryUtil.getRetryPolicy(retryOptions); this.tokenManager = tokenManager; this.metricsProvider = metricsProvider; String connectionId = handler.getConnectionId() == null ? NOT_APPLICABLE : handler.getConnectionId(); String linkName = getLinkName() == null ? 
NOT_APPLICABLE : getLinkName(); Map<String, Object> loggingContext = createContextWithConnectionId(connectionId); loggingContext.put(LINK_NAME_KEY, linkName); loggingContext.put(ENTITY_PATH_KEY, entityPath); this.logger = new ClientLogger(ReactorSender.class, loggingContext); this.activeTimeoutMessage = String.format( "ReactorSender connectionId[%s] linkName[%s]: Waiting for send and receive handler to be ACTIVE", handler.getConnectionId(), handler.getLinkName()); this.endpointStates = this.handler.getEndpointStates() .map(state -> { logger.verbose("State {}", state); this.hasConnected.set(state == EndpointState.ACTIVE); return AmqpEndpointStateUtil.getConnectionState(state); }) .doOnError(error -> { hasConnected.set(false); handleError(error); }) .doOnComplete(() -> { hasConnected.set(false); handleClose(); }) .cache(1); this.subscriptions = Disposables.composite( this.endpointStates.subscribe(), this.handler.getDeliveredMessages().subscribe(this::processDeliveredMessage), this.handler.getLinkCredits().subscribe(credit -> { logger.atVerbose().addKeyValue("credits", credit) .log("Credits on link."); this.scheduleWorkOnDispatcher(); }), amqpConnection.getShutdownSignals().flatMap(signal -> { logger.verbose("Shutdown signal received."); hasConnected.set(false); return closeAsync("Connection shutdown.", null); }).subscribe() ); if (tokenManager != null) { this.subscriptions.add(tokenManager.getAuthorizationResults().onErrorResume(error -> { final Mono<Void> operation = closeAsync(String.format("connectionId[%s] linkName[%s] Token renewal failure. Disposing send " + "link.", amqpConnection.getId(), getLinkName()), new ErrorCondition(Symbol.getSymbol(NOT_ALLOWED.getErrorCondition()), error.getMessage())); return operation.then(Mono.empty()); }).subscribe(response -> { logger.atVerbose().addKeyValue("response", response) .log("Token refreshed."); }, error -> { }, () -> { logger.verbose(" Authorization completed. Disposing."); closeAsync("Authorization completed. 
Disposing.", null).subscribe(); })); } } @Override public Flux<AmqpEndpointState> getEndpointStates() { return endpointStates; } @Override public Mono<Void> send(Message message) { return send(message, null); } @Override public Mono<Void> send(Message message, DeliveryState deliveryState) { if (isDisposed.get()) { return Mono.error(new IllegalStateException(String.format( "connectionId[%s] linkName[%s] Cannot publish message when disposed.", handler.getConnectionId(), getLinkName()))); } return getLinkSize() .flatMap(maxMessageSize -> { final int payloadSize = messageSerializer.getSize(message); final int allocationSize = Math.min(payloadSize + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize); final byte[] bytes = new byte[allocationSize]; int encodedSize; try { encodedSize = message.encode(bytes, 0, allocationSize); } catch (BufferOverflowException exception) { final String errorMessage = String.format(Locale.US, "Error sending. Size of the payload exceeded maximum message size: %s kb", maxMessageSize / 1024); final Throwable error = new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, errorMessage, exception, handler.getErrorContext(sender)); return Mono.error(error); } return send(bytes, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT, deliveryState); }).then(); } @Override public Mono<Void> send(List<Message> messageBatch) { return send(messageBatch, null); } @Override public Mono<Void> send(List<Message> messageBatch, DeliveryState deliveryState) { if (isDisposed.get()) { return Mono.error(new IllegalStateException(String.format( "connectionId[%s] linkName[%s] Cannot publish data batch when disposed.", handler.getConnectionId(), getLinkName()))); } if (messageBatch.size() == 1) { return send(messageBatch.get(0), deliveryState); } return getLinkSize() .flatMap(maxMessageSize -> { int totalEncodedSize = 0; final CompositeReadableBuffer buffer = new CompositeReadableBuffer(); final byte[] envelopBytes = batchEnvelopBytes(messageBatch.get(0), 
maxMessageSize); if (envelopBytes.length > 0) { totalEncodedSize += envelopBytes.length; if (totalEncodedSize > maxMessageSize) { return batchBufferOverflowError(maxMessageSize); } buffer.append(envelopBytes); } for (final Message message : messageBatch) { final byte[] sectionBytes = batchBinaryDataSectionBytes(message, maxMessageSize); if (sectionBytes.length > 0) { totalEncodedSize += sectionBytes.length; if (totalEncodedSize > maxMessageSize) { return batchBufferOverflowError(maxMessageSize); } buffer.append(sectionBytes); } else { logger.info("Ignoring the empty message org.apache.qpid.proton.message.message@{} in the batch.", Integer.toHexString(System.identityHashCode(message))); } } return send(buffer, AmqpConstants.AMQP_BATCH_MESSAGE_FORMAT, deliveryState); }).then(); } private byte[] batchEnvelopBytes(Message envelopMessage, int maxMessageSize) { final Message message = Proton.message(); message.setMessageAnnotations(envelopMessage.getMessageAnnotations()); if ((envelopMessage.getMessageId() instanceof String) && !CoreUtils.isNullOrEmpty((String) envelopMessage.getMessageId())) { message.setMessageId(envelopMessage.getMessageId()); } if (!CoreUtils.isNullOrEmpty(envelopMessage.getGroupId())) { message.setGroupId(envelopMessage.getGroupId()); } final int size = messageSerializer.getSize(message); final int allocationSize = Math.min(size + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize); final byte[] encodedBytes = new byte[allocationSize]; final int encodedSize = message.encode(encodedBytes, 0, allocationSize); return Arrays.copyOf(encodedBytes, encodedSize); } private byte[] batchBinaryDataSectionBytes(Message sectionMessage, int maxMessageSize) { final int size = messageSerializer.getSize(sectionMessage); final int allocationSize = Math.min(size + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize); final byte[] encodedBytes = new byte[allocationSize]; final int encodedSize = sectionMessage.encode(encodedBytes, 0, allocationSize); final Message message = 
Proton.message(); final Data binaryData = new Data(new Binary(encodedBytes, 0, encodedSize)); message.setBody(binaryData); final int binaryRawSize = binaryData.getValue().getLength(); final int binaryEncodedSize = binaryEncodedSize(binaryRawSize); final byte[] binaryEncodedBytes = new byte[binaryEncodedSize]; message.encode(binaryEncodedBytes, 0, binaryEncodedSize); return binaryEncodedBytes; } private Mono<Void> batchBufferOverflowError(int maxMessageSize) { return FluxUtil.monoError(logger, new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, String.format(Locale.US, "Size of the payload exceeded maximum message size: %s kb", maxMessageSize / 1024), new BufferOverflowException(), handler.getErrorContext(sender))); } /** * Compute the encoded size when encoding a binary data of given size per Amqp 1.0 spec "amqp:data:binary" format. * * @param binaryRawSize the length of the binary data. * @return the encoded size. */ @Override public AmqpErrorContext getErrorContext() { return handler.getErrorContext(sender); } @Override public String getLinkName() { return sender.getName(); } @Override public String getEntityPath() { return entityPath; } @Override public String getHostname() { return handler.getHostname(); } @Override public Mono<Integer> getLinkSize() { if (linkSize > 0) { return Mono.defer(() -> Mono.just(this.linkSize)); } synchronized (this) { if (linkSize > 0) { return Mono.defer(() -> Mono.just(linkSize)); } return RetryUtil.withRetry(getEndpointStates().takeUntil(state -> state == AmqpEndpointState.ACTIVE), retryOptions, activeTimeoutMessage) .then(Mono.fromCallable(() -> { final UnsignedLong remoteMaxMessageSize = sender.getRemoteMaxMessageSize(); if (remoteMaxMessageSize != null) { linkSize = remoteMaxMessageSize.intValue(); } else { logger.warning("Could not get the getRemoteMaxMessageSize. 
Returning current link size: {}", linkSize); } return linkSize; })); } } @Override public boolean isDisposed() { return isDisposed.get(); } /** * Blocking call that disposes of the sender. * * @see */ @Override public void dispose() { close(); } /** * Blocking call that disposes of the sender. * * @see */ @Override public void close() { closeAsync().block(retryOptions.getTryTimeout()); } @Override public Mono<Void> closeAsync() { return closeAsync("User invoked close operation.", null); } /** * Disposes of the sender. * * @param errorCondition Error condition associated with close operation. * @param message Message associated with why the sender was closed. * * @return A mono that completes when the send link has closed. */ Mono<Void> closeAsync(String message, ErrorCondition errorCondition) { if (isDisposed.getAndSet(true)) { return isClosedMono.asMono(); } addErrorCondition(logger.atVerbose(), errorCondition) .log("Setting error condition and disposing. {}", message); final Runnable closeWork = () -> { if (errorCondition != null && sender.getCondition() == null) { sender.setCondition(errorCondition); } sender.close(); }; return Mono.fromRunnable(() -> { try { reactorProvider.getReactorDispatcher().invoke(closeWork); } catch (IOException e) { logger.warning("Could not schedule close work. Running manually. And completing close.", e); closeWork.run(); handleClose(); } catch (RejectedExecutionException e) { logger.info("RejectedExecutionException scheduling close work. And completing close."); closeWork.run(); handleClose(); } }).then(isClosedMono.asMono()) .publishOn(Schedulers.boundedElastic()); } /** * A mono that completes when the sender has completely closed. * * @return mono that completes when the sender has completely closed. 
*/ Mono<Void> isClosed() { return isClosedMono.asMono(); } @Override public Mono<DeliveryState> send(byte[] bytes, int arrayOffset, int messageFormat, DeliveryState deliveryState) { return onEndpointActive().then(Mono.create(sink -> { sendWork(new RetriableWorkItem(bytes, arrayOffset, messageFormat, sink, retryOptions.getTryTimeout(), deliveryState, metricsProvider)); })); } Mono<DeliveryState> send(ReadableBuffer buffer, int messageFormat, DeliveryState deliveryState) { return onEndpointActive().then(Mono.create(sink -> { sendWork(new RetriableWorkItem(buffer, messageFormat, sink, retryOptions.getTryTimeout(), deliveryState, metricsProvider)); })); } private Flux<EndpointState> onEndpointActive() { return RetryUtil.withRetry(handler.getEndpointStates().takeUntil(state -> state == EndpointState.ACTIVE), retryOptions, activeTimeoutMessage); } /** * Add the work item in pending send to be processed on {@link ReactorDispatcher} thread. * * @param workItem to be processed. */ private void sendWork(RetriableWorkItem workItem) { final String deliveryTag = UUID.randomUUID().toString().replace("-", ""); synchronized (pendingSendLock) { this.pendingSendsMap.put(deliveryTag, workItem); this.pendingSendsQueue.offer(new WeightedDeliveryTag(deliveryTag, workItem.hasBeenRetried() ? 1 : 0)); } this.scheduleWorkOnDispatcher(); } /** * Invokes work on the Reactor. Should only be called from ReactorDispatcher.invoke() */ private void processSendWork() { if (!hasConnected.get()) { logger.warning("Not connected. Not processing send work."); return; } if (isDisposed.get()) { logger.info("Sender is closed. 
Not executing work."); return; } while (hasConnected.get() && sender.getCredit() > 0) { final WeightedDeliveryTag weightedDelivery; final RetriableWorkItem workItem; final String deliveryTag; synchronized (pendingSendLock) { weightedDelivery = this.pendingSendsQueue.poll(); if (weightedDelivery != null) { deliveryTag = weightedDelivery.getDeliveryTag(); workItem = this.pendingSendsMap.get(deliveryTag); } else { workItem = null; deliveryTag = null; } } if (workItem == null) { if (deliveryTag != null) { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("sendData not found for this delivery."); } break; } Delivery delivery = null; boolean linkAdvance = false; int sentMsgSize = 0; Exception sendException = null; try { workItem.beforeTry(); delivery = sender.delivery(deliveryTag.getBytes(UTF_8)); delivery.setMessageFormat(workItem.getMessageFormat()); if (workItem.isDeliveryStateProvided()) { delivery.disposition(workItem.getDeliveryState()); } workItem.send(sender); linkAdvance = sender.advance(); } catch (Exception exception) { sendException = exception; } if (linkAdvance) { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("Sent message."); workItem.setWaitingForAck(); scheduler.schedule(new SendTimeout(deliveryTag), retryOptions.getTryTimeout().toMillis(), TimeUnit.MILLISECONDS); } else { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .addKeyValue("sentMessageSize", sentMsgSize) .addKeyValue("payloadActualSize", workItem.getEncodedMessageSize()) .log("Sendlink advance failed."); DeliveryState outcome = null; if (delivery != null) { outcome = delivery.getRemoteState(); delivery.free(); } final AmqpErrorContext context = handler.getErrorContext(sender); final Throwable exception = sendException != null ? new OperationCancelledException(String.format(Locale.US, "Entity(%s): send operation failed. 
Please see cause for more details", entityPath), sendException, context) : new OperationCancelledException(String.format(Locale.US, "Entity(%s): send operation failed while advancing delivery(tag: %s).", entityPath, deliveryTag), context); workItem.error(exception, outcome); } } } private void processDeliveredMessage(Delivery delivery) { final DeliveryState outcome = delivery.getRemoteState(); final String deliveryTag = new String(delivery.getTag(), UTF_8); logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("Process delivered message."); final RetriableWorkItem workItem = pendingSendsMap.remove(deliveryTag); if (workItem == null) { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("Mismatch (or send timed out)."); return; } else if (workItem.isDeliveryStateProvided()) { workItem.success(outcome); return; } if (outcome instanceof Accepted) { synchronized (errorConditionLock) { lastKnownLinkError = null; lastKnownErrorReportedAt = null; retryAttempts.set(0); } workItem.success(outcome); } else if (outcome instanceof Rejected) { final Rejected rejected = (Rejected) outcome; final org.apache.qpid.proton.amqp.transport.ErrorCondition error = rejected.getError(); final Exception exception = ExceptionUtil.toException(error.getCondition().toString(), error.getDescription(), handler.getErrorContext(sender)); logger.atWarning() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .addKeyValue("rejected", rejected) .log("Delivery rejected."); final int retryAttempt; if (isGeneralSendError(error.getCondition())) { synchronized (errorConditionLock) { lastKnownLinkError = exception; lastKnownErrorReportedAt = Instant.now(); retryAttempt = retryAttempts.incrementAndGet(); } } else { retryAttempt = retryAttempts.get(); } final Duration retryInterval = retry.calculateRetryDelay(exception, retryAttempt); if (retryInterval == null || retryInterval.compareTo(workItem.getTimeoutTracker().remaining()) > 0) { cleanupFailedSend(workItem, exception, outcome); } 
else { workItem.setLastKnownException(exception); try { reactorProvider.getReactorDispatcher().invoke(() -> sendWork(workItem), retryInterval); } catch (IOException | RejectedExecutionException schedulerException) { exception.initCause(schedulerException); cleanupFailedSend( workItem, new AmqpException(false, String.format(Locale.US, "Entity(%s): send operation failed while scheduling a" + " retry on Reactor, see cause for more details.", entityPath), schedulerException, handler.getErrorContext(sender)), outcome); } } } else if (outcome instanceof Released) { cleanupFailedSend(workItem, new OperationCancelledException(outcome.toString(), handler.getErrorContext(sender)), outcome); } else if (outcome instanceof Declared) { final Declared declared = (Declared) outcome; workItem.success(declared); } else { cleanupFailedSend(workItem, new AmqpException(false, outcome.toString(), handler.getErrorContext(sender)), outcome); } } private void scheduleWorkOnDispatcher() { try { reactorProvider.getReactorDispatcher().invoke(this::processSendWork); } catch (IOException e) { logger.warning("Error scheduling work on reactor.", e); } catch (RejectedExecutionException e) { logger.info("Error scheduling work on reactor because of RejectedExecutionException."); } } private void cleanupFailedSend(final RetriableWorkItem workItem, final Exception exception, final DeliveryState deliveryState) { workItem.error(exception, deliveryState); } private void completeClose() { isClosedMono.emitEmpty((signalType, result) -> { addSignalTypeAndResult(logger.atWarning(), signalType, result).log("Unable to emit shutdown signal."); return false; }); subscriptions.dispose(); if (tokenManager != null) { tokenManager.close(); } } /** * Clears pending sends and puts an error in there. * * @param error Error to pass to pending sends. */ private void handleError(Throwable error) { synchronized (pendingSendLock) { if (isDisposed.getAndSet(true)) { logger.verbose("This was already disposed. 
Dropping error."); } else { logger.atVerbose() .addKeyValue(PENDING_SENDS_SIZE_KEY, () -> String.valueOf(pendingSendsMap.size())) .log("Disposing pending sends with error."); } pendingSendsMap.forEach((key, value) -> value.error(error, null)); pendingSendsMap.clear(); pendingSendsQueue.clear(); } completeClose(); } private void handleClose() { final String message = String.format("Could not complete sends because link '%s' for '%s' is closed.", getLinkName(), entityPath); final AmqpErrorContext context = handler.getErrorContext(sender); synchronized (pendingSendLock) { if (isDisposed.getAndSet(true)) { logger.verbose("This was already disposed."); } else { logger.atVerbose() .addKeyValue(PENDING_SENDS_SIZE_KEY, () -> String.valueOf(pendingSendsMap.size())) .log("Disposing pending sends."); } pendingSendsMap.forEach((key, value) -> value.error(new AmqpException(true, message, context), null)); pendingSendsMap.clear(); pendingSendsQueue.clear(); } completeClose(); } private static boolean isGeneralSendError(Symbol amqpError) { return (amqpError == AmqpErrorCode.SERVER_BUSY_ERROR || amqpError == AmqpErrorCode.TIMEOUT_ERROR || amqpError == AmqpErrorCode.RESOURCE_LIMIT_EXCEEDED); } private static class WeightedDeliveryTag { private final String deliveryTag; private final int priority; WeightedDeliveryTag(final String deliveryTag, final int priority) { this.deliveryTag = deliveryTag; this.priority = priority; } private String getDeliveryTag() { return this.deliveryTag; } private int getPriority() { return this.priority; } } private static class DeliveryTagComparator implements Comparator<WeightedDeliveryTag>, Serializable { private static final long serialVersionUID = -7057500582037295635L; @Override public int compare(WeightedDeliveryTag deliveryTag0, WeightedDeliveryTag deliveryTag1) { return deliveryTag1.getPriority() - deliveryTag0.getPriority(); } } /** * Keeps track of messages that have been sent, but may not have been acknowledged by the service. 
*/ private class SendTimeout implements Runnable { private final String deliveryTag; SendTimeout(String deliveryTag) { this.deliveryTag = deliveryTag; } @Override public void run() { final RetriableWorkItem workItem = pendingSendsMap.remove(deliveryTag); if (workItem == null) { return; } Exception cause = lastKnownLinkError; final Exception lastError; final Instant lastErrorTime; synchronized (errorConditionLock) { lastError = lastKnownLinkError; lastErrorTime = lastKnownErrorReportedAt; } if (lastError != null && lastErrorTime != null) { final Instant now = Instant.now(); final boolean isLastErrorAfterSleepTime = lastErrorTime.isAfter(now.minusSeconds(SERVER_BUSY_BASE_SLEEP_TIME_IN_SECS)); final boolean isServerBusy = lastError instanceof AmqpException && isLastErrorAfterSleepTime; final boolean isLastErrorAfterOperationTimeout = lastErrorTime.isAfter(now.minus(retryOptions.getTryTimeout())); cause = isServerBusy || isLastErrorAfterOperationTimeout ? lastError : null; } final AmqpException exception; if (cause instanceof AmqpException) { exception = (AmqpException) cause; } else { exception = new AmqpException(true, AmqpErrorCondition.TIMEOUT_ERROR, String.format(Locale.US, "Entity(%s): Send operation timed out", entityPath), handler.getErrorContext(sender)); } workItem.error(exception, null); } } }
Unfortunately, we cannot use a limit-applied (Heap)ByteBuffer here, because CompositeReadableBuffer (CRB) has no API for appending a ByteBuffer: CRB requires every chunk it wraps to be a byte[] of exactly the encoded size.
/**
 * Encodes one batch member as an AMQP "amqp:data:binary" section whose payload is the member's own
 * fully-encoded bytes, returning an exact-size byte[] suitable for appending to a CompositeReadableBuffer.
 *
 * @param sectionMessage the message to wrap as a binary data section.
 * @param maxMessageSize the maximum message size permitted on the link; caps the scratch allocation.
 * @return the encoded data-section bytes, sized exactly to the encoded length.
 */
private byte[] batchBinaryDataSectionBytes(Message sectionMessage, int maxMessageSize) {
    // Scratch buffer: serialized size plus header slack, never beyond the link's max message size.
    final int size = messageSerializer.getSize(sectionMessage);
    final int allocationSize = Math.min(size + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize);
    final byte[] encodedBytes = new byte[allocationSize];
    final int encodedSize = sectionMessage.encode(encodedBytes, 0, allocationSize);
    // Wrap the encoded message in a bare envelope whose body is a Data (binary) section.
    final Message message = Proton.message();
    final Data binaryData = new Data(new Binary(encodedBytes, 0, encodedSize));
    message.setBody(binaryData);
    // The section's total encoded length is computable from the raw payload length, so the output
    // array can be allocated at exactly the right size (no trailing copy needed).
    final int binaryRawSize = binaryData.getValue().getLength();
    final int binaryEncodedSize = binaryEncodedSize(binaryRawSize);
    final byte[] binaryEncodedBytes = new byte[binaryEncodedSize];
    message.encode(binaryEncodedBytes, 0, binaryEncodedSize);
    return binaryEncodedBytes;
}
/**
 * Produces the "amqp:data:binary" section bytes for a single batch member: the member is encoded, the
 * encoded bytes become the payload of a Data section, and that section is itself encoded into an
 * exact-size array.
 *
 * @param sectionMessage the message to wrap as a binary data section.
 * @param maxMessageSize the maximum message size permitted on the link; caps the scratch allocation.
 * @return the encoded data-section bytes, sized exactly to the encoded length.
 */
private byte[] batchBinaryDataSectionBytes(Message sectionMessage, int maxMessageSize) {
    final int estimatedSize = messageSerializer.getSize(sectionMessage);
    final int scratchCapacity = Math.min(estimatedSize + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize);

    // First pass: serialize the member itself into a scratch buffer.
    final byte[] scratch = new byte[scratchCapacity];
    final int bytesWritten = sectionMessage.encode(scratch, 0, scratchCapacity);

    // Second pass: wrap those bytes as the binary payload of a Data-section envelope.
    final Data dataSection = new Data(new Binary(scratch, 0, bytesWritten));
    final Message wrapper = Proton.message();
    wrapper.setBody(dataSection);

    // The section length follows directly from the payload length, so allocate the result exactly.
    final int payloadLength = dataSection.getValue().getLength();
    final int sectionLength = binaryEncodedSize(payloadLength);
    final byte[] sectionBytes = new byte[sectionLength];
    wrapper.encode(sectionBytes, 0, sectionLength);
    return sectionBytes;
}
class ReactorSender implements AmqpSendLink, AsyncCloseable, AutoCloseable { private static final String DELIVERY_TAG_KEY = "deliveryTag"; private static final String PENDING_SENDS_SIZE_KEY = "pending_sends_size"; private final String entityPath; private final Sender sender; private final SendLinkHandler handler; private final ReactorProvider reactorProvider; private final Disposable.Composite subscriptions; private final AtomicBoolean hasConnected = new AtomicBoolean(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicInteger retryAttempts = new AtomicInteger(); private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final Object pendingSendLock = new Object(); private final ConcurrentHashMap<String, RetriableWorkItem> pendingSendsMap = new ConcurrentHashMap<>(); private final PriorityQueue<WeightedDeliveryTag> pendingSendsQueue = new PriorityQueue<>(1000, new DeliveryTagComparator()); private final ClientLogger logger; private final Flux<AmqpEndpointState> endpointStates; private final TokenManager tokenManager; private final MessageSerializer messageSerializer; private final AmqpRetryPolicy retry; private final AmqpRetryOptions retryOptions; private final String activeTimeoutMessage; private final Scheduler scheduler; private final AmqpMetricsProvider metricsProvider; private final Object errorConditionLock = new Object(); private volatile Exception lastKnownLinkError; private volatile Instant lastKnownErrorReportedAt; private volatile int linkSize; /** * Creates an instance of {@link ReactorSender}. * * @param amqpConnection The parent {@link AmqpConnection} that this sender lives in. * @param entityPath The message broker address for the sender. * @param sender The underlying proton-j sender. * @param handler The proton-j handler associated with the sender. * @param reactorProvider Provider to schedule work on the proton-j reactor. * @param tokenManager Token manager for authorising with the CBS node. 
Can be {@code null} if it is part of the * transaction manager. * @param messageSerializer Serializer to deserialise and serialize AMQP messages. * @param retryOptions Retry options. * @param scheduler Scheduler to schedule send timeout. */ ReactorSender(AmqpConnection amqpConnection, String entityPath, Sender sender, SendLinkHandler handler, ReactorProvider reactorProvider, TokenManager tokenManager, MessageSerializer messageSerializer, AmqpRetryOptions retryOptions, Scheduler scheduler, AmqpMetricsProvider metricsProvider) { this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.sender = Objects.requireNonNull(sender, "'sender' cannot be null."); this.handler = Objects.requireNonNull(handler, "'handler' cannot be null."); this.reactorProvider = Objects.requireNonNull(reactorProvider, "'reactorProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null."); this.scheduler = Objects.requireNonNull(scheduler, "'scheduler' cannot be null."); this.retry = RetryUtil.getRetryPolicy(retryOptions); this.tokenManager = tokenManager; this.metricsProvider = metricsProvider; String connectionId = handler.getConnectionId() == null ? NOT_APPLICABLE : handler.getConnectionId(); String linkName = getLinkName() == null ? 
NOT_APPLICABLE : getLinkName(); Map<String, Object> loggingContext = createContextWithConnectionId(connectionId); loggingContext.put(LINK_NAME_KEY, linkName); loggingContext.put(ENTITY_PATH_KEY, entityPath); this.logger = new ClientLogger(ReactorSender.class, loggingContext); this.activeTimeoutMessage = String.format( "ReactorSender connectionId[%s] linkName[%s]: Waiting for send and receive handler to be ACTIVE", handler.getConnectionId(), handler.getLinkName()); this.endpointStates = this.handler.getEndpointStates() .map(state -> { logger.verbose("State {}", state); this.hasConnected.set(state == EndpointState.ACTIVE); return AmqpEndpointStateUtil.getConnectionState(state); }) .doOnError(error -> { hasConnected.set(false); handleError(error); }) .doOnComplete(() -> { hasConnected.set(false); handleClose(); }) .cache(1); this.subscriptions = Disposables.composite( this.endpointStates.subscribe(), this.handler.getDeliveredMessages().subscribe(this::processDeliveredMessage), this.handler.getLinkCredits().subscribe(credit -> { logger.atVerbose().addKeyValue("credits", credit) .log("Credits on link."); this.scheduleWorkOnDispatcher(); }), amqpConnection.getShutdownSignals().flatMap(signal -> { logger.verbose("Shutdown signal received."); hasConnected.set(false); return closeAsync("Connection shutdown.", null); }).subscribe() ); if (tokenManager != null) { this.subscriptions.add(tokenManager.getAuthorizationResults().onErrorResume(error -> { final Mono<Void> operation = closeAsync(String.format("connectionId[%s] linkName[%s] Token renewal failure. Disposing send " + "link.", amqpConnection.getId(), getLinkName()), new ErrorCondition(Symbol.getSymbol(NOT_ALLOWED.getErrorCondition()), error.getMessage())); return operation.then(Mono.empty()); }).subscribe(response -> { logger.atVerbose().addKeyValue("response", response) .log("Token refreshed."); }, error -> { }, () -> { logger.verbose(" Authorization completed. Disposing."); closeAsync("Authorization completed. 
Disposing.", null).subscribe(); })); } } @Override public Flux<AmqpEndpointState> getEndpointStates() { return endpointStates; } @Override public Mono<Void> send(Message message) { return send(message, null); } @Override public Mono<Void> send(Message message, DeliveryState deliveryState) { if (isDisposed.get()) { return Mono.error(new IllegalStateException(String.format( "connectionId[%s] linkName[%s] Cannot publish message when disposed.", handler.getConnectionId(), getLinkName()))); } return getLinkSize() .flatMap(maxMessageSize -> { final int payloadSize = messageSerializer.getSize(message); final int allocationSize = Math.min(payloadSize + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize); final byte[] bytes = new byte[allocationSize]; int encodedSize; try { encodedSize = message.encode(bytes, 0, allocationSize); } catch (BufferOverflowException exception) { final String errorMessage = String.format(Locale.US, "Error sending. Size of the payload exceeded maximum message size: %s kb", maxMessageSize / 1024); final Throwable error = new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, errorMessage, exception, handler.getErrorContext(sender)); return Mono.error(error); } return send(bytes, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT, deliveryState); }).then(); } @Override public Mono<Void> send(List<Message> messageBatch) { return send(messageBatch, null); } @Override public Mono<Void> send(List<Message> messageBatch, DeliveryState deliveryState) { if (isDisposed.get()) { return Mono.error(new IllegalStateException(String.format( "connectionId[%s] linkName[%s] Cannot publish data batch when disposed.", handler.getConnectionId(), getLinkName()))); } if (messageBatch.size() == 1) { return send(messageBatch.get(0), deliveryState); } return getLinkSize() .flatMap(maxMessageSize -> { int totalEncodedSize = 0; final CompositeReadableBuffer buffer = new CompositeReadableBuffer(); final byte[] envelopBytes = batchEnvelopBytes(messageBatch.get(0), 
maxMessageSize); if (envelopBytes.length > 0) { totalEncodedSize += envelopBytes.length; if (totalEncodedSize > maxMessageSize) { return batchBufferOverflowError(maxMessageSize); } buffer.append(envelopBytes); } for (final Message message : messageBatch) { final byte[] sectionBytes = batchBinaryDataSectionBytes(message, maxMessageSize); if (sectionBytes.length > 0) { totalEncodedSize += sectionBytes.length; if (totalEncodedSize > maxMessageSize) { return batchBufferOverflowError(maxMessageSize); } buffer.append(sectionBytes); } } return send(buffer, AmqpConstants.AMQP_BATCH_MESSAGE_FORMAT, deliveryState); }).then(); } private byte[] batchEnvelopBytes(Message envelopMessage, int maxMessageSize) { final Message message = Proton.message(); message.setMessageAnnotations(envelopMessage.getMessageAnnotations()); final int size = messageSerializer.getSize(message); final int allocationSize = Math.min(size + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize); final byte[] encodedBytes = new byte[allocationSize]; final int encodedSize = message.encode(encodedBytes, 0, allocationSize); return Arrays.copyOf(encodedBytes, encodedSize); } private Mono<Void> batchBufferOverflowError(int maxMessageSize) { return Mono.error(new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, String.format(Locale.US, "Size of the payload exceeded maximum message size: %s kb", maxMessageSize / 1024), new BufferOverflowException(), handler.getErrorContext(sender))); } /** * Compute the encoded size when encoding a binary data of given size per Amqp 1.0 spec "amqp:data:binary" format. * * @param binaryRawSize the length of the binary data. * @return the encoded size. 
*/ private int binaryEncodedSize(int binaryRawSize) { if (binaryRawSize <= 255) { return 5 + binaryRawSize; } else { return 8 + binaryRawSize; } } @Override public AmqpErrorContext getErrorContext() { return handler.getErrorContext(sender); } @Override public String getLinkName() { return sender.getName(); } @Override public String getEntityPath() { return entityPath; } @Override public String getHostname() { return handler.getHostname(); } @Override public Mono<Integer> getLinkSize() { if (linkSize > 0) { return Mono.defer(() -> Mono.just(this.linkSize)); } synchronized (this) { if (linkSize > 0) { return Mono.defer(() -> Mono.just(linkSize)); } return RetryUtil.withRetry(getEndpointStates().takeUntil(state -> state == AmqpEndpointState.ACTIVE), retryOptions, activeTimeoutMessage) .then(Mono.fromCallable(() -> { final UnsignedLong remoteMaxMessageSize = sender.getRemoteMaxMessageSize(); if (remoteMaxMessageSize != null) { linkSize = remoteMaxMessageSize.intValue(); } else { logger.warning("Could not get the getRemoteMaxMessageSize. Returning current link size: {}", linkSize); } return linkSize; })); } } @Override public boolean isDisposed() { return isDisposed.get(); } /** * Blocking call that disposes of the sender. * * @see */ @Override public void dispose() { close(); } /** * Blocking call that disposes of the sender. * * @see */ @Override public void close() { closeAsync().block(retryOptions.getTryTimeout()); } @Override public Mono<Void> closeAsync() { return closeAsync("User invoked close operation.", null); } /** * Disposes of the sender. * * @param errorCondition Error condition associated with close operation. * @param message Message associated with why the sender was closed. * * @return A mono that completes when the send link has closed. 
*/ Mono<Void> closeAsync(String message, ErrorCondition errorCondition) { if (isDisposed.getAndSet(true)) { return isClosedMono.asMono(); } addErrorCondition(logger.atVerbose(), errorCondition) .log("Setting error condition and disposing. {}", message); final Runnable closeWork = () -> { if (errorCondition != null && sender.getCondition() == null) { sender.setCondition(errorCondition); } sender.close(); }; return Mono.fromRunnable(() -> { try { reactorProvider.getReactorDispatcher().invoke(closeWork); } catch (IOException e) { logger.warning("Could not schedule close work. Running manually. And completing close.", e); closeWork.run(); handleClose(); } catch (RejectedExecutionException e) { logger.info("RejectedExecutionException scheduling close work. And completing close."); closeWork.run(); handleClose(); } }).then(isClosedMono.asMono()) .publishOn(Schedulers.boundedElastic()); } /** * A mono that completes when the sender has completely closed. * * @return mono that completes when the sender has completely closed. */ Mono<Void> isClosed() { return isClosedMono.asMono(); } @Override public Mono<DeliveryState> send(byte[] bytes, int arrayOffset, int messageFormat, DeliveryState deliveryState) { return onEndpointActive().then(Mono.create(sink -> { sendWork(new RetriableWorkItem(bytes, arrayOffset, messageFormat, sink, retryOptions.getTryTimeout(), deliveryState, metricsProvider)); })); } Mono<DeliveryState> send(ReadableBuffer buffer, int messageFormat, DeliveryState deliveryState) { return onEndpointActive().then(Mono.create(sink -> { sendWork(new RetriableWorkItem(buffer, messageFormat, sink, retryOptions.getTryTimeout(), deliveryState, metricsProvider)); })); } private Flux<EndpointState> onEndpointActive() { return RetryUtil.withRetry(handler.getEndpointStates().takeUntil(state -> state == EndpointState.ACTIVE), retryOptions, activeTimeoutMessage); } /** * Add the work item in pending send to be processed on {@link ReactorDispatcher} thread. 
* * @param workItem to be processed. */ private void sendWork(RetriableWorkItem workItem) { final String deliveryTag = UUID.randomUUID().toString().replace("-", ""); synchronized (pendingSendLock) { this.pendingSendsMap.put(deliveryTag, workItem); this.pendingSendsQueue.offer(new WeightedDeliveryTag(deliveryTag, workItem.hasBeenRetried() ? 1 : 0)); } this.scheduleWorkOnDispatcher(); } /** * Invokes work on the Reactor. Should only be called from ReactorDispatcher.invoke() */ private void processSendWork() { if (!hasConnected.get()) { logger.warning("Not connected. Not processing send work."); return; } if (isDisposed.get()) { logger.info("Sender is closed. Not executing work."); return; } while (hasConnected.get() && sender.getCredit() > 0) { final WeightedDeliveryTag weightedDelivery; final RetriableWorkItem workItem; final String deliveryTag; synchronized (pendingSendLock) { weightedDelivery = this.pendingSendsQueue.poll(); if (weightedDelivery != null) { deliveryTag = weightedDelivery.getDeliveryTag(); workItem = this.pendingSendsMap.get(deliveryTag); } else { workItem = null; deliveryTag = null; } } if (workItem == null) { if (deliveryTag != null) { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("sendData not found for this delivery."); } break; } Delivery delivery = null; boolean linkAdvance = false; int sentMsgSize = 0; Exception sendException = null; try { workItem.beforeTry(); delivery = sender.delivery(deliveryTag.getBytes(UTF_8)); delivery.setMessageFormat(workItem.getMessageFormat()); if (workItem.isDeliveryStateProvided()) { delivery.disposition(workItem.getDeliveryState()); } workItem.send(sender); linkAdvance = sender.advance(); } catch (Exception exception) { sendException = exception; } if (linkAdvance) { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("Sent message."); workItem.setWaitingForAck(); scheduler.schedule(new SendTimeout(deliveryTag), retryOptions.getTryTimeout().toMillis(), 
TimeUnit.MILLISECONDS); } else { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .addKeyValue("sentMessageSize", sentMsgSize) .addKeyValue("payloadActualSize", workItem.getEncodedMessageSize()) .log("Sendlink advance failed."); DeliveryState outcome = null; if (delivery != null) { outcome = delivery.getRemoteState(); delivery.free(); } final AmqpErrorContext context = handler.getErrorContext(sender); final Throwable exception = sendException != null ? new OperationCancelledException(String.format(Locale.US, "Entity(%s): send operation failed. Please see cause for more details", entityPath), sendException, context) : new OperationCancelledException(String.format(Locale.US, "Entity(%s): send operation failed while advancing delivery(tag: %s).", entityPath, deliveryTag), context); workItem.error(exception, outcome); } } } private void processDeliveredMessage(Delivery delivery) { final DeliveryState outcome = delivery.getRemoteState(); final String deliveryTag = new String(delivery.getTag(), UTF_8); logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("Process delivered message."); final RetriableWorkItem workItem = pendingSendsMap.remove(deliveryTag); if (workItem == null) { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("Mismatch (or send timed out)."); return; } else if (workItem.isDeliveryStateProvided()) { workItem.success(outcome); return; } if (outcome instanceof Accepted) { synchronized (errorConditionLock) { lastKnownLinkError = null; lastKnownErrorReportedAt = null; retryAttempts.set(0); } workItem.success(outcome); } else if (outcome instanceof Rejected) { final Rejected rejected = (Rejected) outcome; final org.apache.qpid.proton.amqp.transport.ErrorCondition error = rejected.getError(); final Exception exception = ExceptionUtil.toException(error.getCondition().toString(), error.getDescription(), handler.getErrorContext(sender)); logger.atWarning() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) 
.addKeyValue("rejected", rejected) .log("Delivery rejected."); final int retryAttempt; if (isGeneralSendError(error.getCondition())) { synchronized (errorConditionLock) { lastKnownLinkError = exception; lastKnownErrorReportedAt = Instant.now(); retryAttempt = retryAttempts.incrementAndGet(); } } else { retryAttempt = retryAttempts.get(); } final Duration retryInterval = retry.calculateRetryDelay(exception, retryAttempt); if (retryInterval == null || retryInterval.compareTo(workItem.getTimeoutTracker().remaining()) > 0) { cleanupFailedSend(workItem, exception, outcome); } else { workItem.setLastKnownException(exception); try { reactorProvider.getReactorDispatcher().invoke(() -> sendWork(workItem), retryInterval); } catch (IOException | RejectedExecutionException schedulerException) { exception.initCause(schedulerException); cleanupFailedSend( workItem, new AmqpException(false, String.format(Locale.US, "Entity(%s): send operation failed while scheduling a" + " retry on Reactor, see cause for more details.", entityPath), schedulerException, handler.getErrorContext(sender)), outcome); } } } else if (outcome instanceof Released) { cleanupFailedSend(workItem, new OperationCancelledException(outcome.toString(), handler.getErrorContext(sender)), outcome); } else if (outcome instanceof Declared) { final Declared declared = (Declared) outcome; workItem.success(declared); } else { cleanupFailedSend(workItem, new AmqpException(false, outcome.toString(), handler.getErrorContext(sender)), outcome); } } private void scheduleWorkOnDispatcher() { try { reactorProvider.getReactorDispatcher().invoke(this::processSendWork); } catch (IOException e) { logger.warning("Error scheduling work on reactor.", e); } catch (RejectedExecutionException e) { logger.info("Error scheduling work on reactor because of RejectedExecutionException."); } } private void cleanupFailedSend(final RetriableWorkItem workItem, final Exception exception, final DeliveryState deliveryState) { 
workItem.error(exception, deliveryState); } private void completeClose() { isClosedMono.emitEmpty((signalType, result) -> { addSignalTypeAndResult(logger.atWarning(), signalType, result).log("Unable to emit shutdown signal."); return false; }); subscriptions.dispose(); if (tokenManager != null) { tokenManager.close(); } } /** * Clears pending sends and puts an error in there. * * @param error Error to pass to pending sends. */ private void handleError(Throwable error) { synchronized (pendingSendLock) { if (isDisposed.getAndSet(true)) { logger.verbose("This was already disposed. Dropping error."); } else { logger.atVerbose() .addKeyValue(PENDING_SENDS_SIZE_KEY, () -> String.valueOf(pendingSendsMap.size())) .log("Disposing pending sends with error."); } pendingSendsMap.forEach((key, value) -> value.error(error, null)); pendingSendsMap.clear(); pendingSendsQueue.clear(); } completeClose(); } private void handleClose() { final String message = String.format("Could not complete sends because link '%s' for '%s' is closed.", getLinkName(), entityPath); final AmqpErrorContext context = handler.getErrorContext(sender); synchronized (pendingSendLock) { if (isDisposed.getAndSet(true)) { logger.verbose("This was already disposed."); } else { logger.atVerbose() .addKeyValue(PENDING_SENDS_SIZE_KEY, () -> String.valueOf(pendingSendsMap.size())) .log("Disposing pending sends."); } pendingSendsMap.forEach((key, value) -> value.error(new AmqpException(true, message, context), null)); pendingSendsMap.clear(); pendingSendsQueue.clear(); } completeClose(); } private static boolean isGeneralSendError(Symbol amqpError) { return (amqpError == AmqpErrorCode.SERVER_BUSY_ERROR || amqpError == AmqpErrorCode.TIMEOUT_ERROR || amqpError == AmqpErrorCode.RESOURCE_LIMIT_EXCEEDED); } private static class WeightedDeliveryTag { private final String deliveryTag; private final int priority; WeightedDeliveryTag(final String deliveryTag, final int priority) { this.deliveryTag = deliveryTag; this.priority 
= priority; } private String getDeliveryTag() { return this.deliveryTag; } private int getPriority() { return this.priority; } } private static class DeliveryTagComparator implements Comparator<WeightedDeliveryTag>, Serializable { private static final long serialVersionUID = -7057500582037295635L; @Override public int compare(WeightedDeliveryTag deliveryTag0, WeightedDeliveryTag deliveryTag1) { return deliveryTag1.getPriority() - deliveryTag0.getPriority(); } } /** * Keeps track of messages that have been sent, but may not have been acknowledged by the service. */ private class SendTimeout implements Runnable { private final String deliveryTag; SendTimeout(String deliveryTag) { this.deliveryTag = deliveryTag; } @Override public void run() { final RetriableWorkItem workItem = pendingSendsMap.remove(deliveryTag); if (workItem == null) { return; } Exception cause = lastKnownLinkError; final Exception lastError; final Instant lastErrorTime; synchronized (errorConditionLock) { lastError = lastKnownLinkError; lastErrorTime = lastKnownErrorReportedAt; } if (lastError != null && lastErrorTime != null) { final Instant now = Instant.now(); final boolean isLastErrorAfterSleepTime = lastErrorTime.isAfter(now.minusSeconds(SERVER_BUSY_BASE_SLEEP_TIME_IN_SECS)); final boolean isServerBusy = lastError instanceof AmqpException && isLastErrorAfterSleepTime; final boolean isLastErrorAfterOperationTimeout = lastErrorTime.isAfter(now.minus(retryOptions.getTryTimeout())); cause = isServerBusy || isLastErrorAfterOperationTimeout ? lastError : null; } final AmqpException exception; if (cause instanceof AmqpException) { exception = (AmqpException) cause; } else { exception = new AmqpException(true, AmqpErrorCondition.TIMEOUT_ERROR, String.format(Locale.US, "Entity(%s): Send operation timed out", entityPath), handler.getErrorContext(sender)); } workItem.error(exception, null); } } }
class ReactorSender implements AmqpSendLink, AsyncCloseable, AutoCloseable { private static final String DELIVERY_TAG_KEY = "deliveryTag"; private static final String PENDING_SENDS_SIZE_KEY = "pending_sends_size"; private final String entityPath; private final Sender sender; private final SendLinkHandler handler; private final ReactorProvider reactorProvider; private final Disposable.Composite subscriptions; private final AtomicBoolean hasConnected = new AtomicBoolean(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicInteger retryAttempts = new AtomicInteger(); private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final Object pendingSendLock = new Object(); private final ConcurrentHashMap<String, RetriableWorkItem> pendingSendsMap = new ConcurrentHashMap<>(); private final PriorityQueue<WeightedDeliveryTag> pendingSendsQueue = new PriorityQueue<>(1000, new DeliveryTagComparator()); private final ClientLogger logger; private final Flux<AmqpEndpointState> endpointStates; private final TokenManager tokenManager; private final MessageSerializer messageSerializer; private final AmqpRetryPolicy retry; private final AmqpRetryOptions retryOptions; private final String activeTimeoutMessage; private final Scheduler scheduler; private final AmqpMetricsProvider metricsProvider; private final Object errorConditionLock = new Object(); private volatile Exception lastKnownLinkError; private volatile Instant lastKnownErrorReportedAt; private volatile int linkSize; /** * Creates an instance of {@link ReactorSender}. * * @param amqpConnection The parent {@link AmqpConnection} that this sender lives in. * @param entityPath The message broker address for the sender. * @param sender The underlying proton-j sender. * @param handler The proton-j handler associated with the sender. * @param reactorProvider Provider to schedule work on the proton-j reactor. * @param tokenManager Token manager for authorising with the CBS node. 
Can be {@code null} if it is part of the * transaction manager. * @param messageSerializer Serializer to deserialise and serialize AMQP messages. * @param retryOptions Retry options. * @param scheduler Scheduler to schedule send timeout. */ ReactorSender(AmqpConnection amqpConnection, String entityPath, Sender sender, SendLinkHandler handler, ReactorProvider reactorProvider, TokenManager tokenManager, MessageSerializer messageSerializer, AmqpRetryOptions retryOptions, Scheduler scheduler, AmqpMetricsProvider metricsProvider) { this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.sender = Objects.requireNonNull(sender, "'sender' cannot be null."); this.handler = Objects.requireNonNull(handler, "'handler' cannot be null."); this.reactorProvider = Objects.requireNonNull(reactorProvider, "'reactorProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null."); this.scheduler = Objects.requireNonNull(scheduler, "'scheduler' cannot be null."); this.retry = RetryUtil.getRetryPolicy(retryOptions); this.tokenManager = tokenManager; this.metricsProvider = metricsProvider; String connectionId = handler.getConnectionId() == null ? NOT_APPLICABLE : handler.getConnectionId(); String linkName = getLinkName() == null ? 
NOT_APPLICABLE : getLinkName(); Map<String, Object> loggingContext = createContextWithConnectionId(connectionId); loggingContext.put(LINK_NAME_KEY, linkName); loggingContext.put(ENTITY_PATH_KEY, entityPath); this.logger = new ClientLogger(ReactorSender.class, loggingContext); this.activeTimeoutMessage = String.format( "ReactorSender connectionId[%s] linkName[%s]: Waiting for send and receive handler to be ACTIVE", handler.getConnectionId(), handler.getLinkName()); this.endpointStates = this.handler.getEndpointStates() .map(state -> { logger.verbose("State {}", state); this.hasConnected.set(state == EndpointState.ACTIVE); return AmqpEndpointStateUtil.getConnectionState(state); }) .doOnError(error -> { hasConnected.set(false); handleError(error); }) .doOnComplete(() -> { hasConnected.set(false); handleClose(); }) .cache(1); this.subscriptions = Disposables.composite( this.endpointStates.subscribe(), this.handler.getDeliveredMessages().subscribe(this::processDeliveredMessage), this.handler.getLinkCredits().subscribe(credit -> { logger.atVerbose().addKeyValue("credits", credit) .log("Credits on link."); this.scheduleWorkOnDispatcher(); }), amqpConnection.getShutdownSignals().flatMap(signal -> { logger.verbose("Shutdown signal received."); hasConnected.set(false); return closeAsync("Connection shutdown.", null); }).subscribe() ); if (tokenManager != null) { this.subscriptions.add(tokenManager.getAuthorizationResults().onErrorResume(error -> { final Mono<Void> operation = closeAsync(String.format("connectionId[%s] linkName[%s] Token renewal failure. Disposing send " + "link.", amqpConnection.getId(), getLinkName()), new ErrorCondition(Symbol.getSymbol(NOT_ALLOWED.getErrorCondition()), error.getMessage())); return operation.then(Mono.empty()); }).subscribe(response -> { logger.atVerbose().addKeyValue("response", response) .log("Token refreshed."); }, error -> { }, () -> { logger.verbose(" Authorization completed. Disposing."); closeAsync("Authorization completed. 
Disposing.", null).subscribe(); })); } } @Override public Flux<AmqpEndpointState> getEndpointStates() { return endpointStates; } @Override public Mono<Void> send(Message message) { return send(message, null); } @Override public Mono<Void> send(Message message, DeliveryState deliveryState) { if (isDisposed.get()) { return Mono.error(new IllegalStateException(String.format( "connectionId[%s] linkName[%s] Cannot publish message when disposed.", handler.getConnectionId(), getLinkName()))); } return getLinkSize() .flatMap(maxMessageSize -> { final int payloadSize = messageSerializer.getSize(message); final int allocationSize = Math.min(payloadSize + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize); final byte[] bytes = new byte[allocationSize]; int encodedSize; try { encodedSize = message.encode(bytes, 0, allocationSize); } catch (BufferOverflowException exception) { final String errorMessage = String.format(Locale.US, "Error sending. Size of the payload exceeded maximum message size: %s kb", maxMessageSize / 1024); final Throwable error = new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, errorMessage, exception, handler.getErrorContext(sender)); return Mono.error(error); } return send(bytes, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT, deliveryState); }).then(); } @Override public Mono<Void> send(List<Message> messageBatch) { return send(messageBatch, null); } @Override public Mono<Void> send(List<Message> messageBatch, DeliveryState deliveryState) { if (isDisposed.get()) { return Mono.error(new IllegalStateException(String.format( "connectionId[%s] linkName[%s] Cannot publish data batch when disposed.", handler.getConnectionId(), getLinkName()))); } if (messageBatch.size() == 1) { return send(messageBatch.get(0), deliveryState); } return getLinkSize() .flatMap(maxMessageSize -> { int totalEncodedSize = 0; final CompositeReadableBuffer buffer = new CompositeReadableBuffer(); final byte[] envelopBytes = batchEnvelopBytes(messageBatch.get(0), 
maxMessageSize); if (envelopBytes.length > 0) { totalEncodedSize += envelopBytes.length; if (totalEncodedSize > maxMessageSize) { return batchBufferOverflowError(maxMessageSize); } buffer.append(envelopBytes); } for (final Message message : messageBatch) { final byte[] sectionBytes = batchBinaryDataSectionBytes(message, maxMessageSize); if (sectionBytes.length > 0) { totalEncodedSize += sectionBytes.length; if (totalEncodedSize > maxMessageSize) { return batchBufferOverflowError(maxMessageSize); } buffer.append(sectionBytes); } else { logger.info("Ignoring the empty message org.apache.qpid.proton.message.message@{} in the batch.", Integer.toHexString(System.identityHashCode(message))); } } return send(buffer, AmqpConstants.AMQP_BATCH_MESSAGE_FORMAT, deliveryState); }).then(); } private byte[] batchEnvelopBytes(Message envelopMessage, int maxMessageSize) { final Message message = Proton.message(); message.setMessageAnnotations(envelopMessage.getMessageAnnotations()); if ((envelopMessage.getMessageId() instanceof String) && !CoreUtils.isNullOrEmpty((String) envelopMessage.getMessageId())) { message.setMessageId(envelopMessage.getMessageId()); } if (!CoreUtils.isNullOrEmpty(envelopMessage.getGroupId())) { message.setGroupId(envelopMessage.getGroupId()); } final int size = messageSerializer.getSize(message); final int allocationSize = Math.min(size + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize); final byte[] encodedBytes = new byte[allocationSize]; final int encodedSize = message.encode(encodedBytes, 0, allocationSize); return Arrays.copyOf(encodedBytes, encodedSize); } private Mono<Void> batchBufferOverflowError(int maxMessageSize) { return FluxUtil.monoError(logger, new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, String.format(Locale.US, "Size of the payload exceeded maximum message size: %s kb", maxMessageSize / 1024), new BufferOverflowException(), handler.getErrorContext(sender))); } /** * Compute the encoded size when encoding a binary data 
of given size per Amqp 1.0 spec "amqp:data:binary" format. * * @param binaryRawSize the length of the binary data. * @return the encoded size. */ private int binaryEncodedSize(int binaryRawSize) { if (binaryRawSize <= 255) { return 5 + binaryRawSize; } else { return 8 + binaryRawSize; } } @Override public AmqpErrorContext getErrorContext() { return handler.getErrorContext(sender); } @Override public String getLinkName() { return sender.getName(); } @Override public String getEntityPath() { return entityPath; } @Override public String getHostname() { return handler.getHostname(); } @Override public Mono<Integer> getLinkSize() { if (linkSize > 0) { return Mono.defer(() -> Mono.just(this.linkSize)); } synchronized (this) { if (linkSize > 0) { return Mono.defer(() -> Mono.just(linkSize)); } return RetryUtil.withRetry(getEndpointStates().takeUntil(state -> state == AmqpEndpointState.ACTIVE), retryOptions, activeTimeoutMessage) .then(Mono.fromCallable(() -> { final UnsignedLong remoteMaxMessageSize = sender.getRemoteMaxMessageSize(); if (remoteMaxMessageSize != null) { linkSize = remoteMaxMessageSize.intValue(); } else { logger.warning("Could not get the getRemoteMaxMessageSize. Returning current link size: {}", linkSize); } return linkSize; })); } } @Override public boolean isDisposed() { return isDisposed.get(); } /** * Blocking call that disposes of the sender. * * @see */ @Override public void dispose() { close(); } /** * Blocking call that disposes of the sender. * * @see */ @Override public void close() { closeAsync().block(retryOptions.getTryTimeout()); } @Override public Mono<Void> closeAsync() { return closeAsync("User invoked close operation.", null); } /** * Disposes of the sender. * * @param errorCondition Error condition associated with close operation. * @param message Message associated with why the sender was closed. * * @return A mono that completes when the send link has closed. 
*/ Mono<Void> closeAsync(String message, ErrorCondition errorCondition) { if (isDisposed.getAndSet(true)) { return isClosedMono.asMono(); } addErrorCondition(logger.atVerbose(), errorCondition) .log("Setting error condition and disposing. {}", message); final Runnable closeWork = () -> { if (errorCondition != null && sender.getCondition() == null) { sender.setCondition(errorCondition); } sender.close(); }; return Mono.fromRunnable(() -> { try { reactorProvider.getReactorDispatcher().invoke(closeWork); } catch (IOException e) { logger.warning("Could not schedule close work. Running manually. And completing close.", e); closeWork.run(); handleClose(); } catch (RejectedExecutionException e) { logger.info("RejectedExecutionException scheduling close work. And completing close."); closeWork.run(); handleClose(); } }).then(isClosedMono.asMono()) .publishOn(Schedulers.boundedElastic()); } /** * A mono that completes when the sender has completely closed. * * @return mono that completes when the sender has completely closed. */ Mono<Void> isClosed() { return isClosedMono.asMono(); } @Override public Mono<DeliveryState> send(byte[] bytes, int arrayOffset, int messageFormat, DeliveryState deliveryState) { return onEndpointActive().then(Mono.create(sink -> { sendWork(new RetriableWorkItem(bytes, arrayOffset, messageFormat, sink, retryOptions.getTryTimeout(), deliveryState, metricsProvider)); })); } Mono<DeliveryState> send(ReadableBuffer buffer, int messageFormat, DeliveryState deliveryState) { return onEndpointActive().then(Mono.create(sink -> { sendWork(new RetriableWorkItem(buffer, messageFormat, sink, retryOptions.getTryTimeout(), deliveryState, metricsProvider)); })); } private Flux<EndpointState> onEndpointActive() { return RetryUtil.withRetry(handler.getEndpointStates().takeUntil(state -> state == EndpointState.ACTIVE), retryOptions, activeTimeoutMessage); } /** * Add the work item in pending send to be processed on {@link ReactorDispatcher} thread. 
* * @param workItem to be processed. */ private void sendWork(RetriableWorkItem workItem) { final String deliveryTag = UUID.randomUUID().toString().replace("-", ""); synchronized (pendingSendLock) { this.pendingSendsMap.put(deliveryTag, workItem); this.pendingSendsQueue.offer(new WeightedDeliveryTag(deliveryTag, workItem.hasBeenRetried() ? 1 : 0)); } this.scheduleWorkOnDispatcher(); } /** * Invokes work on the Reactor. Should only be called from ReactorDispatcher.invoke() */ private void processSendWork() { if (!hasConnected.get()) { logger.warning("Not connected. Not processing send work."); return; } if (isDisposed.get()) { logger.info("Sender is closed. Not executing work."); return; } while (hasConnected.get() && sender.getCredit() > 0) { final WeightedDeliveryTag weightedDelivery; final RetriableWorkItem workItem; final String deliveryTag; synchronized (pendingSendLock) { weightedDelivery = this.pendingSendsQueue.poll(); if (weightedDelivery != null) { deliveryTag = weightedDelivery.getDeliveryTag(); workItem = this.pendingSendsMap.get(deliveryTag); } else { workItem = null; deliveryTag = null; } } if (workItem == null) { if (deliveryTag != null) { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("sendData not found for this delivery."); } break; } Delivery delivery = null; boolean linkAdvance = false; int sentMsgSize = 0; Exception sendException = null; try { workItem.beforeTry(); delivery = sender.delivery(deliveryTag.getBytes(UTF_8)); delivery.setMessageFormat(workItem.getMessageFormat()); if (workItem.isDeliveryStateProvided()) { delivery.disposition(workItem.getDeliveryState()); } workItem.send(sender); linkAdvance = sender.advance(); } catch (Exception exception) { sendException = exception; } if (linkAdvance) { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("Sent message."); workItem.setWaitingForAck(); scheduler.schedule(new SendTimeout(deliveryTag), retryOptions.getTryTimeout().toMillis(), 
TimeUnit.MILLISECONDS); } else { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .addKeyValue("sentMessageSize", sentMsgSize) .addKeyValue("payloadActualSize", workItem.getEncodedMessageSize()) .log("Sendlink advance failed."); DeliveryState outcome = null; if (delivery != null) { outcome = delivery.getRemoteState(); delivery.free(); } final AmqpErrorContext context = handler.getErrorContext(sender); final Throwable exception = sendException != null ? new OperationCancelledException(String.format(Locale.US, "Entity(%s): send operation failed. Please see cause for more details", entityPath), sendException, context) : new OperationCancelledException(String.format(Locale.US, "Entity(%s): send operation failed while advancing delivery(tag: %s).", entityPath, deliveryTag), context); workItem.error(exception, outcome); } } } private void processDeliveredMessage(Delivery delivery) { final DeliveryState outcome = delivery.getRemoteState(); final String deliveryTag = new String(delivery.getTag(), UTF_8); logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("Process delivered message."); final RetriableWorkItem workItem = pendingSendsMap.remove(deliveryTag); if (workItem == null) { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("Mismatch (or send timed out)."); return; } else if (workItem.isDeliveryStateProvided()) { workItem.success(outcome); return; } if (outcome instanceof Accepted) { synchronized (errorConditionLock) { lastKnownLinkError = null; lastKnownErrorReportedAt = null; retryAttempts.set(0); } workItem.success(outcome); } else if (outcome instanceof Rejected) { final Rejected rejected = (Rejected) outcome; final org.apache.qpid.proton.amqp.transport.ErrorCondition error = rejected.getError(); final Exception exception = ExceptionUtil.toException(error.getCondition().toString(), error.getDescription(), handler.getErrorContext(sender)); logger.atWarning() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) 
.addKeyValue("rejected", rejected) .log("Delivery rejected."); final int retryAttempt; if (isGeneralSendError(error.getCondition())) { synchronized (errorConditionLock) { lastKnownLinkError = exception; lastKnownErrorReportedAt = Instant.now(); retryAttempt = retryAttempts.incrementAndGet(); } } else { retryAttempt = retryAttempts.get(); } final Duration retryInterval = retry.calculateRetryDelay(exception, retryAttempt); if (retryInterval == null || retryInterval.compareTo(workItem.getTimeoutTracker().remaining()) > 0) { cleanupFailedSend(workItem, exception, outcome); } else { workItem.setLastKnownException(exception); try { reactorProvider.getReactorDispatcher().invoke(() -> sendWork(workItem), retryInterval); } catch (IOException | RejectedExecutionException schedulerException) { exception.initCause(schedulerException); cleanupFailedSend( workItem, new AmqpException(false, String.format(Locale.US, "Entity(%s): send operation failed while scheduling a" + " retry on Reactor, see cause for more details.", entityPath), schedulerException, handler.getErrorContext(sender)), outcome); } } } else if (outcome instanceof Released) { cleanupFailedSend(workItem, new OperationCancelledException(outcome.toString(), handler.getErrorContext(sender)), outcome); } else if (outcome instanceof Declared) { final Declared declared = (Declared) outcome; workItem.success(declared); } else { cleanupFailedSend(workItem, new AmqpException(false, outcome.toString(), handler.getErrorContext(sender)), outcome); } } private void scheduleWorkOnDispatcher() { try { reactorProvider.getReactorDispatcher().invoke(this::processSendWork); } catch (IOException e) { logger.warning("Error scheduling work on reactor.", e); } catch (RejectedExecutionException e) { logger.info("Error scheduling work on reactor because of RejectedExecutionException."); } } private void cleanupFailedSend(final RetriableWorkItem workItem, final Exception exception, final DeliveryState deliveryState) { 
workItem.error(exception, deliveryState); } private void completeClose() { isClosedMono.emitEmpty((signalType, result) -> { addSignalTypeAndResult(logger.atWarning(), signalType, result).log("Unable to emit shutdown signal."); return false; }); subscriptions.dispose(); if (tokenManager != null) { tokenManager.close(); } } /** * Clears pending sends and puts an error in there. * * @param error Error to pass to pending sends. */ private void handleError(Throwable error) { synchronized (pendingSendLock) { if (isDisposed.getAndSet(true)) { logger.verbose("This was already disposed. Dropping error."); } else { logger.atVerbose() .addKeyValue(PENDING_SENDS_SIZE_KEY, () -> String.valueOf(pendingSendsMap.size())) .log("Disposing pending sends with error."); } pendingSendsMap.forEach((key, value) -> value.error(error, null)); pendingSendsMap.clear(); pendingSendsQueue.clear(); } completeClose(); } private void handleClose() { final String message = String.format("Could not complete sends because link '%s' for '%s' is closed.", getLinkName(), entityPath); final AmqpErrorContext context = handler.getErrorContext(sender); synchronized (pendingSendLock) { if (isDisposed.getAndSet(true)) { logger.verbose("This was already disposed."); } else { logger.atVerbose() .addKeyValue(PENDING_SENDS_SIZE_KEY, () -> String.valueOf(pendingSendsMap.size())) .log("Disposing pending sends."); } pendingSendsMap.forEach((key, value) -> value.error(new AmqpException(true, message, context), null)); pendingSendsMap.clear(); pendingSendsQueue.clear(); } completeClose(); } private static boolean isGeneralSendError(Symbol amqpError) { return (amqpError == AmqpErrorCode.SERVER_BUSY_ERROR || amqpError == AmqpErrorCode.TIMEOUT_ERROR || amqpError == AmqpErrorCode.RESOURCE_LIMIT_EXCEEDED); } private static class WeightedDeliveryTag { private final String deliveryTag; private final int priority; WeightedDeliveryTag(final String deliveryTag, final int priority) { this.deliveryTag = deliveryTag; this.priority 
= priority; } private String getDeliveryTag() { return this.deliveryTag; } private int getPriority() { return this.priority; } } private static class DeliveryTagComparator implements Comparator<WeightedDeliveryTag>, Serializable { private static final long serialVersionUID = -7057500582037295635L; @Override public int compare(WeightedDeliveryTag deliveryTag0, WeightedDeliveryTag deliveryTag1) { return deliveryTag1.getPriority() - deliveryTag0.getPriority(); } } /** * Keeps track of messages that have been sent, but may not have been acknowledged by the service. */ private class SendTimeout implements Runnable { private final String deliveryTag; SendTimeout(String deliveryTag) { this.deliveryTag = deliveryTag; } @Override public void run() { final RetriableWorkItem workItem = pendingSendsMap.remove(deliveryTag); if (workItem == null) { return; } Exception cause = lastKnownLinkError; final Exception lastError; final Instant lastErrorTime; synchronized (errorConditionLock) { lastError = lastKnownLinkError; lastErrorTime = lastKnownErrorReportedAt; } if (lastError != null && lastErrorTime != null) { final Instant now = Instant.now(); final boolean isLastErrorAfterSleepTime = lastErrorTime.isAfter(now.minusSeconds(SERVER_BUSY_BASE_SLEEP_TIME_IN_SECS)); final boolean isServerBusy = lastError instanceof AmqpException && isLastErrorAfterSleepTime; final boolean isLastErrorAfterOperationTimeout = lastErrorTime.isAfter(now.minus(retryOptions.getTryTimeout())); cause = isServerBusy || isLastErrorAfterOperationTimeout ? lastError : null; } final AmqpException exception; if (cause instanceof AmqpException) { exception = (AmqpException) cause; } else { exception = new AmqpException(true, AmqpErrorCondition.TIMEOUT_ERROR, String.format(Locale.US, "Entity(%s): Send operation timed out", entityPath), handler.getErrorContext(sender)); } workItem.error(exception, null); } } }
Good catch!!, I'll update to use monoError.
private int binaryEncodedSize(int binaryRawSize) { if (binaryRawSize <= 255) { return 5 + binaryRawSize; } else { return 8 + binaryRawSize; } }
private int binaryEncodedSize(int binaryRawSize) { if (binaryRawSize <= 255) { return 5 + binaryRawSize; } else { return 8 + binaryRawSize; } }
class ReactorSender implements AmqpSendLink, AsyncCloseable, AutoCloseable { private static final String DELIVERY_TAG_KEY = "deliveryTag"; private static final String PENDING_SENDS_SIZE_KEY = "pending_sends_size"; private final String entityPath; private final Sender sender; private final SendLinkHandler handler; private final ReactorProvider reactorProvider; private final Disposable.Composite subscriptions; private final AtomicBoolean hasConnected = new AtomicBoolean(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicInteger retryAttempts = new AtomicInteger(); private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final Object pendingSendLock = new Object(); private final ConcurrentHashMap<String, RetriableWorkItem> pendingSendsMap = new ConcurrentHashMap<>(); private final PriorityQueue<WeightedDeliveryTag> pendingSendsQueue = new PriorityQueue<>(1000, new DeliveryTagComparator()); private final ClientLogger logger; private final Flux<AmqpEndpointState> endpointStates; private final TokenManager tokenManager; private final MessageSerializer messageSerializer; private final AmqpRetryPolicy retry; private final AmqpRetryOptions retryOptions; private final String activeTimeoutMessage; private final Scheduler scheduler; private final AmqpMetricsProvider metricsProvider; private final Object errorConditionLock = new Object(); private volatile Exception lastKnownLinkError; private volatile Instant lastKnownErrorReportedAt; private volatile int linkSize; /** * Creates an instance of {@link ReactorSender}. * * @param amqpConnection The parent {@link AmqpConnection} that this sender lives in. * @param entityPath The message broker address for the sender. * @param sender The underlying proton-j sender. * @param handler The proton-j handler associated with the sender. * @param reactorProvider Provider to schedule work on the proton-j reactor. * @param tokenManager Token manager for authorising with the CBS node. 
Can be {@code null} if it is part of the * transaction manager. * @param messageSerializer Serializer to deserialise and serialize AMQP messages. * @param retryOptions Retry options. * @param scheduler Scheduler to schedule send timeout. */ ReactorSender(AmqpConnection amqpConnection, String entityPath, Sender sender, SendLinkHandler handler, ReactorProvider reactorProvider, TokenManager tokenManager, MessageSerializer messageSerializer, AmqpRetryOptions retryOptions, Scheduler scheduler, AmqpMetricsProvider metricsProvider) { this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.sender = Objects.requireNonNull(sender, "'sender' cannot be null."); this.handler = Objects.requireNonNull(handler, "'handler' cannot be null."); this.reactorProvider = Objects.requireNonNull(reactorProvider, "'reactorProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null."); this.scheduler = Objects.requireNonNull(scheduler, "'scheduler' cannot be null."); this.retry = RetryUtil.getRetryPolicy(retryOptions); this.tokenManager = tokenManager; this.metricsProvider = metricsProvider; String connectionId = handler.getConnectionId() == null ? NOT_APPLICABLE : handler.getConnectionId(); String linkName = getLinkName() == null ? 
NOT_APPLICABLE : getLinkName(); Map<String, Object> loggingContext = createContextWithConnectionId(connectionId); loggingContext.put(LINK_NAME_KEY, linkName); loggingContext.put(ENTITY_PATH_KEY, entityPath); this.logger = new ClientLogger(ReactorSender.class, loggingContext); this.activeTimeoutMessage = String.format( "ReactorSender connectionId[%s] linkName[%s]: Waiting for send and receive handler to be ACTIVE", handler.getConnectionId(), handler.getLinkName()); this.endpointStates = this.handler.getEndpointStates() .map(state -> { logger.verbose("State {}", state); this.hasConnected.set(state == EndpointState.ACTIVE); return AmqpEndpointStateUtil.getConnectionState(state); }) .doOnError(error -> { hasConnected.set(false); handleError(error); }) .doOnComplete(() -> { hasConnected.set(false); handleClose(); }) .cache(1); this.subscriptions = Disposables.composite( this.endpointStates.subscribe(), this.handler.getDeliveredMessages().subscribe(this::processDeliveredMessage), this.handler.getLinkCredits().subscribe(credit -> { logger.atVerbose().addKeyValue("credits", credit) .log("Credits on link."); this.scheduleWorkOnDispatcher(); }), amqpConnection.getShutdownSignals().flatMap(signal -> { logger.verbose("Shutdown signal received."); hasConnected.set(false); return closeAsync("Connection shutdown.", null); }).subscribe() ); if (tokenManager != null) { this.subscriptions.add(tokenManager.getAuthorizationResults().onErrorResume(error -> { final Mono<Void> operation = closeAsync(String.format("connectionId[%s] linkName[%s] Token renewal failure. Disposing send " + "link.", amqpConnection.getId(), getLinkName()), new ErrorCondition(Symbol.getSymbol(NOT_ALLOWED.getErrorCondition()), error.getMessage())); return operation.then(Mono.empty()); }).subscribe(response -> { logger.atVerbose().addKeyValue("response", response) .log("Token refreshed."); }, error -> { }, () -> { logger.verbose(" Authorization completed. Disposing."); closeAsync("Authorization completed. 
Disposing.", null).subscribe(); })); } } @Override public Flux<AmqpEndpointState> getEndpointStates() { return endpointStates; } @Override public Mono<Void> send(Message message) { return send(message, null); } @Override public Mono<Void> send(Message message, DeliveryState deliveryState) { if (isDisposed.get()) { return Mono.error(new IllegalStateException(String.format( "connectionId[%s] linkName[%s] Cannot publish message when disposed.", handler.getConnectionId(), getLinkName()))); } return getLinkSize() .flatMap(maxMessageSize -> { final int payloadSize = messageSerializer.getSize(message); final int allocationSize = Math.min(payloadSize + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize); final byte[] bytes = new byte[allocationSize]; int encodedSize; try { encodedSize = message.encode(bytes, 0, allocationSize); } catch (BufferOverflowException exception) { final String errorMessage = String.format(Locale.US, "Error sending. Size of the payload exceeded maximum message size: %s kb", maxMessageSize / 1024); final Throwable error = new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, errorMessage, exception, handler.getErrorContext(sender)); return Mono.error(error); } return send(bytes, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT, deliveryState); }).then(); } @Override public Mono<Void> send(List<Message> messageBatch) { return send(messageBatch, null); } @Override public Mono<Void> send(List<Message> messageBatch, DeliveryState deliveryState) { if (isDisposed.get()) { return Mono.error(new IllegalStateException(String.format( "connectionId[%s] linkName[%s] Cannot publish data batch when disposed.", handler.getConnectionId(), getLinkName()))); } if (messageBatch.size() == 1) { return send(messageBatch.get(0), deliveryState); } return getLinkSize() .flatMap(maxMessageSize -> { int totalEncodedSize = 0; final CompositeReadableBuffer buffer = new CompositeReadableBuffer(); final byte[] envelopBytes = batchEnvelopBytes(messageBatch.get(0), 
maxMessageSize); if (envelopBytes.length > 0) { totalEncodedSize += envelopBytes.length; if (totalEncodedSize > maxMessageSize) { return batchBufferOverflowError(maxMessageSize); } buffer.append(envelopBytes); } for (final Message message : messageBatch) { final byte[] sectionBytes = batchBinaryDataSectionBytes(message, maxMessageSize); if (sectionBytes.length > 0) { totalEncodedSize += sectionBytes.length; if (totalEncodedSize > maxMessageSize) { return batchBufferOverflowError(maxMessageSize); } buffer.append(sectionBytes); } } return send(buffer, AmqpConstants.AMQP_BATCH_MESSAGE_FORMAT, deliveryState); }).then(); } private byte[] batchEnvelopBytes(Message envelopMessage, int maxMessageSize) { final Message message = Proton.message(); message.setMessageAnnotations(envelopMessage.getMessageAnnotations()); final int size = messageSerializer.getSize(message); final int allocationSize = Math.min(size + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize); final byte[] encodedBytes = new byte[allocationSize]; final int encodedSize = message.encode(encodedBytes, 0, allocationSize); return Arrays.copyOf(encodedBytes, encodedSize); } private byte[] batchBinaryDataSectionBytes(Message sectionMessage, int maxMessageSize) { final int size = messageSerializer.getSize(sectionMessage); final int allocationSize = Math.min(size + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize); final byte[] encodedBytes = new byte[allocationSize]; final int encodedSize = sectionMessage.encode(encodedBytes, 0, allocationSize); final Message message = Proton.message(); final Data binaryData = new Data(new Binary(encodedBytes, 0, encodedSize)); message.setBody(binaryData); final int binaryRawSize = binaryData.getValue().getLength(); final int binaryEncodedSize = binaryEncodedSize(binaryRawSize); final byte[] binaryEncodedBytes = new byte[binaryEncodedSize]; message.encode(binaryEncodedBytes, 0, binaryEncodedSize); return binaryEncodedBytes; } private Mono<Void> batchBufferOverflowError(int maxMessageSize) { 
return Mono.error(new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, String.format(Locale.US, "Size of the payload exceeded maximum message size: %s kb", maxMessageSize / 1024), new BufferOverflowException(), handler.getErrorContext(sender))); } /** * Compute the encoded size when encoding a binary data of given size per Amqp 1.0 spec "amqp:data:binary" format. * * @param binaryRawSize the length of the binary data. * @return the encoded size. */ @Override public AmqpErrorContext getErrorContext() { return handler.getErrorContext(sender); } @Override public String getLinkName() { return sender.getName(); } @Override public String getEntityPath() { return entityPath; } @Override public String getHostname() { return handler.getHostname(); } @Override public Mono<Integer> getLinkSize() { if (linkSize > 0) { return Mono.defer(() -> Mono.just(this.linkSize)); } synchronized (this) { if (linkSize > 0) { return Mono.defer(() -> Mono.just(linkSize)); } return RetryUtil.withRetry(getEndpointStates().takeUntil(state -> state == AmqpEndpointState.ACTIVE), retryOptions, activeTimeoutMessage) .then(Mono.fromCallable(() -> { final UnsignedLong remoteMaxMessageSize = sender.getRemoteMaxMessageSize(); if (remoteMaxMessageSize != null) { linkSize = remoteMaxMessageSize.intValue(); } else { logger.warning("Could not get the getRemoteMaxMessageSize. Returning current link size: {}", linkSize); } return linkSize; })); } } @Override public boolean isDisposed() { return isDisposed.get(); } /** * Blocking call that disposes of the sender. * * @see */ @Override public void dispose() { close(); } /** * Blocking call that disposes of the sender. * * @see */ @Override public void close() { closeAsync().block(retryOptions.getTryTimeout()); } @Override public Mono<Void> closeAsync() { return closeAsync("User invoked close operation.", null); } /** * Disposes of the sender. * * @param errorCondition Error condition associated with close operation. 
* @param message Message associated with why the sender was closed. * * @return A mono that completes when the send link has closed. */ Mono<Void> closeAsync(String message, ErrorCondition errorCondition) { if (isDisposed.getAndSet(true)) { return isClosedMono.asMono(); } addErrorCondition(logger.atVerbose(), errorCondition) .log("Setting error condition and disposing. {}", message); final Runnable closeWork = () -> { if (errorCondition != null && sender.getCondition() == null) { sender.setCondition(errorCondition); } sender.close(); }; return Mono.fromRunnable(() -> { try { reactorProvider.getReactorDispatcher().invoke(closeWork); } catch (IOException e) { logger.warning("Could not schedule close work. Running manually. And completing close.", e); closeWork.run(); handleClose(); } catch (RejectedExecutionException e) { logger.info("RejectedExecutionException scheduling close work. And completing close."); closeWork.run(); handleClose(); } }).then(isClosedMono.asMono()) .publishOn(Schedulers.boundedElastic()); } /** * A mono that completes when the sender has completely closed. * * @return mono that completes when the sender has completely closed. 
*/ Mono<Void> isClosed() { return isClosedMono.asMono(); } @Override public Mono<DeliveryState> send(byte[] bytes, int arrayOffset, int messageFormat, DeliveryState deliveryState) { return onEndpointActive().then(Mono.create(sink -> { sendWork(new RetriableWorkItem(bytes, arrayOffset, messageFormat, sink, retryOptions.getTryTimeout(), deliveryState, metricsProvider)); })); } Mono<DeliveryState> send(ReadableBuffer buffer, int messageFormat, DeliveryState deliveryState) { return onEndpointActive().then(Mono.create(sink -> { sendWork(new RetriableWorkItem(buffer, messageFormat, sink, retryOptions.getTryTimeout(), deliveryState, metricsProvider)); })); } private Flux<EndpointState> onEndpointActive() { return RetryUtil.withRetry(handler.getEndpointStates().takeUntil(state -> state == EndpointState.ACTIVE), retryOptions, activeTimeoutMessage); } /** * Add the work item in pending send to be processed on {@link ReactorDispatcher} thread. * * @param workItem to be processed. */ private void sendWork(RetriableWorkItem workItem) { final String deliveryTag = UUID.randomUUID().toString().replace("-", ""); synchronized (pendingSendLock) { this.pendingSendsMap.put(deliveryTag, workItem); this.pendingSendsQueue.offer(new WeightedDeliveryTag(deliveryTag, workItem.hasBeenRetried() ? 1 : 0)); } this.scheduleWorkOnDispatcher(); } /** * Invokes work on the Reactor. Should only be called from ReactorDispatcher.invoke() */ private void processSendWork() { if (!hasConnected.get()) { logger.warning("Not connected. Not processing send work."); return; } if (isDisposed.get()) { logger.info("Sender is closed. 
Not executing work."); return; } while (hasConnected.get() && sender.getCredit() > 0) { final WeightedDeliveryTag weightedDelivery; final RetriableWorkItem workItem; final String deliveryTag; synchronized (pendingSendLock) { weightedDelivery = this.pendingSendsQueue.poll(); if (weightedDelivery != null) { deliveryTag = weightedDelivery.getDeliveryTag(); workItem = this.pendingSendsMap.get(deliveryTag); } else { workItem = null; deliveryTag = null; } } if (workItem == null) { if (deliveryTag != null) { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("sendData not found for this delivery."); } break; } Delivery delivery = null; boolean linkAdvance = false; int sentMsgSize = 0; Exception sendException = null; try { workItem.beforeTry(); delivery = sender.delivery(deliveryTag.getBytes(UTF_8)); delivery.setMessageFormat(workItem.getMessageFormat()); if (workItem.isDeliveryStateProvided()) { delivery.disposition(workItem.getDeliveryState()); } workItem.send(sender); linkAdvance = sender.advance(); } catch (Exception exception) { sendException = exception; } if (linkAdvance) { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("Sent message."); workItem.setWaitingForAck(); scheduler.schedule(new SendTimeout(deliveryTag), retryOptions.getTryTimeout().toMillis(), TimeUnit.MILLISECONDS); } else { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .addKeyValue("sentMessageSize", sentMsgSize) .addKeyValue("payloadActualSize", workItem.getEncodedMessageSize()) .log("Sendlink advance failed."); DeliveryState outcome = null; if (delivery != null) { outcome = delivery.getRemoteState(); delivery.free(); } final AmqpErrorContext context = handler.getErrorContext(sender); final Throwable exception = sendException != null ? new OperationCancelledException(String.format(Locale.US, "Entity(%s): send operation failed. 
Please see cause for more details", entityPath), sendException, context) : new OperationCancelledException(String.format(Locale.US, "Entity(%s): send operation failed while advancing delivery(tag: %s).", entityPath, deliveryTag), context); workItem.error(exception, outcome); } } } private void processDeliveredMessage(Delivery delivery) { final DeliveryState outcome = delivery.getRemoteState(); final String deliveryTag = new String(delivery.getTag(), UTF_8); logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("Process delivered message."); final RetriableWorkItem workItem = pendingSendsMap.remove(deliveryTag); if (workItem == null) { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("Mismatch (or send timed out)."); return; } else if (workItem.isDeliveryStateProvided()) { workItem.success(outcome); return; } if (outcome instanceof Accepted) { synchronized (errorConditionLock) { lastKnownLinkError = null; lastKnownErrorReportedAt = null; retryAttempts.set(0); } workItem.success(outcome); } else if (outcome instanceof Rejected) { final Rejected rejected = (Rejected) outcome; final org.apache.qpid.proton.amqp.transport.ErrorCondition error = rejected.getError(); final Exception exception = ExceptionUtil.toException(error.getCondition().toString(), error.getDescription(), handler.getErrorContext(sender)); logger.atWarning() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .addKeyValue("rejected", rejected) .log("Delivery rejected."); final int retryAttempt; if (isGeneralSendError(error.getCondition())) { synchronized (errorConditionLock) { lastKnownLinkError = exception; lastKnownErrorReportedAt = Instant.now(); retryAttempt = retryAttempts.incrementAndGet(); } } else { retryAttempt = retryAttempts.get(); } final Duration retryInterval = retry.calculateRetryDelay(exception, retryAttempt); if (retryInterval == null || retryInterval.compareTo(workItem.getTimeoutTracker().remaining()) > 0) { cleanupFailedSend(workItem, exception, outcome); } 
else { workItem.setLastKnownException(exception); try { reactorProvider.getReactorDispatcher().invoke(() -> sendWork(workItem), retryInterval); } catch (IOException | RejectedExecutionException schedulerException) { exception.initCause(schedulerException); cleanupFailedSend( workItem, new AmqpException(false, String.format(Locale.US, "Entity(%s): send operation failed while scheduling a" + " retry on Reactor, see cause for more details.", entityPath), schedulerException, handler.getErrorContext(sender)), outcome); } } } else if (outcome instanceof Released) { cleanupFailedSend(workItem, new OperationCancelledException(outcome.toString(), handler.getErrorContext(sender)), outcome); } else if (outcome instanceof Declared) { final Declared declared = (Declared) outcome; workItem.success(declared); } else { cleanupFailedSend(workItem, new AmqpException(false, outcome.toString(), handler.getErrorContext(sender)), outcome); } } private void scheduleWorkOnDispatcher() { try { reactorProvider.getReactorDispatcher().invoke(this::processSendWork); } catch (IOException e) { logger.warning("Error scheduling work on reactor.", e); } catch (RejectedExecutionException e) { logger.info("Error scheduling work on reactor because of RejectedExecutionException."); } } private void cleanupFailedSend(final RetriableWorkItem workItem, final Exception exception, final DeliveryState deliveryState) { workItem.error(exception, deliveryState); } private void completeClose() { isClosedMono.emitEmpty((signalType, result) -> { addSignalTypeAndResult(logger.atWarning(), signalType, result).log("Unable to emit shutdown signal."); return false; }); subscriptions.dispose(); if (tokenManager != null) { tokenManager.close(); } } /** * Clears pending sends and puts an error in there. * * @param error Error to pass to pending sends. */ private void handleError(Throwable error) { synchronized (pendingSendLock) { if (isDisposed.getAndSet(true)) { logger.verbose("This was already disposed. 
Dropping error."); } else { logger.atVerbose() .addKeyValue(PENDING_SENDS_SIZE_KEY, () -> String.valueOf(pendingSendsMap.size())) .log("Disposing pending sends with error."); } pendingSendsMap.forEach((key, value) -> value.error(error, null)); pendingSendsMap.clear(); pendingSendsQueue.clear(); } completeClose(); } private void handleClose() { final String message = String.format("Could not complete sends because link '%s' for '%s' is closed.", getLinkName(), entityPath); final AmqpErrorContext context = handler.getErrorContext(sender); synchronized (pendingSendLock) { if (isDisposed.getAndSet(true)) { logger.verbose("This was already disposed."); } else { logger.atVerbose() .addKeyValue(PENDING_SENDS_SIZE_KEY, () -> String.valueOf(pendingSendsMap.size())) .log("Disposing pending sends."); } pendingSendsMap.forEach((key, value) -> value.error(new AmqpException(true, message, context), null)); pendingSendsMap.clear(); pendingSendsQueue.clear(); } completeClose(); } private static boolean isGeneralSendError(Symbol amqpError) { return (amqpError == AmqpErrorCode.SERVER_BUSY_ERROR || amqpError == AmqpErrorCode.TIMEOUT_ERROR || amqpError == AmqpErrorCode.RESOURCE_LIMIT_EXCEEDED); } private static class WeightedDeliveryTag { private final String deliveryTag; private final int priority; WeightedDeliveryTag(final String deliveryTag, final int priority) { this.deliveryTag = deliveryTag; this.priority = priority; } private String getDeliveryTag() { return this.deliveryTag; } private int getPriority() { return this.priority; } } private static class DeliveryTagComparator implements Comparator<WeightedDeliveryTag>, Serializable { private static final long serialVersionUID = -7057500582037295635L; @Override public int compare(WeightedDeliveryTag deliveryTag0, WeightedDeliveryTag deliveryTag1) { return deliveryTag1.getPriority() - deliveryTag0.getPriority(); } } /** * Keeps track of messages that have been sent, but may not have been acknowledged by the service. 
*/
private class SendTimeout implements Runnable {
    // Delivery tag of the in-flight send this timeout task watches.
    private final String deliveryTag;

    SendTimeout(String deliveryTag) {
        this.deliveryTag = deliveryTag;
    }

    @Override
    public void run() {
        // If the delivery was already settled (ack received), the work item has been
        // removed from the map and there is nothing to time out.
        final RetriableWorkItem workItem = pendingSendsMap.remove(deliveryTag);
        if (workItem == null) {
            return;
        }
        Exception cause = lastKnownLinkError;
        final Exception lastError;
        final Instant lastErrorTime;
        // Snapshot the last known link error and its timestamp under the lock so the
        // pair is read consistently.
        synchronized (errorConditionLock) {
            lastError = lastKnownLinkError;
            lastErrorTime = lastKnownErrorReportedAt;
        }
        // Prefer surfacing a recent link error over a generic timeout: use it when it
        // happened within the server-busy sleep window or within the try-timeout window.
        if (lastError != null && lastErrorTime != null) {
            final Instant now = Instant.now();
            final boolean isLastErrorAfterSleepTime =
                lastErrorTime.isAfter(now.minusSeconds(SERVER_BUSY_BASE_SLEEP_TIME_IN_SECS));
            final boolean isServerBusy = lastError instanceof AmqpException && isLastErrorAfterSleepTime;
            final boolean isLastErrorAfterOperationTimeout =
                lastErrorTime.isAfter(now.minus(retryOptions.getTryTimeout()));
            cause = isServerBusy || isLastErrorAfterOperationTimeout ? lastError : null;
        }
        // Report the recent AmqpException as-is; otherwise fail the work item with a
        // retriable TIMEOUT_ERROR for this entity.
        final AmqpException exception;
        if (cause instanceof AmqpException) {
            exception = (AmqpException) cause;
        } else {
            exception = new AmqpException(true, AmqpErrorCondition.TIMEOUT_ERROR,
                String.format(Locale.US, "Entity(%s): Send operation timed out", entityPath),
                handler.getErrorContext(sender));
        }
        workItem.error(exception, null);
    }
}
}
class ReactorSender implements AmqpSendLink, AsyncCloseable, AutoCloseable { private static final String DELIVERY_TAG_KEY = "deliveryTag"; private static final String PENDING_SENDS_SIZE_KEY = "pending_sends_size"; private final String entityPath; private final Sender sender; private final SendLinkHandler handler; private final ReactorProvider reactorProvider; private final Disposable.Composite subscriptions; private final AtomicBoolean hasConnected = new AtomicBoolean(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicInteger retryAttempts = new AtomicInteger(); private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final Object pendingSendLock = new Object(); private final ConcurrentHashMap<String, RetriableWorkItem> pendingSendsMap = new ConcurrentHashMap<>(); private final PriorityQueue<WeightedDeliveryTag> pendingSendsQueue = new PriorityQueue<>(1000, new DeliveryTagComparator()); private final ClientLogger logger; private final Flux<AmqpEndpointState> endpointStates; private final TokenManager tokenManager; private final MessageSerializer messageSerializer; private final AmqpRetryPolicy retry; private final AmqpRetryOptions retryOptions; private final String activeTimeoutMessage; private final Scheduler scheduler; private final AmqpMetricsProvider metricsProvider; private final Object errorConditionLock = new Object(); private volatile Exception lastKnownLinkError; private volatile Instant lastKnownErrorReportedAt; private volatile int linkSize; /** * Creates an instance of {@link ReactorSender}. * * @param amqpConnection The parent {@link AmqpConnection} that this sender lives in. * @param entityPath The message broker address for the sender. * @param sender The underlying proton-j sender. * @param handler The proton-j handler associated with the sender. * @param reactorProvider Provider to schedule work on the proton-j reactor. * @param tokenManager Token manager for authorising with the CBS node. 
Can be {@code null} if it is part of the * transaction manager. * @param messageSerializer Serializer to deserialise and serialize AMQP messages. * @param retryOptions Retry options. * @param scheduler Scheduler to schedule send timeout. */ ReactorSender(AmqpConnection amqpConnection, String entityPath, Sender sender, SendLinkHandler handler, ReactorProvider reactorProvider, TokenManager tokenManager, MessageSerializer messageSerializer, AmqpRetryOptions retryOptions, Scheduler scheduler, AmqpMetricsProvider metricsProvider) { this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.sender = Objects.requireNonNull(sender, "'sender' cannot be null."); this.handler = Objects.requireNonNull(handler, "'handler' cannot be null."); this.reactorProvider = Objects.requireNonNull(reactorProvider, "'reactorProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null."); this.scheduler = Objects.requireNonNull(scheduler, "'scheduler' cannot be null."); this.retry = RetryUtil.getRetryPolicy(retryOptions); this.tokenManager = tokenManager; this.metricsProvider = metricsProvider; String connectionId = handler.getConnectionId() == null ? NOT_APPLICABLE : handler.getConnectionId(); String linkName = getLinkName() == null ? 
NOT_APPLICABLE : getLinkName(); Map<String, Object> loggingContext = createContextWithConnectionId(connectionId); loggingContext.put(LINK_NAME_KEY, linkName); loggingContext.put(ENTITY_PATH_KEY, entityPath); this.logger = new ClientLogger(ReactorSender.class, loggingContext); this.activeTimeoutMessage = String.format( "ReactorSender connectionId[%s] linkName[%s]: Waiting for send and receive handler to be ACTIVE", handler.getConnectionId(), handler.getLinkName()); this.endpointStates = this.handler.getEndpointStates() .map(state -> { logger.verbose("State {}", state); this.hasConnected.set(state == EndpointState.ACTIVE); return AmqpEndpointStateUtil.getConnectionState(state); }) .doOnError(error -> { hasConnected.set(false); handleError(error); }) .doOnComplete(() -> { hasConnected.set(false); handleClose(); }) .cache(1); this.subscriptions = Disposables.composite( this.endpointStates.subscribe(), this.handler.getDeliveredMessages().subscribe(this::processDeliveredMessage), this.handler.getLinkCredits().subscribe(credit -> { logger.atVerbose().addKeyValue("credits", credit) .log("Credits on link."); this.scheduleWorkOnDispatcher(); }), amqpConnection.getShutdownSignals().flatMap(signal -> { logger.verbose("Shutdown signal received."); hasConnected.set(false); return closeAsync("Connection shutdown.", null); }).subscribe() ); if (tokenManager != null) { this.subscriptions.add(tokenManager.getAuthorizationResults().onErrorResume(error -> { final Mono<Void> operation = closeAsync(String.format("connectionId[%s] linkName[%s] Token renewal failure. Disposing send " + "link.", amqpConnection.getId(), getLinkName()), new ErrorCondition(Symbol.getSymbol(NOT_ALLOWED.getErrorCondition()), error.getMessage())); return operation.then(Mono.empty()); }).subscribe(response -> { logger.atVerbose().addKeyValue("response", response) .log("Token refreshed."); }, error -> { }, () -> { logger.verbose(" Authorization completed. Disposing."); closeAsync("Authorization completed. 
Disposing.", null).subscribe(); })); } } @Override public Flux<AmqpEndpointState> getEndpointStates() { return endpointStates; } @Override public Mono<Void> send(Message message) { return send(message, null); } @Override public Mono<Void> send(Message message, DeliveryState deliveryState) { if (isDisposed.get()) { return Mono.error(new IllegalStateException(String.format( "connectionId[%s] linkName[%s] Cannot publish message when disposed.", handler.getConnectionId(), getLinkName()))); } return getLinkSize() .flatMap(maxMessageSize -> { final int payloadSize = messageSerializer.getSize(message); final int allocationSize = Math.min(payloadSize + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize); final byte[] bytes = new byte[allocationSize]; int encodedSize; try { encodedSize = message.encode(bytes, 0, allocationSize); } catch (BufferOverflowException exception) { final String errorMessage = String.format(Locale.US, "Error sending. Size of the payload exceeded maximum message size: %s kb", maxMessageSize / 1024); final Throwable error = new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, errorMessage, exception, handler.getErrorContext(sender)); return Mono.error(error); } return send(bytes, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT, deliveryState); }).then(); } @Override public Mono<Void> send(List<Message> messageBatch) { return send(messageBatch, null); } @Override public Mono<Void> send(List<Message> messageBatch, DeliveryState deliveryState) { if (isDisposed.get()) { return Mono.error(new IllegalStateException(String.format( "connectionId[%s] linkName[%s] Cannot publish data batch when disposed.", handler.getConnectionId(), getLinkName()))); } if (messageBatch.size() == 1) { return send(messageBatch.get(0), deliveryState); } return getLinkSize() .flatMap(maxMessageSize -> { int totalEncodedSize = 0; final CompositeReadableBuffer buffer = new CompositeReadableBuffer(); final byte[] envelopBytes = batchEnvelopBytes(messageBatch.get(0), 
maxMessageSize); if (envelopBytes.length > 0) { totalEncodedSize += envelopBytes.length; if (totalEncodedSize > maxMessageSize) { return batchBufferOverflowError(maxMessageSize); } buffer.append(envelopBytes); } for (final Message message : messageBatch) { final byte[] sectionBytes = batchBinaryDataSectionBytes(message, maxMessageSize); if (sectionBytes.length > 0) { totalEncodedSize += sectionBytes.length; if (totalEncodedSize > maxMessageSize) { return batchBufferOverflowError(maxMessageSize); } buffer.append(sectionBytes); } else { logger.info("Ignoring the empty message org.apache.qpid.proton.message.message@{} in the batch.", Integer.toHexString(System.identityHashCode(message))); } } return send(buffer, AmqpConstants.AMQP_BATCH_MESSAGE_FORMAT, deliveryState); }).then(); } private byte[] batchEnvelopBytes(Message envelopMessage, int maxMessageSize) { final Message message = Proton.message(); message.setMessageAnnotations(envelopMessage.getMessageAnnotations()); if ((envelopMessage.getMessageId() instanceof String) && !CoreUtils.isNullOrEmpty((String) envelopMessage.getMessageId())) { message.setMessageId(envelopMessage.getMessageId()); } if (!CoreUtils.isNullOrEmpty(envelopMessage.getGroupId())) { message.setGroupId(envelopMessage.getGroupId()); } final int size = messageSerializer.getSize(message); final int allocationSize = Math.min(size + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize); final byte[] encodedBytes = new byte[allocationSize]; final int encodedSize = message.encode(encodedBytes, 0, allocationSize); return Arrays.copyOf(encodedBytes, encodedSize); } private byte[] batchBinaryDataSectionBytes(Message sectionMessage, int maxMessageSize) { final int size = messageSerializer.getSize(sectionMessage); final int allocationSize = Math.min(size + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize); final byte[] encodedBytes = new byte[allocationSize]; final int encodedSize = sectionMessage.encode(encodedBytes, 0, allocationSize); final Message message = 
Proton.message(); final Data binaryData = new Data(new Binary(encodedBytes, 0, encodedSize)); message.setBody(binaryData); final int binaryRawSize = binaryData.getValue().getLength(); final int binaryEncodedSize = binaryEncodedSize(binaryRawSize); final byte[] binaryEncodedBytes = new byte[binaryEncodedSize]; message.encode(binaryEncodedBytes, 0, binaryEncodedSize); return binaryEncodedBytes; } private Mono<Void> batchBufferOverflowError(int maxMessageSize) { return FluxUtil.monoError(logger, new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, String.format(Locale.US, "Size of the payload exceeded maximum message size: %s kb", maxMessageSize / 1024), new BufferOverflowException(), handler.getErrorContext(sender))); } /** * Compute the encoded size when encoding a binary data of given size per Amqp 1.0 spec "amqp:data:binary" format. * * @param binaryRawSize the length of the binary data. * @return the encoded size. */ @Override public AmqpErrorContext getErrorContext() { return handler.getErrorContext(sender); } @Override public String getLinkName() { return sender.getName(); } @Override public String getEntityPath() { return entityPath; } @Override public String getHostname() { return handler.getHostname(); } @Override public Mono<Integer> getLinkSize() { if (linkSize > 0) { return Mono.defer(() -> Mono.just(this.linkSize)); } synchronized (this) { if (linkSize > 0) { return Mono.defer(() -> Mono.just(linkSize)); } return RetryUtil.withRetry(getEndpointStates().takeUntil(state -> state == AmqpEndpointState.ACTIVE), retryOptions, activeTimeoutMessage) .then(Mono.fromCallable(() -> { final UnsignedLong remoteMaxMessageSize = sender.getRemoteMaxMessageSize(); if (remoteMaxMessageSize != null) { linkSize = remoteMaxMessageSize.intValue(); } else { logger.warning("Could not get the getRemoteMaxMessageSize. 
Returning current link size: {}", linkSize); } return linkSize; })); } } @Override public boolean isDisposed() { return isDisposed.get(); } /** * Blocking call that disposes of the sender. * * @see */ @Override public void dispose() { close(); } /** * Blocking call that disposes of the sender. * * @see */ @Override public void close() { closeAsync().block(retryOptions.getTryTimeout()); } @Override public Mono<Void> closeAsync() { return closeAsync("User invoked close operation.", null); } /** * Disposes of the sender. * * @param errorCondition Error condition associated with close operation. * @param message Message associated with why the sender was closed. * * @return A mono that completes when the send link has closed. */ Mono<Void> closeAsync(String message, ErrorCondition errorCondition) { if (isDisposed.getAndSet(true)) { return isClosedMono.asMono(); } addErrorCondition(logger.atVerbose(), errorCondition) .log("Setting error condition and disposing. {}", message); final Runnable closeWork = () -> { if (errorCondition != null && sender.getCondition() == null) { sender.setCondition(errorCondition); } sender.close(); }; return Mono.fromRunnable(() -> { try { reactorProvider.getReactorDispatcher().invoke(closeWork); } catch (IOException e) { logger.warning("Could not schedule close work. Running manually. And completing close.", e); closeWork.run(); handleClose(); } catch (RejectedExecutionException e) { logger.info("RejectedExecutionException scheduling close work. And completing close."); closeWork.run(); handleClose(); } }).then(isClosedMono.asMono()) .publishOn(Schedulers.boundedElastic()); } /** * A mono that completes when the sender has completely closed. * * @return mono that completes when the sender has completely closed. 
*/ Mono<Void> isClosed() { return isClosedMono.asMono(); } @Override public Mono<DeliveryState> send(byte[] bytes, int arrayOffset, int messageFormat, DeliveryState deliveryState) { return onEndpointActive().then(Mono.create(sink -> { sendWork(new RetriableWorkItem(bytes, arrayOffset, messageFormat, sink, retryOptions.getTryTimeout(), deliveryState, metricsProvider)); })); } Mono<DeliveryState> send(ReadableBuffer buffer, int messageFormat, DeliveryState deliveryState) { return onEndpointActive().then(Mono.create(sink -> { sendWork(new RetriableWorkItem(buffer, messageFormat, sink, retryOptions.getTryTimeout(), deliveryState, metricsProvider)); })); } private Flux<EndpointState> onEndpointActive() { return RetryUtil.withRetry(handler.getEndpointStates().takeUntil(state -> state == EndpointState.ACTIVE), retryOptions, activeTimeoutMessage); } /** * Add the work item in pending send to be processed on {@link ReactorDispatcher} thread. * * @param workItem to be processed. */ private void sendWork(RetriableWorkItem workItem) { final String deliveryTag = UUID.randomUUID().toString().replace("-", ""); synchronized (pendingSendLock) { this.pendingSendsMap.put(deliveryTag, workItem); this.pendingSendsQueue.offer(new WeightedDeliveryTag(deliveryTag, workItem.hasBeenRetried() ? 1 : 0)); } this.scheduleWorkOnDispatcher(); } /** * Invokes work on the Reactor. Should only be called from ReactorDispatcher.invoke() */ private void processSendWork() { if (!hasConnected.get()) { logger.warning("Not connected. Not processing send work."); return; } if (isDisposed.get()) { logger.info("Sender is closed. 
Not executing work."); return; } while (hasConnected.get() && sender.getCredit() > 0) { final WeightedDeliveryTag weightedDelivery; final RetriableWorkItem workItem; final String deliveryTag; synchronized (pendingSendLock) { weightedDelivery = this.pendingSendsQueue.poll(); if (weightedDelivery != null) { deliveryTag = weightedDelivery.getDeliveryTag(); workItem = this.pendingSendsMap.get(deliveryTag); } else { workItem = null; deliveryTag = null; } } if (workItem == null) { if (deliveryTag != null) { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("sendData not found for this delivery."); } break; } Delivery delivery = null; boolean linkAdvance = false; int sentMsgSize = 0; Exception sendException = null; try { workItem.beforeTry(); delivery = sender.delivery(deliveryTag.getBytes(UTF_8)); delivery.setMessageFormat(workItem.getMessageFormat()); if (workItem.isDeliveryStateProvided()) { delivery.disposition(workItem.getDeliveryState()); } workItem.send(sender); linkAdvance = sender.advance(); } catch (Exception exception) { sendException = exception; } if (linkAdvance) { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("Sent message."); workItem.setWaitingForAck(); scheduler.schedule(new SendTimeout(deliveryTag), retryOptions.getTryTimeout().toMillis(), TimeUnit.MILLISECONDS); } else { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .addKeyValue("sentMessageSize", sentMsgSize) .addKeyValue("payloadActualSize", workItem.getEncodedMessageSize()) .log("Sendlink advance failed."); DeliveryState outcome = null; if (delivery != null) { outcome = delivery.getRemoteState(); delivery.free(); } final AmqpErrorContext context = handler.getErrorContext(sender); final Throwable exception = sendException != null ? new OperationCancelledException(String.format(Locale.US, "Entity(%s): send operation failed. 
Please see cause for more details", entityPath), sendException, context) : new OperationCancelledException(String.format(Locale.US, "Entity(%s): send operation failed while advancing delivery(tag: %s).", entityPath, deliveryTag), context); workItem.error(exception, outcome); } } } private void processDeliveredMessage(Delivery delivery) { final DeliveryState outcome = delivery.getRemoteState(); final String deliveryTag = new String(delivery.getTag(), UTF_8); logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("Process delivered message."); final RetriableWorkItem workItem = pendingSendsMap.remove(deliveryTag); if (workItem == null) { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("Mismatch (or send timed out)."); return; } else if (workItem.isDeliveryStateProvided()) { workItem.success(outcome); return; } if (outcome instanceof Accepted) { synchronized (errorConditionLock) { lastKnownLinkError = null; lastKnownErrorReportedAt = null; retryAttempts.set(0); } workItem.success(outcome); } else if (outcome instanceof Rejected) { final Rejected rejected = (Rejected) outcome; final org.apache.qpid.proton.amqp.transport.ErrorCondition error = rejected.getError(); final Exception exception = ExceptionUtil.toException(error.getCondition().toString(), error.getDescription(), handler.getErrorContext(sender)); logger.atWarning() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .addKeyValue("rejected", rejected) .log("Delivery rejected."); final int retryAttempt; if (isGeneralSendError(error.getCondition())) { synchronized (errorConditionLock) { lastKnownLinkError = exception; lastKnownErrorReportedAt = Instant.now(); retryAttempt = retryAttempts.incrementAndGet(); } } else { retryAttempt = retryAttempts.get(); } final Duration retryInterval = retry.calculateRetryDelay(exception, retryAttempt); if (retryInterval == null || retryInterval.compareTo(workItem.getTimeoutTracker().remaining()) > 0) { cleanupFailedSend(workItem, exception, outcome); } 
else { workItem.setLastKnownException(exception); try { reactorProvider.getReactorDispatcher().invoke(() -> sendWork(workItem), retryInterval); } catch (IOException | RejectedExecutionException schedulerException) { exception.initCause(schedulerException); cleanupFailedSend( workItem, new AmqpException(false, String.format(Locale.US, "Entity(%s): send operation failed while scheduling a" + " retry on Reactor, see cause for more details.", entityPath), schedulerException, handler.getErrorContext(sender)), outcome); } } } else if (outcome instanceof Released) { cleanupFailedSend(workItem, new OperationCancelledException(outcome.toString(), handler.getErrorContext(sender)), outcome); } else if (outcome instanceof Declared) { final Declared declared = (Declared) outcome; workItem.success(declared); } else { cleanupFailedSend(workItem, new AmqpException(false, outcome.toString(), handler.getErrorContext(sender)), outcome); } } private void scheduleWorkOnDispatcher() { try { reactorProvider.getReactorDispatcher().invoke(this::processSendWork); } catch (IOException e) { logger.warning("Error scheduling work on reactor.", e); } catch (RejectedExecutionException e) { logger.info("Error scheduling work on reactor because of RejectedExecutionException."); } } private void cleanupFailedSend(final RetriableWorkItem workItem, final Exception exception, final DeliveryState deliveryState) { workItem.error(exception, deliveryState); } private void completeClose() { isClosedMono.emitEmpty((signalType, result) -> { addSignalTypeAndResult(logger.atWarning(), signalType, result).log("Unable to emit shutdown signal."); return false; }); subscriptions.dispose(); if (tokenManager != null) { tokenManager.close(); } } /** * Clears pending sends and puts an error in there. * * @param error Error to pass to pending sends. */ private void handleError(Throwable error) { synchronized (pendingSendLock) { if (isDisposed.getAndSet(true)) { logger.verbose("This was already disposed. 
Dropping error."); } else { logger.atVerbose() .addKeyValue(PENDING_SENDS_SIZE_KEY, () -> String.valueOf(pendingSendsMap.size())) .log("Disposing pending sends with error."); } pendingSendsMap.forEach((key, value) -> value.error(error, null)); pendingSendsMap.clear(); pendingSendsQueue.clear(); } completeClose(); } private void handleClose() { final String message = String.format("Could not complete sends because link '%s' for '%s' is closed.", getLinkName(), entityPath); final AmqpErrorContext context = handler.getErrorContext(sender); synchronized (pendingSendLock) { if (isDisposed.getAndSet(true)) { logger.verbose("This was already disposed."); } else { logger.atVerbose() .addKeyValue(PENDING_SENDS_SIZE_KEY, () -> String.valueOf(pendingSendsMap.size())) .log("Disposing pending sends."); } pendingSendsMap.forEach((key, value) -> value.error(new AmqpException(true, message, context), null)); pendingSendsMap.clear(); pendingSendsQueue.clear(); } completeClose(); } private static boolean isGeneralSendError(Symbol amqpError) { return (amqpError == AmqpErrorCode.SERVER_BUSY_ERROR || amqpError == AmqpErrorCode.TIMEOUT_ERROR || amqpError == AmqpErrorCode.RESOURCE_LIMIT_EXCEEDED); } private static class WeightedDeliveryTag { private final String deliveryTag; private final int priority; WeightedDeliveryTag(final String deliveryTag, final int priority) { this.deliveryTag = deliveryTag; this.priority = priority; } private String getDeliveryTag() { return this.deliveryTag; } private int getPriority() { return this.priority; } } private static class DeliveryTagComparator implements Comparator<WeightedDeliveryTag>, Serializable { private static final long serialVersionUID = -7057500582037295635L; @Override public int compare(WeightedDeliveryTag deliveryTag0, WeightedDeliveryTag deliveryTag1) { return deliveryTag1.getPriority() - deliveryTag0.getPriority(); } } /** * Keeps track of messages that have been sent, but may not have been acknowledged by the service. 
*/
private class SendTimeout implements Runnable {
    // Delivery tag of the in-flight send this timeout task watches.
    private final String deliveryTag;

    SendTimeout(String deliveryTag) {
        this.deliveryTag = deliveryTag;
    }

    @Override
    public void run() {
        // If the delivery was already settled (ack received), the work item has been
        // removed from the map and there is nothing to time out.
        final RetriableWorkItem workItem = pendingSendsMap.remove(deliveryTag);
        if (workItem == null) {
            return;
        }
        Exception cause = lastKnownLinkError;
        final Exception lastError;
        final Instant lastErrorTime;
        // Snapshot the last known link error and its timestamp under the lock so the
        // pair is read consistently.
        synchronized (errorConditionLock) {
            lastError = lastKnownLinkError;
            lastErrorTime = lastKnownErrorReportedAt;
        }
        // Prefer surfacing a recent link error over a generic timeout: use it when it
        // happened within the server-busy sleep window or within the try-timeout window.
        if (lastError != null && lastErrorTime != null) {
            final Instant now = Instant.now();
            final boolean isLastErrorAfterSleepTime =
                lastErrorTime.isAfter(now.minusSeconds(SERVER_BUSY_BASE_SLEEP_TIME_IN_SECS));
            final boolean isServerBusy = lastError instanceof AmqpException && isLastErrorAfterSleepTime;
            final boolean isLastErrorAfterOperationTimeout =
                lastErrorTime.isAfter(now.minus(retryOptions.getTryTimeout()));
            cause = isServerBusy || isLastErrorAfterOperationTimeout ? lastError : null;
        }
        // Report the recent AmqpException as-is; otherwise fail the work item with a
        // retriable TIMEOUT_ERROR for this entity.
        final AmqpException exception;
        if (cause instanceof AmqpException) {
            exception = (AmqpException) cause;
        } else {
            exception = new AmqpException(true, AmqpErrorCondition.TIMEOUT_ERROR,
                String.format(Locale.US, "Entity(%s): Send operation timed out", entityPath),
                handler.getErrorContext(sender));
        }
        workItem.error(exception, null);
    }
}
}
Great question. So if the Qpid Message object is empty (i.e., none of its properties are set), then _encode_ will return 0; since there is nothing to send for such a message, we cannot add an empty byte[] to CompositeReadableBuffer. I'll add the logs. One note for future reference: the Proton-J library won't allow sending an empty byte array, so if all messages in the batch are empty, the underlying API will error out. For both batch and non-batch sends, the error will be the same in such a case.
/**
 * Sends the batch of messages to the service as a single AMQP batch-format message.
 * <p>
 * The first message in the batch supplies the batch envelope (message annotations,
 * message-id, group-id); every message is then encoded as an "amqp:data:binary" section
 * appended to a composite buffer. The total encoded size is validated against the link's
 * max message size before sending.
 *
 * @param messageBatch Messages to send; a single-element batch is sent as a plain message.
 * @param deliveryState Delivery state to pre-settle the delivery with. Can be {@code null}.
 * @return A mono that completes when the batch has been sent.
 */
public Mono<Void> send(List<Message> messageBatch, DeliveryState deliveryState) {
    if (isDisposed.get()) {
        return Mono.error(new IllegalStateException(String.format(
            "connectionId[%s] linkName[%s] Cannot publish data batch when disposed.", handler.getConnectionId(),
            getLinkName())));
    }
    // A batch of one needs no envelope; send it through the single-message path.
    if (messageBatch.size() == 1) {
        return send(messageBatch.get(0), deliveryState);
    }
    return getLinkSize()
        .flatMap(maxMessageSize -> {
            int totalEncodedSize = 0;
            final CompositeReadableBuffer buffer = new CompositeReadableBuffer();
            // Envelope is derived from the first message's annotations/ids; it can encode
            // to zero bytes when the first message carries none of them.
            final byte[] envelopBytes = batchEnvelopBytes(messageBatch.get(0), maxMessageSize);
            if (envelopBytes.length > 0) {
                totalEncodedSize += envelopBytes.length;
                if (totalEncodedSize > maxMessageSize) {
                    return batchBufferOverflowError(maxMessageSize);
                }
                buffer.append(envelopBytes);
            }
            for (final Message message : messageBatch) {
                final byte[] sectionBytes = batchBinaryDataSectionBytes(message, maxMessageSize);
                if (sectionBytes.length > 0) {
                    totalEncodedSize += sectionBytes.length;
                    if (totalEncodedSize > maxMessageSize) {
                        return batchBufferOverflowError(maxMessageSize);
                    }
                    buffer.append(sectionBytes);
                } else {
                    // FIX: an empty Qpid message encodes to zero bytes and
                    // CompositeReadableBuffer cannot accept an empty array. Previously such
                    // messages were dropped silently; log them so the omission is visible.
                    logger.info("Ignoring the empty message org.apache.qpid.proton.message.message@{} in the batch.",
                        Integer.toHexString(System.identityHashCode(message)));
                }
            }
            return send(buffer, AmqpConstants.AMQP_BATCH_MESSAGE_FORMAT, deliveryState);
        }).then();
}
totalEncodedSize += sectionBytes.length;
/**
 * Sends the batch of messages to the service as a single AMQP batch-format message.
 * <p>
 * The first message in the batch supplies the batch envelope (message annotations,
 * message-id, group-id); each message is encoded as an "amqp:data:binary" section and
 * appended to a composite buffer. The running encoded size is checked against the link's
 * max message size before each append. Messages that encode to zero bytes are logged and
 * skipped, because an empty byte[] cannot be appended to the composite buffer.
 *
 * @param messageBatch Messages to send; a single-element batch is sent as a plain message.
 * @param deliveryState Delivery state to pre-settle the delivery with. Can be {@code null}.
 * @return A mono that completes when the batch has been sent.
 */
public Mono<Void> send(List<Message> messageBatch, DeliveryState deliveryState) {
    if (isDisposed.get()) {
        return Mono.error(new IllegalStateException(String.format(
            "connectionId[%s] linkName[%s] Cannot publish data batch when disposed.", handler.getConnectionId(),
            getLinkName())));
    }
    // A batch of one needs no envelope; use the single-message path.
    if (messageBatch.size() == 1) {
        return send(messageBatch.get(0), deliveryState);
    }
    return getLinkSize()
        .flatMap(maxMessageSize -> {
            int totalEncodedSize = 0;
            final CompositeReadableBuffer buffer = new CompositeReadableBuffer();
            // Envelope derived from the first message; may encode to zero bytes when the
            // first message carries no annotations/ids.
            final byte[] envelopBytes = batchEnvelopBytes(messageBatch.get(0), maxMessageSize);
            if (envelopBytes.length > 0) {
                totalEncodedSize += envelopBytes.length;
                if (totalEncodedSize > maxMessageSize) {
                    return batchBufferOverflowError(maxMessageSize);
                }
                buffer.append(envelopBytes);
            }
            for (final Message message : messageBatch) {
                final byte[] sectionBytes = batchBinaryDataSectionBytes(message, maxMessageSize);
                if (sectionBytes.length > 0) {
                    totalEncodedSize += sectionBytes.length;
                    if (totalEncodedSize > maxMessageSize) {
                        return batchBufferOverflowError(maxMessageSize);
                    }
                    buffer.append(sectionBytes);
                } else {
                    // Empty messages encode to zero bytes; skip them but make the
                    // omission visible in the logs.
                    logger.info("Ignoring the empty message org.apache.qpid.proton.message.message@{} in the batch.",
                        Integer.toHexString(System.identityHashCode(message)));
                }
            }
            return send(buffer, AmqpConstants.AMQP_BATCH_MESSAGE_FORMAT, deliveryState);
        }).then();
}
class ReactorSender implements AmqpSendLink, AsyncCloseable, AutoCloseable { private static final String DELIVERY_TAG_KEY = "deliveryTag"; private static final String PENDING_SENDS_SIZE_KEY = "pending_sends_size"; private final String entityPath; private final Sender sender; private final SendLinkHandler handler; private final ReactorProvider reactorProvider; private final Disposable.Composite subscriptions; private final AtomicBoolean hasConnected = new AtomicBoolean(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicInteger retryAttempts = new AtomicInteger(); private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final Object pendingSendLock = new Object(); private final ConcurrentHashMap<String, RetriableWorkItem> pendingSendsMap = new ConcurrentHashMap<>(); private final PriorityQueue<WeightedDeliveryTag> pendingSendsQueue = new PriorityQueue<>(1000, new DeliveryTagComparator()); private final ClientLogger logger; private final Flux<AmqpEndpointState> endpointStates; private final TokenManager tokenManager; private final MessageSerializer messageSerializer; private final AmqpRetryPolicy retry; private final AmqpRetryOptions retryOptions; private final String activeTimeoutMessage; private final Scheduler scheduler; private final AmqpMetricsProvider metricsProvider; private final Object errorConditionLock = new Object(); private volatile Exception lastKnownLinkError; private volatile Instant lastKnownErrorReportedAt; private volatile int linkSize; /** * Creates an instance of {@link ReactorSender}. * * @param amqpConnection The parent {@link AmqpConnection} that this sender lives in. * @param entityPath The message broker address for the sender. * @param sender The underlying proton-j sender. * @param handler The proton-j handler associated with the sender. * @param reactorProvider Provider to schedule work on the proton-j reactor. * @param tokenManager Token manager for authorising with the CBS node. 
Can be {@code null} if it is part of the * transaction manager. * @param messageSerializer Serializer to deserialise and serialize AMQP messages. * @param retryOptions Retry options. * @param scheduler Scheduler to schedule send timeout. */ ReactorSender(AmqpConnection amqpConnection, String entityPath, Sender sender, SendLinkHandler handler, ReactorProvider reactorProvider, TokenManager tokenManager, MessageSerializer messageSerializer, AmqpRetryOptions retryOptions, Scheduler scheduler, AmqpMetricsProvider metricsProvider) { this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.sender = Objects.requireNonNull(sender, "'sender' cannot be null."); this.handler = Objects.requireNonNull(handler, "'handler' cannot be null."); this.reactorProvider = Objects.requireNonNull(reactorProvider, "'reactorProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null."); this.scheduler = Objects.requireNonNull(scheduler, "'scheduler' cannot be null."); this.retry = RetryUtil.getRetryPolicy(retryOptions); this.tokenManager = tokenManager; this.metricsProvider = metricsProvider; String connectionId = handler.getConnectionId() == null ? NOT_APPLICABLE : handler.getConnectionId(); String linkName = getLinkName() == null ? 
NOT_APPLICABLE : getLinkName(); Map<String, Object> loggingContext = createContextWithConnectionId(connectionId); loggingContext.put(LINK_NAME_KEY, linkName); loggingContext.put(ENTITY_PATH_KEY, entityPath); this.logger = new ClientLogger(ReactorSender.class, loggingContext); this.activeTimeoutMessage = String.format( "ReactorSender connectionId[%s] linkName[%s]: Waiting for send and receive handler to be ACTIVE", handler.getConnectionId(), handler.getLinkName()); this.endpointStates = this.handler.getEndpointStates() .map(state -> { logger.verbose("State {}", state); this.hasConnected.set(state == EndpointState.ACTIVE); return AmqpEndpointStateUtil.getConnectionState(state); }) .doOnError(error -> { hasConnected.set(false); handleError(error); }) .doOnComplete(() -> { hasConnected.set(false); handleClose(); }) .cache(1); this.subscriptions = Disposables.composite( this.endpointStates.subscribe(), this.handler.getDeliveredMessages().subscribe(this::processDeliveredMessage), this.handler.getLinkCredits().subscribe(credit -> { logger.atVerbose().addKeyValue("credits", credit) .log("Credits on link."); this.scheduleWorkOnDispatcher(); }), amqpConnection.getShutdownSignals().flatMap(signal -> { logger.verbose("Shutdown signal received."); hasConnected.set(false); return closeAsync("Connection shutdown.", null); }).subscribe() ); if (tokenManager != null) { this.subscriptions.add(tokenManager.getAuthorizationResults().onErrorResume(error -> { final Mono<Void> operation = closeAsync(String.format("connectionId[%s] linkName[%s] Token renewal failure. Disposing send " + "link.", amqpConnection.getId(), getLinkName()), new ErrorCondition(Symbol.getSymbol(NOT_ALLOWED.getErrorCondition()), error.getMessage())); return operation.then(Mono.empty()); }).subscribe(response -> { logger.atVerbose().addKeyValue("response", response) .log("Token refreshed."); }, error -> { }, () -> { logger.verbose(" Authorization completed. Disposing."); closeAsync("Authorization completed. 
Disposing.", null).subscribe(); })); } } @Override public Flux<AmqpEndpointState> getEndpointStates() { return endpointStates; } @Override public Mono<Void> send(Message message) { return send(message, null); } @Override public Mono<Void> send(Message message, DeliveryState deliveryState) { if (isDisposed.get()) { return Mono.error(new IllegalStateException(String.format( "connectionId[%s] linkName[%s] Cannot publish message when disposed.", handler.getConnectionId(), getLinkName()))); } return getLinkSize() .flatMap(maxMessageSize -> { final int payloadSize = messageSerializer.getSize(message); final int allocationSize = Math.min(payloadSize + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize); final byte[] bytes = new byte[allocationSize]; int encodedSize; try { encodedSize = message.encode(bytes, 0, allocationSize); } catch (BufferOverflowException exception) { final String errorMessage = String.format(Locale.US, "Error sending. Size of the payload exceeded maximum message size: %s kb", maxMessageSize / 1024); final Throwable error = new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, errorMessage, exception, handler.getErrorContext(sender)); return Mono.error(error); } return send(bytes, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT, deliveryState); }).then(); } @Override public Mono<Void> send(List<Message> messageBatch) { return send(messageBatch, null); } @Override private byte[] batchEnvelopBytes(Message envelopMessage, int maxMessageSize) { final Message message = Proton.message(); message.setMessageAnnotations(envelopMessage.getMessageAnnotations()); final int size = messageSerializer.getSize(message); final int allocationSize = Math.min(size + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize); final byte[] encodedBytes = new byte[allocationSize]; final int encodedSize = message.encode(encodedBytes, 0, allocationSize); return Arrays.copyOf(encodedBytes, encodedSize); } private byte[] batchBinaryDataSectionBytes(Message sectionMessage, int 
maxMessageSize) { final int size = messageSerializer.getSize(sectionMessage); final int allocationSize = Math.min(size + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize); final byte[] encodedBytes = new byte[allocationSize]; final int encodedSize = sectionMessage.encode(encodedBytes, 0, allocationSize); final Message message = Proton.message(); final Data binaryData = new Data(new Binary(encodedBytes, 0, encodedSize)); message.setBody(binaryData); final int binaryRawSize = binaryData.getValue().getLength(); final int binaryEncodedSize = binaryEncodedSize(binaryRawSize); final byte[] binaryEncodedBytes = new byte[binaryEncodedSize]; message.encode(binaryEncodedBytes, 0, binaryEncodedSize); return binaryEncodedBytes; } private Mono<Void> batchBufferOverflowError(int maxMessageSize) { return Mono.error(new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, String.format(Locale.US, "Size of the payload exceeded maximum message size: %s kb", maxMessageSize / 1024), new BufferOverflowException(), handler.getErrorContext(sender))); } /** * Compute the encoded size when encoding a binary data of given size per Amqp 1.0 spec "amqp:data:binary" format. * * @param binaryRawSize the length of the binary data. * @return the encoded size. 
*/ private int binaryEncodedSize(int binaryRawSize) { if (binaryRawSize <= 255) { return 5 + binaryRawSize; } else { return 8 + binaryRawSize; } } @Override public AmqpErrorContext getErrorContext() { return handler.getErrorContext(sender); } @Override public String getLinkName() { return sender.getName(); } @Override public String getEntityPath() { return entityPath; } @Override public String getHostname() { return handler.getHostname(); } @Override public Mono<Integer> getLinkSize() { if (linkSize > 0) { return Mono.defer(() -> Mono.just(this.linkSize)); } synchronized (this) { if (linkSize > 0) { return Mono.defer(() -> Mono.just(linkSize)); } return RetryUtil.withRetry(getEndpointStates().takeUntil(state -> state == AmqpEndpointState.ACTIVE), retryOptions, activeTimeoutMessage) .then(Mono.fromCallable(() -> { final UnsignedLong remoteMaxMessageSize = sender.getRemoteMaxMessageSize(); if (remoteMaxMessageSize != null) { linkSize = remoteMaxMessageSize.intValue(); } else { logger.warning("Could not get the getRemoteMaxMessageSize. Returning current link size: {}", linkSize); } return linkSize; })); } } @Override public boolean isDisposed() { return isDisposed.get(); } /** * Blocking call that disposes of the sender. * * @see */ @Override public void dispose() { close(); } /** * Blocking call that disposes of the sender. * * @see */ @Override public void close() { closeAsync().block(retryOptions.getTryTimeout()); } @Override public Mono<Void> closeAsync() { return closeAsync("User invoked close operation.", null); } /** * Disposes of the sender. * * @param errorCondition Error condition associated with close operation. * @param message Message associated with why the sender was closed. * * @return A mono that completes when the send link has closed. 
*/ Mono<Void> closeAsync(String message, ErrorCondition errorCondition) { if (isDisposed.getAndSet(true)) { return isClosedMono.asMono(); } addErrorCondition(logger.atVerbose(), errorCondition) .log("Setting error condition and disposing. {}", message); final Runnable closeWork = () -> { if (errorCondition != null && sender.getCondition() == null) { sender.setCondition(errorCondition); } sender.close(); }; return Mono.fromRunnable(() -> { try { reactorProvider.getReactorDispatcher().invoke(closeWork); } catch (IOException e) { logger.warning("Could not schedule close work. Running manually. And completing close.", e); closeWork.run(); handleClose(); } catch (RejectedExecutionException e) { logger.info("RejectedExecutionException scheduling close work. And completing close."); closeWork.run(); handleClose(); } }).then(isClosedMono.asMono()) .publishOn(Schedulers.boundedElastic()); } /** * A mono that completes when the sender has completely closed. * * @return mono that completes when the sender has completely closed. */ Mono<Void> isClosed() { return isClosedMono.asMono(); } @Override public Mono<DeliveryState> send(byte[] bytes, int arrayOffset, int messageFormat, DeliveryState deliveryState) { return onEndpointActive().then(Mono.create(sink -> { sendWork(new RetriableWorkItem(bytes, arrayOffset, messageFormat, sink, retryOptions.getTryTimeout(), deliveryState, metricsProvider)); })); } Mono<DeliveryState> send(ReadableBuffer buffer, int messageFormat, DeliveryState deliveryState) { return onEndpointActive().then(Mono.create(sink -> { sendWork(new RetriableWorkItem(buffer, messageFormat, sink, retryOptions.getTryTimeout(), deliveryState, metricsProvider)); })); } private Flux<EndpointState> onEndpointActive() { return RetryUtil.withRetry(handler.getEndpointStates().takeUntil(state -> state == EndpointState.ACTIVE), retryOptions, activeTimeoutMessage); } /** * Add the work item in pending send to be processed on {@link ReactorDispatcher} thread. 
* * @param workItem to be processed. */ private void sendWork(RetriableWorkItem workItem) { final String deliveryTag = UUID.randomUUID().toString().replace("-", ""); synchronized (pendingSendLock) { this.pendingSendsMap.put(deliveryTag, workItem); this.pendingSendsQueue.offer(new WeightedDeliveryTag(deliveryTag, workItem.hasBeenRetried() ? 1 : 0)); } this.scheduleWorkOnDispatcher(); } /** * Invokes work on the Reactor. Should only be called from ReactorDispatcher.invoke() */ private void processSendWork() { if (!hasConnected.get()) { logger.warning("Not connected. Not processing send work."); return; } if (isDisposed.get()) { logger.info("Sender is closed. Not executing work."); return; } while (hasConnected.get() && sender.getCredit() > 0) { final WeightedDeliveryTag weightedDelivery; final RetriableWorkItem workItem; final String deliveryTag; synchronized (pendingSendLock) { weightedDelivery = this.pendingSendsQueue.poll(); if (weightedDelivery != null) { deliveryTag = weightedDelivery.getDeliveryTag(); workItem = this.pendingSendsMap.get(deliveryTag); } else { workItem = null; deliveryTag = null; } } if (workItem == null) { if (deliveryTag != null) { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("sendData not found for this delivery."); } break; } Delivery delivery = null; boolean linkAdvance = false; int sentMsgSize = 0; Exception sendException = null; try { workItem.beforeTry(); delivery = sender.delivery(deliveryTag.getBytes(UTF_8)); delivery.setMessageFormat(workItem.getMessageFormat()); if (workItem.isDeliveryStateProvided()) { delivery.disposition(workItem.getDeliveryState()); } workItem.send(sender); linkAdvance = sender.advance(); } catch (Exception exception) { sendException = exception; } if (linkAdvance) { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("Sent message."); workItem.setWaitingForAck(); scheduler.schedule(new SendTimeout(deliveryTag), retryOptions.getTryTimeout().toMillis(), 
TimeUnit.MILLISECONDS); } else { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .addKeyValue("sentMessageSize", sentMsgSize) .addKeyValue("payloadActualSize", workItem.getEncodedMessageSize()) .log("Sendlink advance failed."); DeliveryState outcome = null; if (delivery != null) { outcome = delivery.getRemoteState(); delivery.free(); } final AmqpErrorContext context = handler.getErrorContext(sender); final Throwable exception = sendException != null ? new OperationCancelledException(String.format(Locale.US, "Entity(%s): send operation failed. Please see cause for more details", entityPath), sendException, context) : new OperationCancelledException(String.format(Locale.US, "Entity(%s): send operation failed while advancing delivery(tag: %s).", entityPath, deliveryTag), context); workItem.error(exception, outcome); } } } private void processDeliveredMessage(Delivery delivery) { final DeliveryState outcome = delivery.getRemoteState(); final String deliveryTag = new String(delivery.getTag(), UTF_8); logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("Process delivered message."); final RetriableWorkItem workItem = pendingSendsMap.remove(deliveryTag); if (workItem == null) { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("Mismatch (or send timed out)."); return; } else if (workItem.isDeliveryStateProvided()) { workItem.success(outcome); return; } if (outcome instanceof Accepted) { synchronized (errorConditionLock) { lastKnownLinkError = null; lastKnownErrorReportedAt = null; retryAttempts.set(0); } workItem.success(outcome); } else if (outcome instanceof Rejected) { final Rejected rejected = (Rejected) outcome; final org.apache.qpid.proton.amqp.transport.ErrorCondition error = rejected.getError(); final Exception exception = ExceptionUtil.toException(error.getCondition().toString(), error.getDescription(), handler.getErrorContext(sender)); logger.atWarning() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) 
.addKeyValue("rejected", rejected) .log("Delivery rejected."); final int retryAttempt; if (isGeneralSendError(error.getCondition())) { synchronized (errorConditionLock) { lastKnownLinkError = exception; lastKnownErrorReportedAt = Instant.now(); retryAttempt = retryAttempts.incrementAndGet(); } } else { retryAttempt = retryAttempts.get(); } final Duration retryInterval = retry.calculateRetryDelay(exception, retryAttempt); if (retryInterval == null || retryInterval.compareTo(workItem.getTimeoutTracker().remaining()) > 0) { cleanupFailedSend(workItem, exception, outcome); } else { workItem.setLastKnownException(exception); try { reactorProvider.getReactorDispatcher().invoke(() -> sendWork(workItem), retryInterval); } catch (IOException | RejectedExecutionException schedulerException) { exception.initCause(schedulerException); cleanupFailedSend( workItem, new AmqpException(false, String.format(Locale.US, "Entity(%s): send operation failed while scheduling a" + " retry on Reactor, see cause for more details.", entityPath), schedulerException, handler.getErrorContext(sender)), outcome); } } } else if (outcome instanceof Released) { cleanupFailedSend(workItem, new OperationCancelledException(outcome.toString(), handler.getErrorContext(sender)), outcome); } else if (outcome instanceof Declared) { final Declared declared = (Declared) outcome; workItem.success(declared); } else { cleanupFailedSend(workItem, new AmqpException(false, outcome.toString(), handler.getErrorContext(sender)), outcome); } } private void scheduleWorkOnDispatcher() { try { reactorProvider.getReactorDispatcher().invoke(this::processSendWork); } catch (IOException e) { logger.warning("Error scheduling work on reactor.", e); } catch (RejectedExecutionException e) { logger.info("Error scheduling work on reactor because of RejectedExecutionException."); } } private void cleanupFailedSend(final RetriableWorkItem workItem, final Exception exception, final DeliveryState deliveryState) { 
workItem.error(exception, deliveryState); } private void completeClose() { isClosedMono.emitEmpty((signalType, result) -> { addSignalTypeAndResult(logger.atWarning(), signalType, result).log("Unable to emit shutdown signal."); return false; }); subscriptions.dispose(); if (tokenManager != null) { tokenManager.close(); } } /** * Clears pending sends and puts an error in there. * * @param error Error to pass to pending sends. */ private void handleError(Throwable error) { synchronized (pendingSendLock) { if (isDisposed.getAndSet(true)) { logger.verbose("This was already disposed. Dropping error."); } else { logger.atVerbose() .addKeyValue(PENDING_SENDS_SIZE_KEY, () -> String.valueOf(pendingSendsMap.size())) .log("Disposing pending sends with error."); } pendingSendsMap.forEach((key, value) -> value.error(error, null)); pendingSendsMap.clear(); pendingSendsQueue.clear(); } completeClose(); } private void handleClose() { final String message = String.format("Could not complete sends because link '%s' for '%s' is closed.", getLinkName(), entityPath); final AmqpErrorContext context = handler.getErrorContext(sender); synchronized (pendingSendLock) { if (isDisposed.getAndSet(true)) { logger.verbose("This was already disposed."); } else { logger.atVerbose() .addKeyValue(PENDING_SENDS_SIZE_KEY, () -> String.valueOf(pendingSendsMap.size())) .log("Disposing pending sends."); } pendingSendsMap.forEach((key, value) -> value.error(new AmqpException(true, message, context), null)); pendingSendsMap.clear(); pendingSendsQueue.clear(); } completeClose(); } private static boolean isGeneralSendError(Symbol amqpError) { return (amqpError == AmqpErrorCode.SERVER_BUSY_ERROR || amqpError == AmqpErrorCode.TIMEOUT_ERROR || amqpError == AmqpErrorCode.RESOURCE_LIMIT_EXCEEDED); } private static class WeightedDeliveryTag { private final String deliveryTag; private final int priority; WeightedDeliveryTag(final String deliveryTag, final int priority) { this.deliveryTag = deliveryTag; this.priority 
= priority; } private String getDeliveryTag() { return this.deliveryTag; } private int getPriority() { return this.priority; } } private static class DeliveryTagComparator implements Comparator<WeightedDeliveryTag>, Serializable { private static final long serialVersionUID = -7057500582037295635L; @Override public int compare(WeightedDeliveryTag deliveryTag0, WeightedDeliveryTag deliveryTag1) { return deliveryTag1.getPriority() - deliveryTag0.getPriority(); } } /** * Keeps track of messages that have been sent, but may not have been acknowledged by the service. */ private class SendTimeout implements Runnable { private final String deliveryTag; SendTimeout(String deliveryTag) { this.deliveryTag = deliveryTag; } @Override public void run() { final RetriableWorkItem workItem = pendingSendsMap.remove(deliveryTag); if (workItem == null) { return; } Exception cause = lastKnownLinkError; final Exception lastError; final Instant lastErrorTime; synchronized (errorConditionLock) { lastError = lastKnownLinkError; lastErrorTime = lastKnownErrorReportedAt; } if (lastError != null && lastErrorTime != null) { final Instant now = Instant.now(); final boolean isLastErrorAfterSleepTime = lastErrorTime.isAfter(now.minusSeconds(SERVER_BUSY_BASE_SLEEP_TIME_IN_SECS)); final boolean isServerBusy = lastError instanceof AmqpException && isLastErrorAfterSleepTime; final boolean isLastErrorAfterOperationTimeout = lastErrorTime.isAfter(now.minus(retryOptions.getTryTimeout())); cause = isServerBusy || isLastErrorAfterOperationTimeout ? lastError : null; } final AmqpException exception; if (cause instanceof AmqpException) { exception = (AmqpException) cause; } else { exception = new AmqpException(true, AmqpErrorCondition.TIMEOUT_ERROR, String.format(Locale.US, "Entity(%s): Send operation timed out", entityPath), handler.getErrorContext(sender)); } workItem.error(exception, null); } } }
class ReactorSender implements AmqpSendLink, AsyncCloseable, AutoCloseable { private static final String DELIVERY_TAG_KEY = "deliveryTag"; private static final String PENDING_SENDS_SIZE_KEY = "pending_sends_size"; private final String entityPath; private final Sender sender; private final SendLinkHandler handler; private final ReactorProvider reactorProvider; private final Disposable.Composite subscriptions; private final AtomicBoolean hasConnected = new AtomicBoolean(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicInteger retryAttempts = new AtomicInteger(); private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final Object pendingSendLock = new Object(); private final ConcurrentHashMap<String, RetriableWorkItem> pendingSendsMap = new ConcurrentHashMap<>(); private final PriorityQueue<WeightedDeliveryTag> pendingSendsQueue = new PriorityQueue<>(1000, new DeliveryTagComparator()); private final ClientLogger logger; private final Flux<AmqpEndpointState> endpointStates; private final TokenManager tokenManager; private final MessageSerializer messageSerializer; private final AmqpRetryPolicy retry; private final AmqpRetryOptions retryOptions; private final String activeTimeoutMessage; private final Scheduler scheduler; private final AmqpMetricsProvider metricsProvider; private final Object errorConditionLock = new Object(); private volatile Exception lastKnownLinkError; private volatile Instant lastKnownErrorReportedAt; private volatile int linkSize; /** * Creates an instance of {@link ReactorSender}. * * @param amqpConnection The parent {@link AmqpConnection} that this sender lives in. * @param entityPath The message broker address for the sender. * @param sender The underlying proton-j sender. * @param handler The proton-j handler associated with the sender. * @param reactorProvider Provider to schedule work on the proton-j reactor. * @param tokenManager Token manager for authorising with the CBS node. 
Can be {@code null} if it is part of the * transaction manager. * @param messageSerializer Serializer to deserialise and serialize AMQP messages. * @param retryOptions Retry options. * @param scheduler Scheduler to schedule send timeout. */ ReactorSender(AmqpConnection amqpConnection, String entityPath, Sender sender, SendLinkHandler handler, ReactorProvider reactorProvider, TokenManager tokenManager, MessageSerializer messageSerializer, AmqpRetryOptions retryOptions, Scheduler scheduler, AmqpMetricsProvider metricsProvider) { this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.sender = Objects.requireNonNull(sender, "'sender' cannot be null."); this.handler = Objects.requireNonNull(handler, "'handler' cannot be null."); this.reactorProvider = Objects.requireNonNull(reactorProvider, "'reactorProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null."); this.scheduler = Objects.requireNonNull(scheduler, "'scheduler' cannot be null."); this.retry = RetryUtil.getRetryPolicy(retryOptions); this.tokenManager = tokenManager; this.metricsProvider = metricsProvider; String connectionId = handler.getConnectionId() == null ? NOT_APPLICABLE : handler.getConnectionId(); String linkName = getLinkName() == null ? 
NOT_APPLICABLE : getLinkName(); Map<String, Object> loggingContext = createContextWithConnectionId(connectionId); loggingContext.put(LINK_NAME_KEY, linkName); loggingContext.put(ENTITY_PATH_KEY, entityPath); this.logger = new ClientLogger(ReactorSender.class, loggingContext); this.activeTimeoutMessage = String.format( "ReactorSender connectionId[%s] linkName[%s]: Waiting for send and receive handler to be ACTIVE", handler.getConnectionId(), handler.getLinkName()); this.endpointStates = this.handler.getEndpointStates() .map(state -> { logger.verbose("State {}", state); this.hasConnected.set(state == EndpointState.ACTIVE); return AmqpEndpointStateUtil.getConnectionState(state); }) .doOnError(error -> { hasConnected.set(false); handleError(error); }) .doOnComplete(() -> { hasConnected.set(false); handleClose(); }) .cache(1); this.subscriptions = Disposables.composite( this.endpointStates.subscribe(), this.handler.getDeliveredMessages().subscribe(this::processDeliveredMessage), this.handler.getLinkCredits().subscribe(credit -> { logger.atVerbose().addKeyValue("credits", credit) .log("Credits on link."); this.scheduleWorkOnDispatcher(); }), amqpConnection.getShutdownSignals().flatMap(signal -> { logger.verbose("Shutdown signal received."); hasConnected.set(false); return closeAsync("Connection shutdown.", null); }).subscribe() ); if (tokenManager != null) { this.subscriptions.add(tokenManager.getAuthorizationResults().onErrorResume(error -> { final Mono<Void> operation = closeAsync(String.format("connectionId[%s] linkName[%s] Token renewal failure. Disposing send " + "link.", amqpConnection.getId(), getLinkName()), new ErrorCondition(Symbol.getSymbol(NOT_ALLOWED.getErrorCondition()), error.getMessage())); return operation.then(Mono.empty()); }).subscribe(response -> { logger.atVerbose().addKeyValue("response", response) .log("Token refreshed."); }, error -> { }, () -> { logger.verbose(" Authorization completed. Disposing."); closeAsync("Authorization completed. 
Disposing.", null).subscribe(); })); } } @Override public Flux<AmqpEndpointState> getEndpointStates() { return endpointStates; } @Override public Mono<Void> send(Message message) { return send(message, null); } @Override public Mono<Void> send(Message message, DeliveryState deliveryState) { if (isDisposed.get()) { return Mono.error(new IllegalStateException(String.format( "connectionId[%s] linkName[%s] Cannot publish message when disposed.", handler.getConnectionId(), getLinkName()))); } return getLinkSize() .flatMap(maxMessageSize -> { final int payloadSize = messageSerializer.getSize(message); final int allocationSize = Math.min(payloadSize + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize); final byte[] bytes = new byte[allocationSize]; int encodedSize; try { encodedSize = message.encode(bytes, 0, allocationSize); } catch (BufferOverflowException exception) { final String errorMessage = String.format(Locale.US, "Error sending. Size of the payload exceeded maximum message size: %s kb", maxMessageSize / 1024); final Throwable error = new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, errorMessage, exception, handler.getErrorContext(sender)); return Mono.error(error); } return send(bytes, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT, deliveryState); }).then(); } @Override public Mono<Void> send(List<Message> messageBatch) { return send(messageBatch, null); } @Override private byte[] batchEnvelopBytes(Message envelopMessage, int maxMessageSize) { final Message message = Proton.message(); message.setMessageAnnotations(envelopMessage.getMessageAnnotations()); if ((envelopMessage.getMessageId() instanceof String) && !CoreUtils.isNullOrEmpty((String) envelopMessage.getMessageId())) { message.setMessageId(envelopMessage.getMessageId()); } if (!CoreUtils.isNullOrEmpty(envelopMessage.getGroupId())) { message.setGroupId(envelopMessage.getGroupId()); } final int size = messageSerializer.getSize(message); final int allocationSize = Math.min(size + 
MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize); final byte[] encodedBytes = new byte[allocationSize]; final int encodedSize = message.encode(encodedBytes, 0, allocationSize); return Arrays.copyOf(encodedBytes, encodedSize); } private byte[] batchBinaryDataSectionBytes(Message sectionMessage, int maxMessageSize) { final int size = messageSerializer.getSize(sectionMessage); final int allocationSize = Math.min(size + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize); final byte[] encodedBytes = new byte[allocationSize]; final int encodedSize = sectionMessage.encode(encodedBytes, 0, allocationSize); final Message message = Proton.message(); final Data binaryData = new Data(new Binary(encodedBytes, 0, encodedSize)); message.setBody(binaryData); final int binaryRawSize = binaryData.getValue().getLength(); final int binaryEncodedSize = binaryEncodedSize(binaryRawSize); final byte[] binaryEncodedBytes = new byte[binaryEncodedSize]; message.encode(binaryEncodedBytes, 0, binaryEncodedSize); return binaryEncodedBytes; } private Mono<Void> batchBufferOverflowError(int maxMessageSize) { return FluxUtil.monoError(logger, new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, String.format(Locale.US, "Size of the payload exceeded maximum message size: %s kb", maxMessageSize / 1024), new BufferOverflowException(), handler.getErrorContext(sender))); } /** * Compute the encoded size when encoding a binary data of given size per Amqp 1.0 spec "amqp:data:binary" format. * * @param binaryRawSize the length of the binary data. * @return the encoded size. 
*/ private int binaryEncodedSize(int binaryRawSize) { if (binaryRawSize <= 255) { return 5 + binaryRawSize; } else { return 8 + binaryRawSize; } } @Override public AmqpErrorContext getErrorContext() { return handler.getErrorContext(sender); } @Override public String getLinkName() { return sender.getName(); } @Override public String getEntityPath() { return entityPath; } @Override public String getHostname() { return handler.getHostname(); } @Override public Mono<Integer> getLinkSize() { if (linkSize > 0) { return Mono.defer(() -> Mono.just(this.linkSize)); } synchronized (this) { if (linkSize > 0) { return Mono.defer(() -> Mono.just(linkSize)); } return RetryUtil.withRetry(getEndpointStates().takeUntil(state -> state == AmqpEndpointState.ACTIVE), retryOptions, activeTimeoutMessage) .then(Mono.fromCallable(() -> { final UnsignedLong remoteMaxMessageSize = sender.getRemoteMaxMessageSize(); if (remoteMaxMessageSize != null) { linkSize = remoteMaxMessageSize.intValue(); } else { logger.warning("Could not get the getRemoteMaxMessageSize. Returning current link size: {}", linkSize); } return linkSize; })); } } @Override public boolean isDisposed() { return isDisposed.get(); } /** * Blocking call that disposes of the sender. * * @see */ @Override public void dispose() { close(); } /** * Blocking call that disposes of the sender. * * @see */ @Override public void close() { closeAsync().block(retryOptions.getTryTimeout()); } @Override public Mono<Void> closeAsync() { return closeAsync("User invoked close operation.", null); } /** * Disposes of the sender. * * @param errorCondition Error condition associated with close operation. * @param message Message associated with why the sender was closed. * * @return A mono that completes when the send link has closed. 
*/ Mono<Void> closeAsync(String message, ErrorCondition errorCondition) { if (isDisposed.getAndSet(true)) { return isClosedMono.asMono(); } addErrorCondition(logger.atVerbose(), errorCondition) .log("Setting error condition and disposing. {}", message); final Runnable closeWork = () -> { if (errorCondition != null && sender.getCondition() == null) { sender.setCondition(errorCondition); } sender.close(); }; return Mono.fromRunnable(() -> { try { reactorProvider.getReactorDispatcher().invoke(closeWork); } catch (IOException e) { logger.warning("Could not schedule close work. Running manually. And completing close.", e); closeWork.run(); handleClose(); } catch (RejectedExecutionException e) { logger.info("RejectedExecutionException scheduling close work. And completing close."); closeWork.run(); handleClose(); } }).then(isClosedMono.asMono()) .publishOn(Schedulers.boundedElastic()); } /** * A mono that completes when the sender has completely closed. * * @return mono that completes when the sender has completely closed. */ Mono<Void> isClosed() { return isClosedMono.asMono(); } @Override public Mono<DeliveryState> send(byte[] bytes, int arrayOffset, int messageFormat, DeliveryState deliveryState) { return onEndpointActive().then(Mono.create(sink -> { sendWork(new RetriableWorkItem(bytes, arrayOffset, messageFormat, sink, retryOptions.getTryTimeout(), deliveryState, metricsProvider)); })); } Mono<DeliveryState> send(ReadableBuffer buffer, int messageFormat, DeliveryState deliveryState) { return onEndpointActive().then(Mono.create(sink -> { sendWork(new RetriableWorkItem(buffer, messageFormat, sink, retryOptions.getTryTimeout(), deliveryState, metricsProvider)); })); } private Flux<EndpointState> onEndpointActive() { return RetryUtil.withRetry(handler.getEndpointStates().takeUntil(state -> state == EndpointState.ACTIVE), retryOptions, activeTimeoutMessage); } /** * Add the work item in pending send to be processed on {@link ReactorDispatcher} thread. 
* * @param workItem to be processed. */ private void sendWork(RetriableWorkItem workItem) { final String deliveryTag = UUID.randomUUID().toString().replace("-", ""); synchronized (pendingSendLock) { this.pendingSendsMap.put(deliveryTag, workItem); this.pendingSendsQueue.offer(new WeightedDeliveryTag(deliveryTag, workItem.hasBeenRetried() ? 1 : 0)); } this.scheduleWorkOnDispatcher(); } /** * Invokes work on the Reactor. Should only be called from ReactorDispatcher.invoke() */ private void processSendWork() { if (!hasConnected.get()) { logger.warning("Not connected. Not processing send work."); return; } if (isDisposed.get()) { logger.info("Sender is closed. Not executing work."); return; } while (hasConnected.get() && sender.getCredit() > 0) { final WeightedDeliveryTag weightedDelivery; final RetriableWorkItem workItem; final String deliveryTag; synchronized (pendingSendLock) { weightedDelivery = this.pendingSendsQueue.poll(); if (weightedDelivery != null) { deliveryTag = weightedDelivery.getDeliveryTag(); workItem = this.pendingSendsMap.get(deliveryTag); } else { workItem = null; deliveryTag = null; } } if (workItem == null) { if (deliveryTag != null) { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("sendData not found for this delivery."); } break; } Delivery delivery = null; boolean linkAdvance = false; int sentMsgSize = 0; Exception sendException = null; try { workItem.beforeTry(); delivery = sender.delivery(deliveryTag.getBytes(UTF_8)); delivery.setMessageFormat(workItem.getMessageFormat()); if (workItem.isDeliveryStateProvided()) { delivery.disposition(workItem.getDeliveryState()); } workItem.send(sender); linkAdvance = sender.advance(); } catch (Exception exception) { sendException = exception; } if (linkAdvance) { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("Sent message."); workItem.setWaitingForAck(); scheduler.schedule(new SendTimeout(deliveryTag), retryOptions.getTryTimeout().toMillis(), 
TimeUnit.MILLISECONDS); } else { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .addKeyValue("sentMessageSize", sentMsgSize) .addKeyValue("payloadActualSize", workItem.getEncodedMessageSize()) .log("Sendlink advance failed."); DeliveryState outcome = null; if (delivery != null) { outcome = delivery.getRemoteState(); delivery.free(); } final AmqpErrorContext context = handler.getErrorContext(sender); final Throwable exception = sendException != null ? new OperationCancelledException(String.format(Locale.US, "Entity(%s): send operation failed. Please see cause for more details", entityPath), sendException, context) : new OperationCancelledException(String.format(Locale.US, "Entity(%s): send operation failed while advancing delivery(tag: %s).", entityPath, deliveryTag), context); workItem.error(exception, outcome); } } } private void processDeliveredMessage(Delivery delivery) { final DeliveryState outcome = delivery.getRemoteState(); final String deliveryTag = new String(delivery.getTag(), UTF_8); logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("Process delivered message."); final RetriableWorkItem workItem = pendingSendsMap.remove(deliveryTag); if (workItem == null) { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("Mismatch (or send timed out)."); return; } else if (workItem.isDeliveryStateProvided()) { workItem.success(outcome); return; } if (outcome instanceof Accepted) { synchronized (errorConditionLock) { lastKnownLinkError = null; lastKnownErrorReportedAt = null; retryAttempts.set(0); } workItem.success(outcome); } else if (outcome instanceof Rejected) { final Rejected rejected = (Rejected) outcome; final org.apache.qpid.proton.amqp.transport.ErrorCondition error = rejected.getError(); final Exception exception = ExceptionUtil.toException(error.getCondition().toString(), error.getDescription(), handler.getErrorContext(sender)); logger.atWarning() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) 
.addKeyValue("rejected", rejected) .log("Delivery rejected."); final int retryAttempt; if (isGeneralSendError(error.getCondition())) { synchronized (errorConditionLock) { lastKnownLinkError = exception; lastKnownErrorReportedAt = Instant.now(); retryAttempt = retryAttempts.incrementAndGet(); } } else { retryAttempt = retryAttempts.get(); } final Duration retryInterval = retry.calculateRetryDelay(exception, retryAttempt); if (retryInterval == null || retryInterval.compareTo(workItem.getTimeoutTracker().remaining()) > 0) { cleanupFailedSend(workItem, exception, outcome); } else { workItem.setLastKnownException(exception); try { reactorProvider.getReactorDispatcher().invoke(() -> sendWork(workItem), retryInterval); } catch (IOException | RejectedExecutionException schedulerException) { exception.initCause(schedulerException); cleanupFailedSend( workItem, new AmqpException(false, String.format(Locale.US, "Entity(%s): send operation failed while scheduling a" + " retry on Reactor, see cause for more details.", entityPath), schedulerException, handler.getErrorContext(sender)), outcome); } } } else if (outcome instanceof Released) { cleanupFailedSend(workItem, new OperationCancelledException(outcome.toString(), handler.getErrorContext(sender)), outcome); } else if (outcome instanceof Declared) { final Declared declared = (Declared) outcome; workItem.success(declared); } else { cleanupFailedSend(workItem, new AmqpException(false, outcome.toString(), handler.getErrorContext(sender)), outcome); } } private void scheduleWorkOnDispatcher() { try { reactorProvider.getReactorDispatcher().invoke(this::processSendWork); } catch (IOException e) { logger.warning("Error scheduling work on reactor.", e); } catch (RejectedExecutionException e) { logger.info("Error scheduling work on reactor because of RejectedExecutionException."); } } private void cleanupFailedSend(final RetriableWorkItem workItem, final Exception exception, final DeliveryState deliveryState) { 
workItem.error(exception, deliveryState); } private void completeClose() { isClosedMono.emitEmpty((signalType, result) -> { addSignalTypeAndResult(logger.atWarning(), signalType, result).log("Unable to emit shutdown signal."); return false; }); subscriptions.dispose(); if (tokenManager != null) { tokenManager.close(); } } /** * Clears pending sends and puts an error in there. * * @param error Error to pass to pending sends. */ private void handleError(Throwable error) { synchronized (pendingSendLock) { if (isDisposed.getAndSet(true)) { logger.verbose("This was already disposed. Dropping error."); } else { logger.atVerbose() .addKeyValue(PENDING_SENDS_SIZE_KEY, () -> String.valueOf(pendingSendsMap.size())) .log("Disposing pending sends with error."); } pendingSendsMap.forEach((key, value) -> value.error(error, null)); pendingSendsMap.clear(); pendingSendsQueue.clear(); } completeClose(); } private void handleClose() { final String message = String.format("Could not complete sends because link '%s' for '%s' is closed.", getLinkName(), entityPath); final AmqpErrorContext context = handler.getErrorContext(sender); synchronized (pendingSendLock) { if (isDisposed.getAndSet(true)) { logger.verbose("This was already disposed."); } else { logger.atVerbose() .addKeyValue(PENDING_SENDS_SIZE_KEY, () -> String.valueOf(pendingSendsMap.size())) .log("Disposing pending sends."); } pendingSendsMap.forEach((key, value) -> value.error(new AmqpException(true, message, context), null)); pendingSendsMap.clear(); pendingSendsQueue.clear(); } completeClose(); } private static boolean isGeneralSendError(Symbol amqpError) { return (amqpError == AmqpErrorCode.SERVER_BUSY_ERROR || amqpError == AmqpErrorCode.TIMEOUT_ERROR || amqpError == AmqpErrorCode.RESOURCE_LIMIT_EXCEEDED); } private static class WeightedDeliveryTag { private final String deliveryTag; private final int priority; WeightedDeliveryTag(final String deliveryTag, final int priority) { this.deliveryTag = deliveryTag; this.priority 
= priority; } private String getDeliveryTag() { return this.deliveryTag; } private int getPriority() { return this.priority; } } private static class DeliveryTagComparator implements Comparator<WeightedDeliveryTag>, Serializable { private static final long serialVersionUID = -7057500582037295635L; @Override public int compare(WeightedDeliveryTag deliveryTag0, WeightedDeliveryTag deliveryTag1) { return deliveryTag1.getPriority() - deliveryTag0.getPriority(); } } /** * Keeps track of messages that have been sent, but may not have been acknowledged by the service. */ private class SendTimeout implements Runnable { private final String deliveryTag; SendTimeout(String deliveryTag) { this.deliveryTag = deliveryTag; } @Override public void run() { final RetriableWorkItem workItem = pendingSendsMap.remove(deliveryTag); if (workItem == null) { return; } Exception cause = lastKnownLinkError; final Exception lastError; final Instant lastErrorTime; synchronized (errorConditionLock) { lastError = lastKnownLinkError; lastErrorTime = lastKnownErrorReportedAt; } if (lastError != null && lastErrorTime != null) { final Instant now = Instant.now(); final boolean isLastErrorAfterSleepTime = lastErrorTime.isAfter(now.minusSeconds(SERVER_BUSY_BASE_SLEEP_TIME_IN_SECS)); final boolean isServerBusy = lastError instanceof AmqpException && isLastErrorAfterSleepTime; final boolean isLastErrorAfterOperationTimeout = lastErrorTime.isAfter(now.minus(retryOptions.getTryTimeout())); cause = isServerBusy || isLastErrorAfterOperationTimeout ? lastError : null; } final AmqpException exception; if (cause instanceof AmqpException) { exception = (AmqpException) cause; } else { exception = new AmqpException(true, AmqpErrorCondition.TIMEOUT_ERROR, String.format(Locale.US, "Entity(%s): Send operation timed out", entityPath), handler.getErrorContext(sender)); } workItem.error(exception, null); } } }
What about `partitionsNeededForMe == 0`? Though it will automatically be covered below, do we want to include it here conceptually?
/**
 * Selects the leases this host should attempt to acquire in the current balancing round.
 * Prefers unowned/expired leases; when none exist and this host is below its target lease
 * count, tries to steal a single lease from the busiest worker.
 *
 * @param allLeases every lease currently known to the lease container.
 * @return the leases to acquire; empty when this host needs no more leases.
 */
public List<Lease> selectLeasesToTake(List<Lease> allLeases) {
    Map<String, Integer> workerToPartitionCount = new HashMap<>();
    List<Lease> expiredLeases = new ArrayList<>();
    Map<String, Lease> allPartitions = new HashMap<>();
    // Buckets leases into all partitions, expired/unowned ones, and per-owner counts;
    // also seeds this.hostName with 0 so the get() below cannot return null.
    this.categorizeLeases(allLeases, allPartitions, expiredLeases, workerToPartitionCount);
    int partitionCount = allPartitions.size();
    int workerCount = workerToPartitionCount.size();
    if (partitionCount <= 0) {
        return new ArrayList<Lease>();
    }
    int target = this.calculateTargetPartitionCount(partitionCount, workerCount);
    int myCount = workerToPartitionCount.get(this.hostName);
    int partitionsNeededForMe = target - myCount;
    if (expiredLeases.size() > 0) {
        // Take at most one expired lease per round when at/over target with no max cap,
        // or when several workers are competing — reduces lease-acquisition collisions.
        if ((this.maxPartitionCount == 0 && partitionsNeededForMe <= 0)
            || (partitionsNeededForMe > 1 && workerToPartitionCount.size() > 1)) {
            partitionsNeededForMe = 1;
        }
        if (partitionsNeededForMe == 1) {
            // Random pick lowers the chance that multiple hosts race for the same lease.
            Random random = new Random();
            Lease expiredLease = expiredLeases.get(random.nextInt(expiredLeases.size()));
            this.logger.info("Found unused or expired lease {} (owner was {}); previous lease count for instance owner {} is {}, count of leases to target is {} and maxScaleCount {} ",
                expiredLease.getLeaseToken(), expiredLease.getOwner(), this.hostName, myCount, partitionsNeededForMe, this.maxPartitionCount);
            return Collections.singletonList(expiredLease);
        } else {
            for (Lease lease : expiredLeases) {
                this.logger.info("Found unused or expired lease {} (owner was {}); previous lease count for instance owner {} is {} and maxScaleCount {} ",
                    lease.getLeaseToken(), lease.getOwner(), this.hostName, myCount, this.maxPartitionCount);
            }
        }
        // FIX: was `< 0`; use `<= 0` so the "no leases needed" case is handled explicitly
        // here, consistent with the identical check below. Behavior is unchanged for the
        // == 0 case (subList(0, 0) already yielded an empty list), but the intent is now
        // stated rather than implied.
        if (partitionsNeededForMe <= 0) {
            return new ArrayList<>();
        }
        return expiredLeases.subList(0, Math.min(partitionsNeededForMe, expiredLeases.size()));
    }
    if (partitionsNeededForMe <= 0) {
        return new ArrayList<Lease>();
    }
    // No free leases: attempt to steal a single lease from the busiest worker.
    Lease stolenLease = getLeaseToSteal(workerToPartitionCount, target, partitionsNeededForMe, allPartitions);
    List<Lease> stolenLeases = new ArrayList<>();
    if (stolenLease != null) {
        stolenLeases.add(stolenLease);
    }
    return stolenLeases;
}
if (partitionsNeededForMe < 0)
/**
 * Chooses the leases this host should attempt to take in this balancing round:
 * prefers unowned/expired leases, otherwise tries to steal one lease from the
 * busiest worker when this host is below its target lease count.
 *
 * @param allLeases every lease currently known to the lease container.
 * @return the leases to acquire; empty when this host needs no more leases.
 */
public List<Lease> selectLeasesToTake(List<Lease> allLeases) {
    Map<String, Integer> workerToPartitionCount = new HashMap<>();
    List<Lease> expiredLeases = new ArrayList<>();
    Map<String, Lease> allPartitions = new HashMap<>();
    // Buckets leases into all partitions, expired/unowned ones, and per-owner counts;
    // also seeds this.hostName with 0 so the get() below cannot return null.
    this.categorizeLeases(allLeases, allPartitions, expiredLeases, workerToPartitionCount);
    int partitionCount = allPartitions.size();
    int workerCount = workerToPartitionCount.size();
    if (partitionCount <= 0) {
        return new ArrayList<Lease>();
    }
    int target = this.calculateTargetPartitionCount(partitionCount, workerCount);
    int myCount = workerToPartitionCount.get(this.hostName);
    int partitionsNeededForMe = target - myCount;
    if (expiredLeases.size() > 0) {
        // Take at most one expired lease per round when at/over target with no max cap,
        // or when several workers are competing — reduces lease-acquisition collisions.
        if ((this.maxPartitionCount == 0 && partitionsNeededForMe <= 0)
            || (partitionsNeededForMe > 1 && workerToPartitionCount.size() > 1)) {
            partitionsNeededForMe = 1;
        }
        if (partitionsNeededForMe == 1) {
            // Random pick lowers the chance that multiple hosts race for the same lease.
            Random random = new Random();
            Lease expiredLease = expiredLeases.get(random.nextInt(expiredLeases.size()));
            this.logger.info("Found unused or expired lease {} (owner was {}); previous lease count for instance owner {} is {}, count of leases to target is {} and maxScaleCount {} ",
                expiredLease.getLeaseToken(), expiredLease.getOwner(), this.hostName, myCount, partitionsNeededForMe, this.maxPartitionCount);
            return Collections.singletonList(expiredLease);
        } else {
            for (Lease lease : expiredLeases) {
                this.logger.info("Found unused or expired lease {} (owner was {}); previous lease count for instance owner {} is {} and maxScaleCount {} ",
                    lease.getLeaseToken(), lease.getOwner(), this.hostName, myCount, this.maxPartitionCount);
            }
        }
        // Handles the "no leases needed" case explicitly (== 0 would also yield an empty
        // subList below, but the intent is clearer stated here).
        if (partitionsNeededForMe <= 0) {
            return new ArrayList<>();
        }
        return expiredLeases.subList(0, Math.min(partitionsNeededForMe, expiredLeases.size()));
    }
    if (partitionsNeededForMe <= 0) return new ArrayList<Lease>();
    // No free leases: attempt to steal a single lease from the busiest worker.
    Lease stolenLease = getLeaseToSteal(workerToPartitionCount, target, partitionsNeededForMe, allPartitions);
    List<Lease> stolenLeases = new ArrayList<>();
    if (stolenLease != null) {
        stolenLeases.add(stolenLease);
    }
    return stolenLeases;
}
class EqualPartitionsBalancingStrategy implements PartitionLoadBalancingStrategy { private final Logger logger = LoggerFactory.getLogger(EqualPartitionsBalancingStrategy.class); private final String hostName; private final int minPartitionCount; private final int maxPartitionCount; private final Duration leaseExpirationInterval; public EqualPartitionsBalancingStrategy(String hostName, int minPartitionCount, int maxPartitionCount, Duration leaseExpirationInterval) { if (hostName == null) { throw new IllegalArgumentException("hostName"); } this.hostName = hostName; this.minPartitionCount = minPartitionCount; this.maxPartitionCount = maxPartitionCount; this.leaseExpirationInterval = leaseExpirationInterval; } @Override private static Lease getLeaseToSteal( Map<String, Integer> workerToPartitionCount, int target, int partitionsNeededForMe, Map<String, Lease> allPartitions) { Map.Entry<String, Integer> workerToStealFrom = findWorkerWithMostPartitions(workerToPartitionCount); if (workerToStealFrom.getValue() > target - (partitionsNeededForMe > 1 ? 
1 : 0)) { for (Map.Entry<String, Lease> entry : allPartitions.entrySet()) { if (entry.getValue().getOwner().equalsIgnoreCase(workerToStealFrom.getKey())) { return entry.getValue(); } } } return null; } private static Map.Entry<String, Integer> findWorkerWithMostPartitions(Map<String, Integer> workerToPartitionCount) { Map.Entry<String, Integer> workerToStealFrom = new ChangeFeedHelper.KeyValuePair<>("", 0); for (Map.Entry<String, Integer> entry : workerToPartitionCount.entrySet()) { if (workerToStealFrom.getValue() <= entry.getValue()) { workerToStealFrom = entry; } } return workerToStealFrom; } private int calculateTargetPartitionCount(int partitionCount, int workerCount) { int target = 1; if (partitionCount > workerCount) { target = (int)Math.ceil((double)partitionCount / workerCount); } if (this.maxPartitionCount > 0 && target > this.maxPartitionCount) { target = this.maxPartitionCount; } if (this.minPartitionCount > 0 && target < this.minPartitionCount) { target = this.minPartitionCount; } return target; } private void categorizeLeases( List<Lease> allLeases, Map<String, Lease> allPartitions, List<Lease> expiredLeases, Map<String, Integer> workerToPartitionCount) { for (Lease lease : allLeases) { allPartitions.put(lease.getLeaseToken(), lease); if (lease.getOwner() == null || lease.getOwner().isEmpty() || this.isExpired(lease)) { expiredLeases.add(lease); } else { String assignedTo = lease.getOwner(); Integer count = workerToPartitionCount.get(assignedTo); if (count != null) { workerToPartitionCount.replace(assignedTo, count + 1); } else { workerToPartitionCount.put(assignedTo, 1); } } } if (!workerToPartitionCount.containsKey(this.hostName)) { workerToPartitionCount.put(this.hostName, 0); } } private boolean isExpired(Lease lease) { if (lease.getOwner() == null || lease.getOwner().isEmpty() || lease.getTimestamp() == null) { return true; } Instant leaseExpireTime = Instant.parse(lease.getTimestamp()).plus(this.leaseExpirationInterval); 
this.logger.debug("Current lease timestamp: {}, current time: {}", leaseExpireTime, Instant.now()); return leaseExpireTime.isBefore(Instant.now()); } }
// Balancing strategy that targets an equal number of leases per host, bounded by
// minPartitionCount/maxPartitionCount when those are positive.
class EqualPartitionsBalancingStrategy implements PartitionLoadBalancingStrategy {
    private final Logger logger = LoggerFactory.getLogger(EqualPartitionsBalancingStrategy.class);
    private final String hostName;                  // unique name of this host instance
    private final int minPartitionCount;            // lower bound on target leases per host (0 = none)
    private final int maxPartitionCount;            // upper bound on target leases per host (0 = none)
    private final Duration leaseExpirationInterval; // time after last renewal until a lease counts as expired

    public EqualPartitionsBalancingStrategy(String hostName, int minPartitionCount, int maxPartitionCount, Duration leaseExpirationInterval) {
        if (hostName == null) {
            throw new IllegalArgumentException("hostName");
        }
        this.hostName = hostName;
        this.minPartitionCount = minPartitionCount;
        this.maxPartitionCount = maxPartitionCount;
        this.leaseExpirationInterval = leaseExpirationInterval;
    }

    // NOTE(review): this @Override sits directly on a private static method, which does not
    // compile; presumably an overriding instance method was elided from this excerpt —
    // confirm against the full source.
    @Override
    private static Lease getLeaseToSteal(
        Map<String, Integer> workerToPartitionCount,
        int target,
        int partitionsNeededForMe,
        Map<String, Lease> allPartitions) {
        Map.Entry<String, Integer> workerToStealFrom = findWorkerWithMostPartitions(workerToPartitionCount);
        // Steal only if the busiest worker holds more than `target` leases (or at least
        // `target`, when this host needs more than one lease).
        if (workerToStealFrom.getValue() > target - (partitionsNeededForMe > 1 ? 1 : 0)) {
            for (Map.Entry<String, Lease> entry : allPartitions.entrySet()) {
                // assumes owners are non-null on this path (no expired/unowned leases) — TODO confirm
                if (entry.getValue().getOwner().equalsIgnoreCase(workerToStealFrom.getKey())) {
                    return entry.getValue();
                }
            }
        }
        return null;
    }

    // Returns the (worker, leaseCount) entry owning the most leases; ties resolve to the
    // last such entry encountered during map iteration.
    private static Map.Entry<String, Integer> findWorkerWithMostPartitions(Map<String, Integer> workerToPartitionCount) {
        Map.Entry<String, Integer> workerToStealFrom = new ChangeFeedHelper.KeyValuePair<>("", 0);
        for (Map.Entry<String, Integer> entry : workerToPartitionCount.entrySet()) {
            if (workerToStealFrom.getValue() <= entry.getValue()) {
                workerToStealFrom = entry;
            }
        }
        return workerToStealFrom;
    }

    // Computes ceil(partitionCount / workerCount), clamped to the configured min/max
    // bounds when those are positive.
    private int calculateTargetPartitionCount(int partitionCount, int workerCount) {
        int target = 1;
        if (partitionCount > workerCount) {
            target = (int)Math.ceil((double)partitionCount / workerCount);
        }
        if (this.maxPartitionCount > 0 && target > this.maxPartitionCount) {
            target = this.maxPartitionCount;
        }
        if (this.minPartitionCount > 0 && target < this.minPartitionCount) {
            target = this.minPartitionCount;
        }
        return target;
    }

    // Splits allLeases into: allPartitions (token -> lease), expiredLeases (unowned or
    // expired), and workerToPartitionCount (owner -> active lease count). Always seeds
    // this.hostName with 0 so lookups by the local host never miss.
    private void categorizeLeases(
        List<Lease> allLeases,
        Map<String, Lease> allPartitions,
        List<Lease> expiredLeases,
        Map<String, Integer> workerToPartitionCount) {
        for (Lease lease : allLeases) {
            allPartitions.put(lease.getLeaseToken(), lease);
            if (lease.getOwner() == null || lease.getOwner().isEmpty() || this.isExpired(lease)) {
                expiredLeases.add(lease);
            } else {
                String assignedTo = lease.getOwner();
                Integer count = workerToPartitionCount.get(assignedTo);
                if (count != null) {
                    workerToPartitionCount.replace(assignedTo, count + 1);
                } else {
                    workerToPartitionCount.put(assignedTo, 1);
                }
            }
        }
        if (!workerToPartitionCount.containsKey(this.hostName)) {
            workerToPartitionCount.put(this.hostName, 0);
        }
    }

    // A lease is expired when it has no owner or timestamp, or when its timestamp plus
    // the expiration interval is already in the past.
    // NOTE(review): the debug message's first placeholder is the computed expiration time,
    // not the raw lease timestamp — the label looks misleading; confirm intended wording.
    private boolean isExpired(Lease lease) {
        if (lease.getOwner() == null || lease.getOwner().isEmpty() || lease.getTimestamp() == null) {
            return true;
        }
        Instant leaseExpireTime = Instant.parse(lease.getTimestamp()).plus(this.leaseExpirationInterval);
        this.logger.debug("Current lease timestamp: {}, current time: {}", leaseExpireTime, Instant.now());
        return leaseExpireTime.isBefore(Instant.now());
    }
}
Do we need this `then()` now, given we have a new `then()` down below?
/**
 * Main load-balancing loop: each pass reads all leases, asks the strategy which leases
 * this host should take, acquires them one at a time, then idles for leaseAcquireInterval
 * before the next pass. Runs until the cancellation token is cancelled; an error escaping
 * the loop stops the balancer via stop().
 *
 * @param cancellationToken cooperative-cancellation signal for the loop.
 * @return a Mono that completes when the loop terminates.
 */
private Mono<Void> run(CancellationToken cancellationToken) {
    return Flux.just(this)
        .flatMap(value -> this.leaseContainer.getAllLeases())
        .collectList()
        .flatMap(allLeases -> {
            if (cancellationToken.isCancellationRequested()) return Mono.empty();
            List<Lease> leasesToTake = this.partitionLoadBalancingStrategy.selectLeasesToTake(allLeases);
            if (leasesToTake.size() > 0) {
                this.logger.info("Found {} total leases, taking ownership of {}", allLeases.size(), leasesToTake.size());
            }
            if (cancellationToken.isCancellationRequested()) return Mono.empty();
            // limitRate(1): acquire the selected leases strictly one at a time.
            return Flux.fromIterable(leasesToTake)
                .limitRate(1)
                .flatMap(lease -> {
                    if (cancellationToken.isCancellationRequested()) return Mono.empty();
                    return this.partitionController.addOrUpdateLease(lease);
                })
                .then();
        })
        // Errors during a single pass are logged and swallowed so the loop keeps going.
        .onErrorResume(throwable -> {
            logger.warn("Unexpected exception thrown while trying to acquire available leases", throwable);
            return Mono.empty();
        })
        // Idle phase: poll the cancellation token every 100ms until leaseAcquireInterval
        // elapses, so cancellation is observed promptly instead of sleeping the whole interval.
        .then(
            Mono.just(this)
                .flatMap(value -> {
                    if (cancellationToken.isCancellationRequested()) {
                        return Mono.empty();
                    }
                    Instant stopTimer = Instant.now().plus(this.leaseAcquireInterval);
                    return Mono.just(value)
                        .delayElement(Duration.ofMillis(100), CosmosSchedulers.COSMOS_PARALLEL)
                        .repeat(() -> {
                            Instant currentTime = Instant.now();
                            return !cancellationToken.isCancellationRequested() && currentTime.isBefore(stopTimer);
                        })
                        .then();
                })
        )
        // Repeat the whole take-then-idle cycle until cancellation is requested.
        .repeat(() -> !cancellationToken.isCancellationRequested())
        .then()
        // Any error escaping the loop shuts the balancer down.
        .onErrorResume(throwable -> {
            logger.info("Partition load balancer task stopped.");
            return this.stop();
        });
}
.then();
/**
 * Drives the lease load-balancing cycle: each iteration loads all leases, lets the
 * strategy pick which ones this host should own, acquires them sequentially, then waits
 * out the acquire interval while polling for cancellation. The cycle repeats until the
 * token is cancelled; an escaping error shuts the balancer down via stop().
 *
 * @param cancellationToken cooperative-cancellation signal for the loop.
 * @return a Mono that completes once the loop has terminated.
 */
private Mono<Void> run(CancellationToken cancellationToken) {
    return Flux.just(this)
        .flatMap(self -> this.leaseContainer.getAllLeases())
        .collectList()
        .flatMap(allLeases -> {
            if (cancellationToken.isCancellationRequested()) {
                return Mono.empty();
            }
            final List<Lease> leasesToTake = this.partitionLoadBalancingStrategy.selectLeasesToTake(allLeases);
            if (leasesToTake.size() > 0) {
                this.logger.info("Found {} total leases, taking ownership of {}", allLeases.size(), leasesToTake.size());
            }
            if (cancellationToken.isCancellationRequested()) {
                return Mono.empty();
            }
            // Acquire the selected leases strictly one at a time.
            return Flux.fromIterable(leasesToTake)
                .limitRate(1)
                .flatMap(lease -> {
                    if (cancellationToken.isCancellationRequested()) {
                        return Mono.empty();
                    }
                    return this.partitionController.addOrUpdateLease(lease);
                })
                .then();
        })
        .onErrorResume(failure -> {
            // A failed pass is logged and ignored so that the loop continues.
            logger.warn("Unexpected exception thrown while trying to acquire available leases", failure);
            return Mono.empty();
        })
        .then(
            Mono.just(this)
                .flatMap(self -> {
                    if (cancellationToken.isCancellationRequested()) {
                        return Mono.empty();
                    }
                    // Sleep in 100ms slices until the deadline so cancellation is noticed quickly.
                    final Instant deadline = Instant.now().plus(this.leaseAcquireInterval);
                    return Mono.just(self)
                        .delayElement(Duration.ofMillis(100), CosmosSchedulers.COSMOS_PARALLEL)
                        .repeat(() -> {
                            final Instant now = Instant.now();
                            return !cancellationToken.isCancellationRequested() && now.isBefore(deadline);
                        })
                        .then();
                })
        )
        .repeat(() -> !cancellationToken.isCancellationRequested())
        .then()
        .onErrorResume(failure -> {
            logger.info("Partition load balancer task stopped.");
            return this.stop();
        });
}
class PartitionLoadBalancerImpl implements PartitionLoadBalancer { private final Logger logger = LoggerFactory.getLogger(PartitionLoadBalancerImpl.class); private final PartitionController partitionController; private final LeaseContainer leaseContainer; private final PartitionLoadBalancingStrategy partitionLoadBalancingStrategy; private final Duration leaseAcquireInterval; private final Scheduler scheduler; private CancellationTokenSource cancellationTokenSource; private volatile boolean started; private final Object lock; public PartitionLoadBalancerImpl( PartitionController partitionController, LeaseContainer leaseContainer, PartitionLoadBalancingStrategy partitionLoadBalancingStrategy, Duration leaseAcquireInterval, Scheduler scheduler) { checkNotNull(partitionController, "Argument 'partitionController' can not be null"); checkNotNull(leaseContainer, "Argument 'leaseContainer' can not be null"); checkNotNull(partitionLoadBalancingStrategy, "Argument 'partitionLoadBalancingStrategy' can not be null"); checkNotNull(scheduler, "Argument 'scheduler' can not be null"); this.partitionController = partitionController; this.leaseContainer = leaseContainer; this.partitionLoadBalancingStrategy = partitionLoadBalancingStrategy; this.leaseAcquireInterval = leaseAcquireInterval; this.scheduler = scheduler; this.started = false; this.lock = new Object(); } @Override public Mono<Void> start() { synchronized (lock) { if (this.started) { throw new IllegalStateException("Partition load balancer already started"); } this.cancellationTokenSource = new CancellationTokenSource(); this.started = true; } return Mono.fromRunnable( () -> { scheduler.schedule(() -> this.run(this.cancellationTokenSource.getToken()).subscribe()); }); } @Override public Mono<Void> stop() { synchronized (lock) { this.started = false; this.cancellationTokenSource.cancel(); } return this.partitionController.shutdown(); } @Override public boolean isRunning() { return this.started; } }
/**
 * Coordinates lease load balancing across hosts: schedules the balancing loop on the
 * supplied scheduler and hands acquired leases to the partition controller.
 *
 * Thread-safety: start/stop transitions are guarded by an internal lock; the started
 * flag is volatile so isRunning() reads are safe without it.
 */
class PartitionLoadBalancerImpl implements PartitionLoadBalancer {
    private final Logger logger = LoggerFactory.getLogger(PartitionLoadBalancerImpl.class);
    private final PartitionController partitionController;
    private final LeaseContainer leaseContainer;
    private final PartitionLoadBalancingStrategy partitionLoadBalancingStrategy;
    private final Duration leaseAcquireInterval;
    private final Scheduler scheduler;
    private CancellationTokenSource cancellationTokenSource; // created on start(); guarded by lock
    private volatile boolean started;
    private final Object lock;

    public PartitionLoadBalancerImpl(
        PartitionController partitionController,
        LeaseContainer leaseContainer,
        PartitionLoadBalancingStrategy partitionLoadBalancingStrategy,
        Duration leaseAcquireInterval,
        Scheduler scheduler) {
        checkNotNull(partitionController, "Argument 'partitionController' can not be null");
        checkNotNull(leaseContainer, "Argument 'leaseContainer' can not be null");
        checkNotNull(partitionLoadBalancingStrategy, "Argument 'partitionLoadBalancingStrategy' can not be null");
        checkNotNull(scheduler, "Argument 'scheduler' can not be null");
        this.partitionController = partitionController;
        this.leaseContainer = leaseContainer;
        this.partitionLoadBalancingStrategy = partitionLoadBalancingStrategy;
        this.leaseAcquireInterval = leaseAcquireInterval;
        this.scheduler = scheduler;
        this.started = false;
        this.lock = new Object();
    }

    // Starts the balancing loop on the configured scheduler; completes once scheduled,
    // not when the loop finishes. Throws IllegalStateException when already started.
    @Override
    public Mono<Void> start() {
        synchronized (lock) {
            if (this.started) {
                throw new IllegalStateException("Partition load balancer already started");
            }
            this.cancellationTokenSource = new CancellationTokenSource();
            this.started = true;
        }
        return Mono.fromRunnable(
            () -> {
                scheduler.schedule(() -> this.run(this.cancellationTokenSource.getToken()).subscribe());
            });
    }

    // Stops the balancing loop and shuts down the partition controller.
    // NOTE(review): if stop() runs before start() ever did, cancellationTokenSource is
    // null here and cancel() throws NullPointerException — confirm callers always start first.
    @Override
    public Mono<Void> stop() {
        synchronized (lock) {
            this.started = false;
            this.cancellationTokenSource.cancel();
        }
        return this.partitionController.shutdown();
    }

    // Reflects the last start/stop transition; volatile read, no lock needed.
    @Override
    public boolean isRunning() {
        return this.started;
    }
}
I wonder if placing this above `onErrorResume()` or below makes any difference? Is that what we are doing here?
/**
 * Main load-balancing loop: each pass reads all leases, asks the strategy which leases
 * this host should take, acquires them one at a time, then idles for leaseAcquireInterval
 * before the next pass. Runs until the cancellation token is cancelled; an error escaping
 * the loop stops the balancer via stop().
 *
 * @param cancellationToken cooperative-cancellation signal for the loop.
 * @return a Mono that completes when the loop terminates.
 */
private Mono<Void> run(CancellationToken cancellationToken) {
    return Flux.just(this)
        .flatMap(value -> this.leaseContainer.getAllLeases())
        .collectList()
        .flatMap(allLeases -> {
            if (cancellationToken.isCancellationRequested()) return Mono.empty();
            List<Lease> leasesToTake = this.partitionLoadBalancingStrategy.selectLeasesToTake(allLeases);
            if (leasesToTake.size() > 0) {
                this.logger.info("Found {} total leases, taking ownership of {}", allLeases.size(), leasesToTake.size());
            }
            if (cancellationToken.isCancellationRequested()) return Mono.empty();
            // limitRate(1): acquire the selected leases strictly one at a time.
            return Flux.fromIterable(leasesToTake)
                .limitRate(1)
                .flatMap(lease -> {
                    if (cancellationToken.isCancellationRequested()) return Mono.empty();
                    return this.partitionController.addOrUpdateLease(lease);
                })
                .then();
        })
        // Errors during a single pass are logged and swallowed so the loop keeps going.
        .onErrorResume(throwable -> {
            logger.warn("Unexpected exception thrown while trying to acquire available leases", throwable);
            return Mono.empty();
        })
        // Idle phase: poll the cancellation token every 100ms until leaseAcquireInterval
        // elapses, so cancellation is observed promptly instead of sleeping the whole interval.
        .then(
            Mono.just(this)
                .flatMap(value -> {
                    if (cancellationToken.isCancellationRequested()) {
                        return Mono.empty();
                    }
                    Instant stopTimer = Instant.now().plus(this.leaseAcquireInterval);
                    return Mono.just(value)
                        .delayElement(Duration.ofMillis(100), CosmosSchedulers.COSMOS_PARALLEL)
                        .repeat(() -> {
                            Instant currentTime = Instant.now();
                            return !cancellationToken.isCancellationRequested() && currentTime.isBefore(stopTimer);
                        })
                        .then();
                })
        )
        // Repeat the whole take-then-idle cycle until cancellation is requested.
        .repeat(() -> !cancellationToken.isCancellationRequested())
        .then()
        // Any error escaping the loop shuts the balancer down.
        .onErrorResume(throwable -> {
            logger.info("Partition load balancer task stopped.");
            return this.stop();
        });
}
.then(
/**
 * Drives the lease load-balancing cycle: each iteration loads all leases, lets the
 * strategy pick which ones this host should own, acquires them sequentially, then waits
 * out the acquire interval while polling for cancellation. The cycle repeats until the
 * token is cancelled; an escaping error shuts the balancer down via stop().
 *
 * @param cancellationToken cooperative-cancellation signal for the loop.
 * @return a Mono that completes once the loop has terminated.
 */
private Mono<Void> run(CancellationToken cancellationToken) {
    return Flux.just(this)
        .flatMap(self -> this.leaseContainer.getAllLeases())
        .collectList()
        .flatMap(allLeases -> {
            if (cancellationToken.isCancellationRequested()) {
                return Mono.empty();
            }
            final List<Lease> leasesToTake = this.partitionLoadBalancingStrategy.selectLeasesToTake(allLeases);
            if (leasesToTake.size() > 0) {
                this.logger.info("Found {} total leases, taking ownership of {}", allLeases.size(), leasesToTake.size());
            }
            if (cancellationToken.isCancellationRequested()) {
                return Mono.empty();
            }
            // Acquire the selected leases strictly one at a time.
            return Flux.fromIterable(leasesToTake)
                .limitRate(1)
                .flatMap(lease -> {
                    if (cancellationToken.isCancellationRequested()) {
                        return Mono.empty();
                    }
                    return this.partitionController.addOrUpdateLease(lease);
                })
                .then();
        })
        .onErrorResume(failure -> {
            // A failed pass is logged and ignored so that the loop continues.
            logger.warn("Unexpected exception thrown while trying to acquire available leases", failure);
            return Mono.empty();
        })
        .then(
            Mono.just(this)
                .flatMap(self -> {
                    if (cancellationToken.isCancellationRequested()) {
                        return Mono.empty();
                    }
                    // Sleep in 100ms slices until the deadline so cancellation is noticed quickly.
                    final Instant deadline = Instant.now().plus(this.leaseAcquireInterval);
                    return Mono.just(self)
                        .delayElement(Duration.ofMillis(100), CosmosSchedulers.COSMOS_PARALLEL)
                        .repeat(() -> {
                            final Instant now = Instant.now();
                            return !cancellationToken.isCancellationRequested() && now.isBefore(deadline);
                        })
                        .then();
                })
        )
        .repeat(() -> !cancellationToken.isCancellationRequested())
        .then()
        .onErrorResume(failure -> {
            logger.info("Partition load balancer task stopped.");
            return this.stop();
        });
}
class PartitionLoadBalancerImpl implements PartitionLoadBalancer { private final Logger logger = LoggerFactory.getLogger(PartitionLoadBalancerImpl.class); private final PartitionController partitionController; private final LeaseContainer leaseContainer; private final PartitionLoadBalancingStrategy partitionLoadBalancingStrategy; private final Duration leaseAcquireInterval; private final Scheduler scheduler; private CancellationTokenSource cancellationTokenSource; private volatile boolean started; private final Object lock; public PartitionLoadBalancerImpl( PartitionController partitionController, LeaseContainer leaseContainer, PartitionLoadBalancingStrategy partitionLoadBalancingStrategy, Duration leaseAcquireInterval, Scheduler scheduler) { checkNotNull(partitionController, "Argument 'partitionController' can not be null"); checkNotNull(leaseContainer, "Argument 'leaseContainer' can not be null"); checkNotNull(partitionLoadBalancingStrategy, "Argument 'partitionLoadBalancingStrategy' can not be null"); checkNotNull(scheduler, "Argument 'scheduler' can not be null"); this.partitionController = partitionController; this.leaseContainer = leaseContainer; this.partitionLoadBalancingStrategy = partitionLoadBalancingStrategy; this.leaseAcquireInterval = leaseAcquireInterval; this.scheduler = scheduler; this.started = false; this.lock = new Object(); } @Override public Mono<Void> start() { synchronized (lock) { if (this.started) { throw new IllegalStateException("Partition load balancer already started"); } this.cancellationTokenSource = new CancellationTokenSource(); this.started = true; } return Mono.fromRunnable( () -> { scheduler.schedule(() -> this.run(this.cancellationTokenSource.getToken()).subscribe()); }); } @Override public Mono<Void> stop() { synchronized (lock) { this.started = false; this.cancellationTokenSource.cancel(); } return this.partitionController.shutdown(); } @Override public boolean isRunning() { return this.started; } }
class PartitionLoadBalancerImpl implements PartitionLoadBalancer { private final Logger logger = LoggerFactory.getLogger(PartitionLoadBalancerImpl.class); private final PartitionController partitionController; private final LeaseContainer leaseContainer; private final PartitionLoadBalancingStrategy partitionLoadBalancingStrategy; private final Duration leaseAcquireInterval; private final Scheduler scheduler; private CancellationTokenSource cancellationTokenSource; private volatile boolean started; private final Object lock; public PartitionLoadBalancerImpl( PartitionController partitionController, LeaseContainer leaseContainer, PartitionLoadBalancingStrategy partitionLoadBalancingStrategy, Duration leaseAcquireInterval, Scheduler scheduler) { checkNotNull(partitionController, "Argument 'partitionController' can not be null"); checkNotNull(leaseContainer, "Argument 'leaseContainer' can not be null"); checkNotNull(partitionLoadBalancingStrategy, "Argument 'partitionLoadBalancingStrategy' can not be null"); checkNotNull(scheduler, "Argument 'scheduler' can not be null"); this.partitionController = partitionController; this.leaseContainer = leaseContainer; this.partitionLoadBalancingStrategy = partitionLoadBalancingStrategy; this.leaseAcquireInterval = leaseAcquireInterval; this.scheduler = scheduler; this.started = false; this.lock = new Object(); } @Override public Mono<Void> start() { synchronized (lock) { if (this.started) { throw new IllegalStateException("Partition load balancer already started"); } this.cancellationTokenSource = new CancellationTokenSource(); this.started = true; } return Mono.fromRunnable( () -> { scheduler.schedule(() -> this.run(this.cancellationTokenSource.getToken()).subscribe()); }); } @Override public Mono<Void> stop() { synchronized (lock) { this.started = false; this.cancellationTokenSource.cancel(); } return this.partitionController.shutdown(); } @Override public boolean isRunning() { return this.started; } }
one extra `then()`?
private Mono<Void> run(CancellationToken cancellationToken) { return Flux.just(this) .flatMap(value -> this.leaseContainer.getAllLeases()) .collectList() .flatMap(allLeases -> { if (cancellationToken.isCancellationRequested()) return Mono.empty(); List<Lease> leasesToTake = this.partitionLoadBalancingStrategy.selectLeasesToTake(allLeases); if (leasesToTake.size() > 0) { this.logger.info("Found {} total leases, taking ownership of {}", allLeases.size(), leasesToTake.size()); } if (cancellationToken.isCancellationRequested()) return Mono.empty(); return Flux.fromIterable(leasesToTake) .limitRate(1) .flatMap(lease -> { if (cancellationToken.isCancellationRequested()) return Mono.empty(); return this.partitionController.addOrUpdateLease(lease); }) .then(); }) .onErrorResume(throwable -> { logger.warn("Unexpected exception thrown while trying to acquire available leases", throwable); return Mono.empty(); }) .then( Mono.just(this) .flatMap(value -> { if (cancellationToken.isCancellationRequested()) { return Mono.empty(); } Instant stopTimer = Instant.now().plus(this.leaseAcquireInterval); return Mono.just(value) .delayElement(Duration.ofMillis(100), CosmosSchedulers.COSMOS_PARALLEL) .repeat(() -> { Instant currentTime = Instant.now(); return !cancellationToken.isCancellationRequested() && currentTime.isBefore(stopTimer); }) .then(); }) ) .repeat(() -> !cancellationToken.isCancellationRequested()) .then() .onErrorResume(throwable -> { logger.info("Partition load balancer task stopped."); return this.stop(); }); }
.then();
private Mono<Void> run(CancellationToken cancellationToken) { return Flux.just(this) .flatMap(value -> this.leaseContainer.getAllLeases()) .collectList() .flatMap(allLeases -> { if (cancellationToken.isCancellationRequested()) return Mono.empty(); List<Lease> leasesToTake = this.partitionLoadBalancingStrategy.selectLeasesToTake(allLeases); if (leasesToTake.size() > 0) { this.logger.info("Found {} total leases, taking ownership of {}", allLeases.size(), leasesToTake.size()); } if (cancellationToken.isCancellationRequested()) return Mono.empty(); return Flux.fromIterable(leasesToTake) .limitRate(1) .flatMap(lease -> { if (cancellationToken.isCancellationRequested()) return Mono.empty(); return this.partitionController.addOrUpdateLease(lease); }) .then(); }) .onErrorResume(throwable -> { logger.warn("Unexpected exception thrown while trying to acquire available leases", throwable); return Mono.empty(); }) .then( Mono.just(this) .flatMap(value -> { if (cancellationToken.isCancellationRequested()) { return Mono.empty(); } Instant stopTimer = Instant.now().plus(this.leaseAcquireInterval); return Mono.just(value) .delayElement(Duration.ofMillis(100), CosmosSchedulers.COSMOS_PARALLEL) .repeat(() -> { Instant currentTime = Instant.now(); return !cancellationToken.isCancellationRequested() && currentTime.isBefore(stopTimer); }) .then(); }) ) .repeat(() -> !cancellationToken.isCancellationRequested()) .then() .onErrorResume(throwable -> { logger.info("Partition load balancer task stopped."); return this.stop(); }); }
class PartitionLoadBalancerImpl implements PartitionLoadBalancer { private final Logger logger = LoggerFactory.getLogger(PartitionLoadBalancerImpl.class); private final PartitionController partitionController; private final LeaseContainer leaseContainer; private final PartitionLoadBalancingStrategy partitionLoadBalancingStrategy; private final Duration leaseAcquireInterval; private final Scheduler scheduler; private CancellationTokenSource cancellationTokenSource; private volatile boolean started; private final Object lock; public PartitionLoadBalancerImpl( PartitionController partitionController, LeaseContainer leaseContainer, PartitionLoadBalancingStrategy partitionLoadBalancingStrategy, Duration leaseAcquireInterval, Scheduler scheduler) { if (partitionController == null) { throw new IllegalArgumentException("partitionController"); } if (leaseContainer == null) { throw new IllegalArgumentException("leaseContainer"); } if (partitionLoadBalancingStrategy == null) { throw new IllegalArgumentException("partitionLoadBalancingStrategy"); } if (scheduler == null) { throw new IllegalArgumentException("executorService"); } this.partitionController = partitionController; this.leaseContainer = leaseContainer; this.partitionLoadBalancingStrategy = partitionLoadBalancingStrategy; this.leaseAcquireInterval = leaseAcquireInterval; this.scheduler = scheduler; this.started = false; this.lock = new Object(); } @Override public Mono<Void> start() { synchronized (lock) { if (this.started) { throw new IllegalStateException("Partition load balancer already started"); } this.cancellationTokenSource = new CancellationTokenSource(); this.started = true; } return Mono.fromRunnable( () -> { scheduler.schedule(() -> this.run(this.cancellationTokenSource.getToken()).subscribe()); }); } @Override public Mono<Void> stop() { synchronized (lock) { this.started = false; this.cancellationTokenSource.cancel(); } return this.partitionController.shutdown(); } @Override public boolean isRunning() { 
return this.started; } }
class PartitionLoadBalancerImpl implements PartitionLoadBalancer { private final Logger logger = LoggerFactory.getLogger(PartitionLoadBalancerImpl.class); private final PartitionController partitionController; private final LeaseContainer leaseContainer; private final PartitionLoadBalancingStrategy partitionLoadBalancingStrategy; private final Duration leaseAcquireInterval; private final Scheduler scheduler; private CancellationTokenSource cancellationTokenSource; private volatile boolean started; private final Object lock; public PartitionLoadBalancerImpl( PartitionController partitionController, LeaseContainer leaseContainer, PartitionLoadBalancingStrategy partitionLoadBalancingStrategy, Duration leaseAcquireInterval, Scheduler scheduler) { if (partitionController == null) { throw new IllegalArgumentException("partitionController"); } if (leaseContainer == null) { throw new IllegalArgumentException("leaseContainer"); } if (partitionLoadBalancingStrategy == null) { throw new IllegalArgumentException("partitionLoadBalancingStrategy"); } if (scheduler == null) { throw new IllegalArgumentException("executorService"); } this.partitionController = partitionController; this.leaseContainer = leaseContainer; this.partitionLoadBalancingStrategy = partitionLoadBalancingStrategy; this.leaseAcquireInterval = leaseAcquireInterval; this.scheduler = scheduler; this.started = false; this.lock = new Object(); } @Override public Mono<Void> start() { synchronized (lock) { if (this.started) { throw new IllegalStateException("Partition load balancer already started"); } this.cancellationTokenSource = new CancellationTokenSource(); this.started = true; } return Mono.fromRunnable( () -> { scheduler.schedule(() -> this.run(this.cancellationTokenSource.getToken()).subscribe()); }); } @Override public Mono<Void> stop() { synchronized (lock) { this.started = false; this.cancellationTokenSource.cancel(); } return this.partitionController.shutdown(); } @Override public boolean isRunning() { 
return this.started; } }
one more here, lol. We have so many `then()` 🤣
private Mono<Void> run(CancellationToken cancellationToken) { return Flux.just(this) .flatMap(value -> this.leaseContainer.getAllLeases()) .collectList() .flatMap(allLeases -> { if (cancellationToken.isCancellationRequested()) return Mono.empty(); List<Lease> leasesToTake = this.partitionLoadBalancingStrategy.selectLeasesToTake(allLeases); if (leasesToTake.size() > 0) { this.logger.info("Found {} total leases, taking ownership of {}", allLeases.size(), leasesToTake.size()); } if (cancellationToken.isCancellationRequested()) return Mono.empty(); return Flux.fromIterable(leasesToTake) .limitRate(1) .flatMap(lease -> { if (cancellationToken.isCancellationRequested()) return Mono.empty(); return this.partitionController.addOrUpdateLease(lease); }) .then(); }) .onErrorResume(throwable -> { logger.warn("Unexpected exception thrown while trying to acquire available leases", throwable); return Mono.empty(); }) .then( Mono.just(this) .flatMap(value -> { if (cancellationToken.isCancellationRequested()) { return Mono.empty(); } Instant stopTimer = Instant.now().plus(this.leaseAcquireInterval); return Mono.just(value) .delayElement(Duration.ofMillis(100), CosmosSchedulers.COSMOS_PARALLEL) .repeat(() -> { Instant currentTime = Instant.now(); return !cancellationToken.isCancellationRequested() && currentTime.isBefore(stopTimer); }) .then(); }) ) .repeat(() -> !cancellationToken.isCancellationRequested()) .then() .onErrorResume(throwable -> { logger.info("Partition load balancer task stopped."); return this.stop(); }); }
.then();
private Mono<Void> run(CancellationToken cancellationToken) { return Flux.just(this) .flatMap(value -> this.leaseContainer.getAllLeases()) .collectList() .flatMap(allLeases -> { if (cancellationToken.isCancellationRequested()) return Mono.empty(); List<Lease> leasesToTake = this.partitionLoadBalancingStrategy.selectLeasesToTake(allLeases); if (leasesToTake.size() > 0) { this.logger.info("Found {} total leases, taking ownership of {}", allLeases.size(), leasesToTake.size()); } if (cancellationToken.isCancellationRequested()) return Mono.empty(); return Flux.fromIterable(leasesToTake) .limitRate(1) .flatMap(lease -> { if (cancellationToken.isCancellationRequested()) return Mono.empty(); return this.partitionController.addOrUpdateLease(lease); }) .then(); }) .onErrorResume(throwable -> { logger.warn("Unexpected exception thrown while trying to acquire available leases", throwable); return Mono.empty(); }) .then( Mono.just(this) .flatMap(value -> { if (cancellationToken.isCancellationRequested()) { return Mono.empty(); } Instant stopTimer = Instant.now().plus(this.leaseAcquireInterval); return Mono.just(value) .delayElement(Duration.ofMillis(100), CosmosSchedulers.COSMOS_PARALLEL) .repeat(() -> { Instant currentTime = Instant.now(); return !cancellationToken.isCancellationRequested() && currentTime.isBefore(stopTimer); }) .then(); }) ) .repeat(() -> !cancellationToken.isCancellationRequested()) .then() .onErrorResume(throwable -> { logger.info("Partition load balancer task stopped."); return this.stop(); }); }
class PartitionLoadBalancerImpl implements PartitionLoadBalancer { private final Logger logger = LoggerFactory.getLogger(PartitionLoadBalancerImpl.class); private final PartitionController partitionController; private final LeaseContainer leaseContainer; private final PartitionLoadBalancingStrategy partitionLoadBalancingStrategy; private final Duration leaseAcquireInterval; private final Scheduler scheduler; private CancellationTokenSource cancellationTokenSource; private volatile boolean started; private final Object lock; public PartitionLoadBalancerImpl( PartitionController partitionController, LeaseContainer leaseContainer, PartitionLoadBalancingStrategy partitionLoadBalancingStrategy, Duration leaseAcquireInterval, Scheduler scheduler) { if (partitionController == null) { throw new IllegalArgumentException("partitionController"); } if (leaseContainer == null) { throw new IllegalArgumentException("leaseContainer"); } if (partitionLoadBalancingStrategy == null) { throw new IllegalArgumentException("partitionLoadBalancingStrategy"); } if (scheduler == null) { throw new IllegalArgumentException("executorService"); } this.partitionController = partitionController; this.leaseContainer = leaseContainer; this.partitionLoadBalancingStrategy = partitionLoadBalancingStrategy; this.leaseAcquireInterval = leaseAcquireInterval; this.scheduler = scheduler; this.started = false; this.lock = new Object(); } @Override public Mono<Void> start() { synchronized (lock) { if (this.started) { throw new IllegalStateException("Partition load balancer already started"); } this.cancellationTokenSource = new CancellationTokenSource(); this.started = true; } return Mono.fromRunnable( () -> { scheduler.schedule(() -> this.run(this.cancellationTokenSource.getToken()).subscribe()); }); } @Override public Mono<Void> stop() { synchronized (lock) { this.started = false; this.cancellationTokenSource.cancel(); } return this.partitionController.shutdown(); } @Override public boolean isRunning() { 
return this.started; } }
class PartitionLoadBalancerImpl implements PartitionLoadBalancer { private final Logger logger = LoggerFactory.getLogger(PartitionLoadBalancerImpl.class); private final PartitionController partitionController; private final LeaseContainer leaseContainer; private final PartitionLoadBalancingStrategy partitionLoadBalancingStrategy; private final Duration leaseAcquireInterval; private final Scheduler scheduler; private CancellationTokenSource cancellationTokenSource; private volatile boolean started; private final Object lock; public PartitionLoadBalancerImpl( PartitionController partitionController, LeaseContainer leaseContainer, PartitionLoadBalancingStrategy partitionLoadBalancingStrategy, Duration leaseAcquireInterval, Scheduler scheduler) { if (partitionController == null) { throw new IllegalArgumentException("partitionController"); } if (leaseContainer == null) { throw new IllegalArgumentException("leaseContainer"); } if (partitionLoadBalancingStrategy == null) { throw new IllegalArgumentException("partitionLoadBalancingStrategy"); } if (scheduler == null) { throw new IllegalArgumentException("executorService"); } this.partitionController = partitionController; this.leaseContainer = leaseContainer; this.partitionLoadBalancingStrategy = partitionLoadBalancingStrategy; this.leaseAcquireInterval = leaseAcquireInterval; this.scheduler = scheduler; this.started = false; this.lock = new Object(); } @Override public Mono<Void> start() { synchronized (lock) { if (this.started) { throw new IllegalStateException("Partition load balancer already started"); } this.cancellationTokenSource = new CancellationTokenSource(); this.started = true; } return Mono.fromRunnable( () -> { scheduler.schedule(() -> this.run(this.cancellationTokenSource.getToken()).subscribe()); }); } @Override public Mono<Void> stop() { synchronized (lock) { this.started = false; this.cancellationTokenSource.cancel(); } return this.partitionController.shutdown(); } @Override public boolean isRunning() { 
return this.started; } }
I think it is clearer to only special case negative here - like in Annie's commit. Definitely in a hotfix - but even conceptually.
public List<Lease> selectLeasesToTake(List<Lease> allLeases) { Map<String, Integer> workerToPartitionCount = new HashMap<>(); List<Lease> expiredLeases = new ArrayList<>(); Map<String, Lease> allPartitions = new HashMap<>(); this.categorizeLeases(allLeases, allPartitions, expiredLeases, workerToPartitionCount); int partitionCount = allPartitions.size(); int workerCount = workerToPartitionCount.size(); if (partitionCount <= 0) { return new ArrayList<Lease>(); } int target = this.calculateTargetPartitionCount(partitionCount, workerCount); int myCount = workerToPartitionCount.get(this.hostName); int partitionsNeededForMe = target - myCount; if (expiredLeases.size() > 0) { if ((this.maxPartitionCount == 0 && partitionsNeededForMe <= 0) || (partitionsNeededForMe > 1 && workerToPartitionCount.size() > 1)) { partitionsNeededForMe = 1; } if (partitionsNeededForMe == 1) { Random random = new Random(); Lease expiredLease = expiredLeases.get(random.nextInt(expiredLeases.size())); this.logger.info("Found unused or expired lease {} (owner was {}); previous lease count for instance owner {} is {}, count of leases to target is {} and maxScaleCount {} ", expiredLease.getLeaseToken(), expiredLease.getOwner(), this.hostName, myCount, partitionsNeededForMe, this.maxPartitionCount); return Collections.singletonList(expiredLease); } else { for (Lease lease : expiredLeases) { this.logger.info("Found unused or expired lease {} (owner was {}); previous lease count for instance owner {} is {} and maxScaleCount {} ", lease.getLeaseToken(), lease.getOwner(), this.hostName, myCount, this.maxPartitionCount); } } if (partitionsNeededForMe < 0) { return new ArrayList<>(); } return expiredLeases.subList(0, Math.min(partitionsNeededForMe, expiredLeases.size())); } if (partitionsNeededForMe <= 0) return new ArrayList<Lease>(); Lease stolenLease = getLeaseToSteal(workerToPartitionCount, target, partitionsNeededForMe, allPartitions); List<Lease> stolenLeases = new ArrayList<>(); if (stolenLease != 
null) { stolenLeases.add(stolenLease); } return stolenLeases; }
if (partitionsNeededForMe < 0)
public List<Lease> selectLeasesToTake(List<Lease> allLeases) { Map<String, Integer> workerToPartitionCount = new HashMap<>(); List<Lease> expiredLeases = new ArrayList<>(); Map<String, Lease> allPartitions = new HashMap<>(); this.categorizeLeases(allLeases, allPartitions, expiredLeases, workerToPartitionCount); int partitionCount = allPartitions.size(); int workerCount = workerToPartitionCount.size(); if (partitionCount <= 0) { return new ArrayList<Lease>(); } int target = this.calculateTargetPartitionCount(partitionCount, workerCount); int myCount = workerToPartitionCount.get(this.hostName); int partitionsNeededForMe = target - myCount; if (expiredLeases.size() > 0) { if ((this.maxPartitionCount == 0 && partitionsNeededForMe <= 0) || (partitionsNeededForMe > 1 && workerToPartitionCount.size() > 1)) { partitionsNeededForMe = 1; } if (partitionsNeededForMe == 1) { Random random = new Random(); Lease expiredLease = expiredLeases.get(random.nextInt(expiredLeases.size())); this.logger.info("Found unused or expired lease {} (owner was {}); previous lease count for instance owner {} is {}, count of leases to target is {} and maxScaleCount {} ", expiredLease.getLeaseToken(), expiredLease.getOwner(), this.hostName, myCount, partitionsNeededForMe, this.maxPartitionCount); return Collections.singletonList(expiredLease); } else { for (Lease lease : expiredLeases) { this.logger.info("Found unused or expired lease {} (owner was {}); previous lease count for instance owner {} is {} and maxScaleCount {} ", lease.getLeaseToken(), lease.getOwner(), this.hostName, myCount, this.maxPartitionCount); } } if (partitionsNeededForMe <= 0) { return new ArrayList<>(); } return expiredLeases.subList(0, Math.min(partitionsNeededForMe, expiredLeases.size())); } if (partitionsNeededForMe <= 0) return new ArrayList<Lease>(); Lease stolenLease = getLeaseToSteal(workerToPartitionCount, target, partitionsNeededForMe, allPartitions); List<Lease> stolenLeases = new ArrayList<>(); if (stolenLease != 
null) { stolenLeases.add(stolenLease); } return stolenLeases; }
class EqualPartitionsBalancingStrategy implements PartitionLoadBalancingStrategy { private final Logger logger = LoggerFactory.getLogger(EqualPartitionsBalancingStrategy.class); private final String hostName; private final int minPartitionCount; private final int maxPartitionCount; private final Duration leaseExpirationInterval; public EqualPartitionsBalancingStrategy(String hostName, int minPartitionCount, int maxPartitionCount, Duration leaseExpirationInterval) { if (hostName == null) { throw new IllegalArgumentException("hostName"); } this.hostName = hostName; this.minPartitionCount = minPartitionCount; this.maxPartitionCount = maxPartitionCount; this.leaseExpirationInterval = leaseExpirationInterval; } @Override private static Lease getLeaseToSteal( Map<String, Integer> workerToPartitionCount, int target, int partitionsNeededForMe, Map<String, Lease> allPartitions) { Map.Entry<String, Integer> workerToStealFrom = findWorkerWithMostPartitions(workerToPartitionCount); if (workerToStealFrom.getValue() > target - (partitionsNeededForMe > 1 ? 
1 : 0)) { for (Map.Entry<String, Lease> entry : allPartitions.entrySet()) { if (entry.getValue().getOwner().equalsIgnoreCase(workerToStealFrom.getKey())) { return entry.getValue(); } } } return null; } private static Map.Entry<String, Integer> findWorkerWithMostPartitions(Map<String, Integer> workerToPartitionCount) { Map.Entry<String, Integer> workerToStealFrom = new ChangeFeedHelper.KeyValuePair<>("", 0); for (Map.Entry<String, Integer> entry : workerToPartitionCount.entrySet()) { if (workerToStealFrom.getValue() <= entry.getValue()) { workerToStealFrom = entry; } } return workerToStealFrom; } private int calculateTargetPartitionCount(int partitionCount, int workerCount) { int target = 1; if (partitionCount > workerCount) { target = (int)Math.ceil((double)partitionCount / workerCount); } if (this.maxPartitionCount > 0 && target > this.maxPartitionCount) { target = this.maxPartitionCount; } if (this.minPartitionCount > 0 && target < this.minPartitionCount) { target = this.minPartitionCount; } return target; } private void categorizeLeases( List<Lease> allLeases, Map<String, Lease> allPartitions, List<Lease> expiredLeases, Map<String, Integer> workerToPartitionCount) { for (Lease lease : allLeases) { allPartitions.put(lease.getLeaseToken(), lease); if (lease.getOwner() == null || lease.getOwner().isEmpty() || this.isExpired(lease)) { expiredLeases.add(lease); } else { String assignedTo = lease.getOwner(); Integer count = workerToPartitionCount.get(assignedTo); if (count != null) { workerToPartitionCount.replace(assignedTo, count + 1); } else { workerToPartitionCount.put(assignedTo, 1); } } } if (!workerToPartitionCount.containsKey(this.hostName)) { workerToPartitionCount.put(this.hostName, 0); } } private boolean isExpired(Lease lease) { if (lease.getOwner() == null || lease.getOwner().isEmpty() || lease.getTimestamp() == null) { return true; } Instant leaseExpireTime = Instant.parse(lease.getTimestamp()).plus(this.leaseExpirationInterval); 
this.logger.debug("Current lease timestamp: {}, current time: {}", leaseExpireTime, Instant.now()); return leaseExpireTime.isBefore(Instant.now()); } }
class EqualPartitionsBalancingStrategy implements PartitionLoadBalancingStrategy { private final Logger logger = LoggerFactory.getLogger(EqualPartitionsBalancingStrategy.class); private final String hostName; private final int minPartitionCount; private final int maxPartitionCount; private final Duration leaseExpirationInterval; public EqualPartitionsBalancingStrategy(String hostName, int minPartitionCount, int maxPartitionCount, Duration leaseExpirationInterval) { if (hostName == null) { throw new IllegalArgumentException("hostName"); } this.hostName = hostName; this.minPartitionCount = minPartitionCount; this.maxPartitionCount = maxPartitionCount; this.leaseExpirationInterval = leaseExpirationInterval; } @Override private static Lease getLeaseToSteal( Map<String, Integer> workerToPartitionCount, int target, int partitionsNeededForMe, Map<String, Lease> allPartitions) { Map.Entry<String, Integer> workerToStealFrom = findWorkerWithMostPartitions(workerToPartitionCount); if (workerToStealFrom.getValue() > target - (partitionsNeededForMe > 1 ? 
1 : 0)) { for (Map.Entry<String, Lease> entry : allPartitions.entrySet()) { if (entry.getValue().getOwner().equalsIgnoreCase(workerToStealFrom.getKey())) { return entry.getValue(); } } } return null; } private static Map.Entry<String, Integer> findWorkerWithMostPartitions(Map<String, Integer> workerToPartitionCount) { Map.Entry<String, Integer> workerToStealFrom = new ChangeFeedHelper.KeyValuePair<>("", 0); for (Map.Entry<String, Integer> entry : workerToPartitionCount.entrySet()) { if (workerToStealFrom.getValue() <= entry.getValue()) { workerToStealFrom = entry; } } return workerToStealFrom; } private int calculateTargetPartitionCount(int partitionCount, int workerCount) { int target = 1; if (partitionCount > workerCount) { target = (int)Math.ceil((double)partitionCount / workerCount); } if (this.maxPartitionCount > 0 && target > this.maxPartitionCount) { target = this.maxPartitionCount; } if (this.minPartitionCount > 0 && target < this.minPartitionCount) { target = this.minPartitionCount; } return target; } private void categorizeLeases( List<Lease> allLeases, Map<String, Lease> allPartitions, List<Lease> expiredLeases, Map<String, Integer> workerToPartitionCount) { for (Lease lease : allLeases) { allPartitions.put(lease.getLeaseToken(), lease); if (lease.getOwner() == null || lease.getOwner().isEmpty() || this.isExpired(lease)) { expiredLeases.add(lease); } else { String assignedTo = lease.getOwner(); Integer count = workerToPartitionCount.get(assignedTo); if (count != null) { workerToPartitionCount.replace(assignedTo, count + 1); } else { workerToPartitionCount.put(assignedTo, 1); } } } if (!workerToPartitionCount.containsKey(this.hostName)) { workerToPartitionCount.put(this.hostName, 0); } } private boolean isExpired(Lease lease) { if (lease.getOwner() == null || lease.getOwner().isEmpty() || lease.getTimestamp() == null) { return true; } Instant leaseExpireTime = Instant.parse(lease.getTimestamp()).plus(this.leaseExpirationInterval); 
this.logger.debug("Current lease timestamp: {}, current time: {}", leaseExpireTime, Instant.now()); return leaseExpireTime.isBefore(Instant.now()); } }
yea it will be different. if it is removed above onErrorResume, it will not be executed on failure cases.
private Mono<Void> run(CancellationToken cancellationToken) { return Flux.just(this) .flatMap(value -> this.leaseContainer.getAllLeases()) .collectList() .flatMap(allLeases -> { if (cancellationToken.isCancellationRequested()) return Mono.empty(); List<Lease> leasesToTake = this.partitionLoadBalancingStrategy.selectLeasesToTake(allLeases); if (leasesToTake.size() > 0) { this.logger.info("Found {} total leases, taking ownership of {}", allLeases.size(), leasesToTake.size()); } if (cancellationToken.isCancellationRequested()) return Mono.empty(); return Flux.fromIterable(leasesToTake) .limitRate(1) .flatMap(lease -> { if (cancellationToken.isCancellationRequested()) return Mono.empty(); return this.partitionController.addOrUpdateLease(lease); }) .then(); }) .onErrorResume(throwable -> { logger.warn("Unexpected exception thrown while trying to acquire available leases", throwable); return Mono.empty(); }) .then( Mono.just(this) .flatMap(value -> { if (cancellationToken.isCancellationRequested()) { return Mono.empty(); } Instant stopTimer = Instant.now().plus(this.leaseAcquireInterval); return Mono.just(value) .delayElement(Duration.ofMillis(100), CosmosSchedulers.COSMOS_PARALLEL) .repeat(() -> { Instant currentTime = Instant.now(); return !cancellationToken.isCancellationRequested() && currentTime.isBefore(stopTimer); }) .then(); }) ) .repeat(() -> !cancellationToken.isCancellationRequested()) .then() .onErrorResume(throwable -> { logger.info("Partition load balancer task stopped."); return this.stop(); }); }
.then(
private Mono<Void> run(CancellationToken cancellationToken) { return Flux.just(this) .flatMap(value -> this.leaseContainer.getAllLeases()) .collectList() .flatMap(allLeases -> { if (cancellationToken.isCancellationRequested()) return Mono.empty(); List<Lease> leasesToTake = this.partitionLoadBalancingStrategy.selectLeasesToTake(allLeases); if (leasesToTake.size() > 0) { this.logger.info("Found {} total leases, taking ownership of {}", allLeases.size(), leasesToTake.size()); } if (cancellationToken.isCancellationRequested()) return Mono.empty(); return Flux.fromIterable(leasesToTake) .limitRate(1) .flatMap(lease -> { if (cancellationToken.isCancellationRequested()) return Mono.empty(); return this.partitionController.addOrUpdateLease(lease); }) .then(); }) .onErrorResume(throwable -> { logger.warn("Unexpected exception thrown while trying to acquire available leases", throwable); return Mono.empty(); }) .then( Mono.just(this) .flatMap(value -> { if (cancellationToken.isCancellationRequested()) { return Mono.empty(); } Instant stopTimer = Instant.now().plus(this.leaseAcquireInterval); return Mono.just(value) .delayElement(Duration.ofMillis(100), CosmosSchedulers.COSMOS_PARALLEL) .repeat(() -> { Instant currentTime = Instant.now(); return !cancellationToken.isCancellationRequested() && currentTime.isBefore(stopTimer); }) .then(); }) ) .repeat(() -> !cancellationToken.isCancellationRequested()) .then() .onErrorResume(throwable -> { logger.info("Partition load balancer task stopped."); return this.stop(); }); }
class PartitionLoadBalancerImpl implements PartitionLoadBalancer { private final Logger logger = LoggerFactory.getLogger(PartitionLoadBalancerImpl.class); private final PartitionController partitionController; private final LeaseContainer leaseContainer; private final PartitionLoadBalancingStrategy partitionLoadBalancingStrategy; private final Duration leaseAcquireInterval; private final Scheduler scheduler; private CancellationTokenSource cancellationTokenSource; private volatile boolean started; private final Object lock; public PartitionLoadBalancerImpl( PartitionController partitionController, LeaseContainer leaseContainer, PartitionLoadBalancingStrategy partitionLoadBalancingStrategy, Duration leaseAcquireInterval, Scheduler scheduler) { checkNotNull(partitionController, "Argument 'partitionController' can not be null"); checkNotNull(leaseContainer, "Argument 'leaseContainer' can not be null"); checkNotNull(partitionLoadBalancingStrategy, "Argument 'partitionLoadBalancingStrategy' can not be null"); checkNotNull(scheduler, "Argument 'scheduler' can not be null"); this.partitionController = partitionController; this.leaseContainer = leaseContainer; this.partitionLoadBalancingStrategy = partitionLoadBalancingStrategy; this.leaseAcquireInterval = leaseAcquireInterval; this.scheduler = scheduler; this.started = false; this.lock = new Object(); } @Override public Mono<Void> start() { synchronized (lock) { if (this.started) { throw new IllegalStateException("Partition load balancer already started"); } this.cancellationTokenSource = new CancellationTokenSource(); this.started = true; } return Mono.fromRunnable( () -> { scheduler.schedule(() -> this.run(this.cancellationTokenSource.getToken()).subscribe()); }); } @Override public Mono<Void> stop() { synchronized (lock) { this.started = false; this.cancellationTokenSource.cancel(); } return this.partitionController.shutdown(); } @Override public boolean isRunning() { return this.started; } }
class PartitionLoadBalancerImpl implements PartitionLoadBalancer { private final Logger logger = LoggerFactory.getLogger(PartitionLoadBalancerImpl.class); private final PartitionController partitionController; private final LeaseContainer leaseContainer; private final PartitionLoadBalancingStrategy partitionLoadBalancingStrategy; private final Duration leaseAcquireInterval; private final Scheduler scheduler; private CancellationTokenSource cancellationTokenSource; private volatile boolean started; private final Object lock; public PartitionLoadBalancerImpl( PartitionController partitionController, LeaseContainer leaseContainer, PartitionLoadBalancingStrategy partitionLoadBalancingStrategy, Duration leaseAcquireInterval, Scheduler scheduler) { checkNotNull(partitionController, "Argument 'partitionController' can not be null"); checkNotNull(leaseContainer, "Argument 'leaseContainer' can not be null"); checkNotNull(partitionLoadBalancingStrategy, "Argument 'partitionLoadBalancingStrategy' can not be null"); checkNotNull(scheduler, "Argument 'scheduler' can not be null"); this.partitionController = partitionController; this.leaseContainer = leaseContainer; this.partitionLoadBalancingStrategy = partitionLoadBalancingStrategy; this.leaseAcquireInterval = leaseAcquireInterval; this.scheduler = scheduler; this.started = false; this.lock = new Object(); } @Override public Mono<Void> start() { synchronized (lock) { if (this.started) { throw new IllegalStateException("Partition load balancer already started"); } this.cancellationTokenSource = new CancellationTokenSource(); this.started = true; } return Mono.fromRunnable( () -> { scheduler.schedule(() -> this.run(this.cancellationTokenSource.getToken()).subscribe()); }); } @Override public Mono<Void> stop() { synchronized (lock) { this.started = false; this.cancellationTokenSource.cancel(); } return this.partitionController.shutdown(); } @Override public boolean isRunning() { return this.started; } }
You'll still need the then --> The then operator strips the "final" completion signal of the flux and exposes it as a Mono. otherwise the onErrorResume would happen for every signal of the flux - not just once. At least that is my understanding - @xinlian12 can probably better explain or correct me ;-)
private Mono<Void> run(CancellationToken cancellationToken) { return Flux.just(this) .flatMap(value -> this.leaseContainer.getAllLeases()) .collectList() .flatMap(allLeases -> { if (cancellationToken.isCancellationRequested()) return Mono.empty(); List<Lease> leasesToTake = this.partitionLoadBalancingStrategy.selectLeasesToTake(allLeases); if (leasesToTake.size() > 0) { this.logger.info("Found {} total leases, taking ownership of {}", allLeases.size(), leasesToTake.size()); } if (cancellationToken.isCancellationRequested()) return Mono.empty(); return Flux.fromIterable(leasesToTake) .limitRate(1) .flatMap(lease -> { if (cancellationToken.isCancellationRequested()) return Mono.empty(); return this.partitionController.addOrUpdateLease(lease); }) .then(); }) .onErrorResume(throwable -> { logger.warn("Unexpected exception thrown while trying to acquire available leases", throwable); return Mono.empty(); }) .then( Mono.just(this) .flatMap(value -> { if (cancellationToken.isCancellationRequested()) { return Mono.empty(); } Instant stopTimer = Instant.now().plus(this.leaseAcquireInterval); return Mono.just(value) .delayElement(Duration.ofMillis(100), CosmosSchedulers.COSMOS_PARALLEL) .repeat(() -> { Instant currentTime = Instant.now(); return !cancellationToken.isCancellationRequested() && currentTime.isBefore(stopTimer); }) .then(); }) ) .repeat(() -> !cancellationToken.isCancellationRequested()) .then() .onErrorResume(throwable -> { logger.info("Partition load balancer task stopped."); return this.stop(); }); }
.then();
private Mono<Void> run(CancellationToken cancellationToken) { return Flux.just(this) .flatMap(value -> this.leaseContainer.getAllLeases()) .collectList() .flatMap(allLeases -> { if (cancellationToken.isCancellationRequested()) return Mono.empty(); List<Lease> leasesToTake = this.partitionLoadBalancingStrategy.selectLeasesToTake(allLeases); if (leasesToTake.size() > 0) { this.logger.info("Found {} total leases, taking ownership of {}", allLeases.size(), leasesToTake.size()); } if (cancellationToken.isCancellationRequested()) return Mono.empty(); return Flux.fromIterable(leasesToTake) .limitRate(1) .flatMap(lease -> { if (cancellationToken.isCancellationRequested()) return Mono.empty(); return this.partitionController.addOrUpdateLease(lease); }) .then(); }) .onErrorResume(throwable -> { logger.warn("Unexpected exception thrown while trying to acquire available leases", throwable); return Mono.empty(); }) .then( Mono.just(this) .flatMap(value -> { if (cancellationToken.isCancellationRequested()) { return Mono.empty(); } Instant stopTimer = Instant.now().plus(this.leaseAcquireInterval); return Mono.just(value) .delayElement(Duration.ofMillis(100), CosmosSchedulers.COSMOS_PARALLEL) .repeat(() -> { Instant currentTime = Instant.now(); return !cancellationToken.isCancellationRequested() && currentTime.isBefore(stopTimer); }) .then(); }) ) .repeat(() -> !cancellationToken.isCancellationRequested()) .then() .onErrorResume(throwable -> { logger.info("Partition load balancer task stopped."); return this.stop(); }); }
class PartitionLoadBalancerImpl implements PartitionLoadBalancer { private final Logger logger = LoggerFactory.getLogger(PartitionLoadBalancerImpl.class); private final PartitionController partitionController; private final LeaseContainer leaseContainer; private final PartitionLoadBalancingStrategy partitionLoadBalancingStrategy; private final Duration leaseAcquireInterval; private final Scheduler scheduler; private CancellationTokenSource cancellationTokenSource; private volatile boolean started; private final Object lock; public PartitionLoadBalancerImpl( PartitionController partitionController, LeaseContainer leaseContainer, PartitionLoadBalancingStrategy partitionLoadBalancingStrategy, Duration leaseAcquireInterval, Scheduler scheduler) { checkNotNull(partitionController, "Argument 'partitionController' can not be null"); checkNotNull(leaseContainer, "Argument 'leaseContainer' can not be null"); checkNotNull(partitionLoadBalancingStrategy, "Argument 'partitionLoadBalancingStrategy' can not be null"); checkNotNull(scheduler, "Argument 'scheduler' can not be null"); this.partitionController = partitionController; this.leaseContainer = leaseContainer; this.partitionLoadBalancingStrategy = partitionLoadBalancingStrategy; this.leaseAcquireInterval = leaseAcquireInterval; this.scheduler = scheduler; this.started = false; this.lock = new Object(); } @Override public Mono<Void> start() { synchronized (lock) { if (this.started) { throw new IllegalStateException("Partition load balancer already started"); } this.cancellationTokenSource = new CancellationTokenSource(); this.started = true; } return Mono.fromRunnable( () -> { scheduler.schedule(() -> this.run(this.cancellationTokenSource.getToken()).subscribe()); }); } @Override public Mono<Void> stop() { synchronized (lock) { this.started = false; this.cancellationTokenSource.cancel(); } return this.partitionController.shutdown(); } @Override public boolean isRunning() { return this.started; } }
class PartitionLoadBalancerImpl implements PartitionLoadBalancer { private final Logger logger = LoggerFactory.getLogger(PartitionLoadBalancerImpl.class); private final PartitionController partitionController; private final LeaseContainer leaseContainer; private final PartitionLoadBalancingStrategy partitionLoadBalancingStrategy; private final Duration leaseAcquireInterval; private final Scheduler scheduler; private CancellationTokenSource cancellationTokenSource; private volatile boolean started; private final Object lock; public PartitionLoadBalancerImpl( PartitionController partitionController, LeaseContainer leaseContainer, PartitionLoadBalancingStrategy partitionLoadBalancingStrategy, Duration leaseAcquireInterval, Scheduler scheduler) { checkNotNull(partitionController, "Argument 'partitionController' can not be null"); checkNotNull(leaseContainer, "Argument 'leaseContainer' can not be null"); checkNotNull(partitionLoadBalancingStrategy, "Argument 'partitionLoadBalancingStrategy' can not be null"); checkNotNull(scheduler, "Argument 'scheduler' can not be null"); this.partitionController = partitionController; this.leaseContainer = leaseContainer; this.partitionLoadBalancingStrategy = partitionLoadBalancingStrategy; this.leaseAcquireInterval = leaseAcquireInterval; this.scheduler = scheduler; this.started = false; this.lock = new Object(); } @Override public Mono<Void> start() { synchronized (lock) { if (this.started) { throw new IllegalStateException("Partition load balancer already started"); } this.cancellationTokenSource = new CancellationTokenSource(); this.started = true; } return Mono.fromRunnable( () -> { scheduler.schedule(() -> this.run(this.cancellationTokenSource.getToken()).subscribe()); }); } @Override public Mono<Void> stop() { synchronized (lock) { this.started = false; this.cancellationTokenSource.cancel(); } return this.partitionController.shutdown(); } @Override public boolean isRunning() { return this.started; } }
I would also prefer if we change this to "<= 0" with the change in the comment as well that we reach this case where current instance has either more or exact "maxScaleCount" number of leases owned.
public List<Lease> selectLeasesToTake(List<Lease> allLeases) { Map<String, Integer> workerToPartitionCount = new HashMap<>(); List<Lease> expiredLeases = new ArrayList<>(); Map<String, Lease> allPartitions = new HashMap<>(); this.categorizeLeases(allLeases, allPartitions, expiredLeases, workerToPartitionCount); int partitionCount = allPartitions.size(); int workerCount = workerToPartitionCount.size(); if (partitionCount <= 0) { return new ArrayList<Lease>(); } int target = this.calculateTargetPartitionCount(partitionCount, workerCount); int myCount = workerToPartitionCount.get(this.hostName); int partitionsNeededForMe = target - myCount; if (expiredLeases.size() > 0) { if ((this.maxPartitionCount == 0 && partitionsNeededForMe <= 0) || (partitionsNeededForMe > 1 && workerToPartitionCount.size() > 1)) { partitionsNeededForMe = 1; } if (partitionsNeededForMe == 1) { Random random = new Random(); Lease expiredLease = expiredLeases.get(random.nextInt(expiredLeases.size())); this.logger.info("Found unused or expired lease {} (owner was {}); previous lease count for instance owner {} is {}, count of leases to target is {} and maxScaleCount {} ", expiredLease.getLeaseToken(), expiredLease.getOwner(), this.hostName, myCount, partitionsNeededForMe, this.maxPartitionCount); return Collections.singletonList(expiredLease); } else { for (Lease lease : expiredLeases) { this.logger.info("Found unused or expired lease {} (owner was {}); previous lease count for instance owner {} is {} and maxScaleCount {} ", lease.getLeaseToken(), lease.getOwner(), this.hostName, myCount, this.maxPartitionCount); } } if (partitionsNeededForMe < 0) { return new ArrayList<>(); } return expiredLeases.subList(0, Math.min(partitionsNeededForMe, expiredLeases.size())); } if (partitionsNeededForMe <= 0) return new ArrayList<Lease>(); Lease stolenLease = getLeaseToSteal(workerToPartitionCount, target, partitionsNeededForMe, allPartitions); List<Lease> stolenLeases = new ArrayList<>(); if (stolenLease != 
null) { stolenLeases.add(stolenLease); } return stolenLeases; }
if (partitionsNeededForMe < 0)
public List<Lease> selectLeasesToTake(List<Lease> allLeases) { Map<String, Integer> workerToPartitionCount = new HashMap<>(); List<Lease> expiredLeases = new ArrayList<>(); Map<String, Lease> allPartitions = new HashMap<>(); this.categorizeLeases(allLeases, allPartitions, expiredLeases, workerToPartitionCount); int partitionCount = allPartitions.size(); int workerCount = workerToPartitionCount.size(); if (partitionCount <= 0) { return new ArrayList<Lease>(); } int target = this.calculateTargetPartitionCount(partitionCount, workerCount); int myCount = workerToPartitionCount.get(this.hostName); int partitionsNeededForMe = target - myCount; if (expiredLeases.size() > 0) { if ((this.maxPartitionCount == 0 && partitionsNeededForMe <= 0) || (partitionsNeededForMe > 1 && workerToPartitionCount.size() > 1)) { partitionsNeededForMe = 1; } if (partitionsNeededForMe == 1) { Random random = new Random(); Lease expiredLease = expiredLeases.get(random.nextInt(expiredLeases.size())); this.logger.info("Found unused or expired lease {} (owner was {}); previous lease count for instance owner {} is {}, count of leases to target is {} and maxScaleCount {} ", expiredLease.getLeaseToken(), expiredLease.getOwner(), this.hostName, myCount, partitionsNeededForMe, this.maxPartitionCount); return Collections.singletonList(expiredLease); } else { for (Lease lease : expiredLeases) { this.logger.info("Found unused or expired lease {} (owner was {}); previous lease count for instance owner {} is {} and maxScaleCount {} ", lease.getLeaseToken(), lease.getOwner(), this.hostName, myCount, this.maxPartitionCount); } } if (partitionsNeededForMe <= 0) { return new ArrayList<>(); } return expiredLeases.subList(0, Math.min(partitionsNeededForMe, expiredLeases.size())); } if (partitionsNeededForMe <= 0) return new ArrayList<Lease>(); Lease stolenLease = getLeaseToSteal(workerToPartitionCount, target, partitionsNeededForMe, allPartitions); List<Lease> stolenLeases = new ArrayList<>(); if (stolenLease != 
null) { stolenLeases.add(stolenLease); } return stolenLeases; }
class EqualPartitionsBalancingStrategy implements PartitionLoadBalancingStrategy { private final Logger logger = LoggerFactory.getLogger(EqualPartitionsBalancingStrategy.class); private final String hostName; private final int minPartitionCount; private final int maxPartitionCount; private final Duration leaseExpirationInterval; public EqualPartitionsBalancingStrategy(String hostName, int minPartitionCount, int maxPartitionCount, Duration leaseExpirationInterval) { if (hostName == null) { throw new IllegalArgumentException("hostName"); } this.hostName = hostName; this.minPartitionCount = minPartitionCount; this.maxPartitionCount = maxPartitionCount; this.leaseExpirationInterval = leaseExpirationInterval; } @Override private static Lease getLeaseToSteal( Map<String, Integer> workerToPartitionCount, int target, int partitionsNeededForMe, Map<String, Lease> allPartitions) { Map.Entry<String, Integer> workerToStealFrom = findWorkerWithMostPartitions(workerToPartitionCount); if (workerToStealFrom.getValue() > target - (partitionsNeededForMe > 1 ? 
1 : 0)) { for (Map.Entry<String, Lease> entry : allPartitions.entrySet()) { if (entry.getValue().getOwner().equalsIgnoreCase(workerToStealFrom.getKey())) { return entry.getValue(); } } } return null; } private static Map.Entry<String, Integer> findWorkerWithMostPartitions(Map<String, Integer> workerToPartitionCount) { Map.Entry<String, Integer> workerToStealFrom = new ChangeFeedHelper.KeyValuePair<>("", 0); for (Map.Entry<String, Integer> entry : workerToPartitionCount.entrySet()) { if (workerToStealFrom.getValue() <= entry.getValue()) { workerToStealFrom = entry; } } return workerToStealFrom; } private int calculateTargetPartitionCount(int partitionCount, int workerCount) { int target = 1; if (partitionCount > workerCount) { target = (int)Math.ceil((double)partitionCount / workerCount); } if (this.maxPartitionCount > 0 && target > this.maxPartitionCount) { target = this.maxPartitionCount; } if (this.minPartitionCount > 0 && target < this.minPartitionCount) { target = this.minPartitionCount; } return target; } private void categorizeLeases( List<Lease> allLeases, Map<String, Lease> allPartitions, List<Lease> expiredLeases, Map<String, Integer> workerToPartitionCount) { for (Lease lease : allLeases) { allPartitions.put(lease.getLeaseToken(), lease); if (lease.getOwner() == null || lease.getOwner().isEmpty() || this.isExpired(lease)) { expiredLeases.add(lease); } else { String assignedTo = lease.getOwner(); Integer count = workerToPartitionCount.get(assignedTo); if (count != null) { workerToPartitionCount.replace(assignedTo, count + 1); } else { workerToPartitionCount.put(assignedTo, 1); } } } if (!workerToPartitionCount.containsKey(this.hostName)) { workerToPartitionCount.put(this.hostName, 0); } } private boolean isExpired(Lease lease) { if (lease.getOwner() == null || lease.getOwner().isEmpty() || lease.getTimestamp() == null) { return true; } Instant leaseExpireTime = Instant.parse(lease.getTimestamp()).plus(this.leaseExpirationInterval); 
this.logger.debug("Current lease timestamp: {}, current time: {}", leaseExpireTime, Instant.now()); return leaseExpireTime.isBefore(Instant.now()); } }
class EqualPartitionsBalancingStrategy implements PartitionLoadBalancingStrategy { private final Logger logger = LoggerFactory.getLogger(EqualPartitionsBalancingStrategy.class); private final String hostName; private final int minPartitionCount; private final int maxPartitionCount; private final Duration leaseExpirationInterval; public EqualPartitionsBalancingStrategy(String hostName, int minPartitionCount, int maxPartitionCount, Duration leaseExpirationInterval) { if (hostName == null) { throw new IllegalArgumentException("hostName"); } this.hostName = hostName; this.minPartitionCount = minPartitionCount; this.maxPartitionCount = maxPartitionCount; this.leaseExpirationInterval = leaseExpirationInterval; } @Override private static Lease getLeaseToSteal( Map<String, Integer> workerToPartitionCount, int target, int partitionsNeededForMe, Map<String, Lease> allPartitions) { Map.Entry<String, Integer> workerToStealFrom = findWorkerWithMostPartitions(workerToPartitionCount); if (workerToStealFrom.getValue() > target - (partitionsNeededForMe > 1 ? 
1 : 0)) { for (Map.Entry<String, Lease> entry : allPartitions.entrySet()) { if (entry.getValue().getOwner().equalsIgnoreCase(workerToStealFrom.getKey())) { return entry.getValue(); } } } return null; } private static Map.Entry<String, Integer> findWorkerWithMostPartitions(Map<String, Integer> workerToPartitionCount) { Map.Entry<String, Integer> workerToStealFrom = new ChangeFeedHelper.KeyValuePair<>("", 0); for (Map.Entry<String, Integer> entry : workerToPartitionCount.entrySet()) { if (workerToStealFrom.getValue() <= entry.getValue()) { workerToStealFrom = entry; } } return workerToStealFrom; } private int calculateTargetPartitionCount(int partitionCount, int workerCount) { int target = 1; if (partitionCount > workerCount) { target = (int)Math.ceil((double)partitionCount / workerCount); } if (this.maxPartitionCount > 0 && target > this.maxPartitionCount) { target = this.maxPartitionCount; } if (this.minPartitionCount > 0 && target < this.minPartitionCount) { target = this.minPartitionCount; } return target; } private void categorizeLeases( List<Lease> allLeases, Map<String, Lease> allPartitions, List<Lease> expiredLeases, Map<String, Integer> workerToPartitionCount) { for (Lease lease : allLeases) { allPartitions.put(lease.getLeaseToken(), lease); if (lease.getOwner() == null || lease.getOwner().isEmpty() || this.isExpired(lease)) { expiredLeases.add(lease); } else { String assignedTo = lease.getOwner(); Integer count = workerToPartitionCount.get(assignedTo); if (count != null) { workerToPartitionCount.replace(assignedTo, count + 1); } else { workerToPartitionCount.put(assignedTo, 1); } } } if (!workerToPartitionCount.containsKey(this.hostName)) { workerToPartitionCount.put(this.hostName, 0); } } private boolean isExpired(Lease lease) { if (lease.getOwner() == null || lease.getOwner().isEmpty() || lease.getTimestamp() == null) { return true; } Instant leaseExpireTime = Instant.parse(lease.getTimestamp()).plus(this.leaseExpirationInterval); 
this.logger.debug("Current lease timestamp: {}, current time: {}", leaseExpireTime, Instant.now()); return leaseExpireTime.isBefore(Instant.now()); } }
and it will complain the return type mismatch >_<
private Mono<Void> run(CancellationToken cancellationToken) { return Flux.just(this) .flatMap(value -> this.leaseContainer.getAllLeases()) .collectList() .flatMap(allLeases -> { if (cancellationToken.isCancellationRequested()) return Mono.empty(); List<Lease> leasesToTake = this.partitionLoadBalancingStrategy.selectLeasesToTake(allLeases); if (leasesToTake.size() > 0) { this.logger.info("Found {} total leases, taking ownership of {}", allLeases.size(), leasesToTake.size()); } if (cancellationToken.isCancellationRequested()) return Mono.empty(); return Flux.fromIterable(leasesToTake) .limitRate(1) .flatMap(lease -> { if (cancellationToken.isCancellationRequested()) return Mono.empty(); return this.partitionController.addOrUpdateLease(lease); }) .then(); }) .onErrorResume(throwable -> { logger.warn("Unexpected exception thrown while trying to acquire available leases", throwable); return Mono.empty(); }) .then( Mono.just(this) .flatMap(value -> { if (cancellationToken.isCancellationRequested()) { return Mono.empty(); } Instant stopTimer = Instant.now().plus(this.leaseAcquireInterval); return Mono.just(value) .delayElement(Duration.ofMillis(100), CosmosSchedulers.COSMOS_PARALLEL) .repeat(() -> { Instant currentTime = Instant.now(); return !cancellationToken.isCancellationRequested() && currentTime.isBefore(stopTimer); }) .then(); }) ) .repeat(() -> !cancellationToken.isCancellationRequested()) .then() .onErrorResume(throwable -> { logger.info("Partition load balancer task stopped."); return this.stop(); }); }
.then();
private Mono<Void> run(CancellationToken cancellationToken) { return Flux.just(this) .flatMap(value -> this.leaseContainer.getAllLeases()) .collectList() .flatMap(allLeases -> { if (cancellationToken.isCancellationRequested()) return Mono.empty(); List<Lease> leasesToTake = this.partitionLoadBalancingStrategy.selectLeasesToTake(allLeases); if (leasesToTake.size() > 0) { this.logger.info("Found {} total leases, taking ownership of {}", allLeases.size(), leasesToTake.size()); } if (cancellationToken.isCancellationRequested()) return Mono.empty(); return Flux.fromIterable(leasesToTake) .limitRate(1) .flatMap(lease -> { if (cancellationToken.isCancellationRequested()) return Mono.empty(); return this.partitionController.addOrUpdateLease(lease); }) .then(); }) .onErrorResume(throwable -> { logger.warn("Unexpected exception thrown while trying to acquire available leases", throwable); return Mono.empty(); }) .then( Mono.just(this) .flatMap(value -> { if (cancellationToken.isCancellationRequested()) { return Mono.empty(); } Instant stopTimer = Instant.now().plus(this.leaseAcquireInterval); return Mono.just(value) .delayElement(Duration.ofMillis(100), CosmosSchedulers.COSMOS_PARALLEL) .repeat(() -> { Instant currentTime = Instant.now(); return !cancellationToken.isCancellationRequested() && currentTime.isBefore(stopTimer); }) .then(); }) ) .repeat(() -> !cancellationToken.isCancellationRequested()) .then() .onErrorResume(throwable -> { logger.info("Partition load balancer task stopped."); return this.stop(); }); }
class PartitionLoadBalancerImpl implements PartitionLoadBalancer { private final Logger logger = LoggerFactory.getLogger(PartitionLoadBalancerImpl.class); private final PartitionController partitionController; private final LeaseContainer leaseContainer; private final PartitionLoadBalancingStrategy partitionLoadBalancingStrategy; private final Duration leaseAcquireInterval; private final Scheduler scheduler; private CancellationTokenSource cancellationTokenSource; private volatile boolean started; private final Object lock; public PartitionLoadBalancerImpl( PartitionController partitionController, LeaseContainer leaseContainer, PartitionLoadBalancingStrategy partitionLoadBalancingStrategy, Duration leaseAcquireInterval, Scheduler scheduler) { checkNotNull(partitionController, "Argument 'partitionController' can not be null"); checkNotNull(leaseContainer, "Argument 'leaseContainer' can not be null"); checkNotNull(partitionLoadBalancingStrategy, "Argument 'partitionLoadBalancingStrategy' can not be null"); checkNotNull(scheduler, "Argument 'scheduler' can not be null"); this.partitionController = partitionController; this.leaseContainer = leaseContainer; this.partitionLoadBalancingStrategy = partitionLoadBalancingStrategy; this.leaseAcquireInterval = leaseAcquireInterval; this.scheduler = scheduler; this.started = false; this.lock = new Object(); } @Override public Mono<Void> start() { synchronized (lock) { if (this.started) { throw new IllegalStateException("Partition load balancer already started"); } this.cancellationTokenSource = new CancellationTokenSource(); this.started = true; } return Mono.fromRunnable( () -> { scheduler.schedule(() -> this.run(this.cancellationTokenSource.getToken()).subscribe()); }); } @Override public Mono<Void> stop() { synchronized (lock) { this.started = false; this.cancellationTokenSource.cancel(); } return this.partitionController.shutdown(); } @Override public boolean isRunning() { return this.started; } }
class PartitionLoadBalancerImpl implements PartitionLoadBalancer { private final Logger logger = LoggerFactory.getLogger(PartitionLoadBalancerImpl.class); private final PartitionController partitionController; private final LeaseContainer leaseContainer; private final PartitionLoadBalancingStrategy partitionLoadBalancingStrategy; private final Duration leaseAcquireInterval; private final Scheduler scheduler; private CancellationTokenSource cancellationTokenSource; private volatile boolean started; private final Object lock; public PartitionLoadBalancerImpl( PartitionController partitionController, LeaseContainer leaseContainer, PartitionLoadBalancingStrategy partitionLoadBalancingStrategy, Duration leaseAcquireInterval, Scheduler scheduler) { checkNotNull(partitionController, "Argument 'partitionController' can not be null"); checkNotNull(leaseContainer, "Argument 'leaseContainer' can not be null"); checkNotNull(partitionLoadBalancingStrategy, "Argument 'partitionLoadBalancingStrategy' can not be null"); checkNotNull(scheduler, "Argument 'scheduler' can not be null"); this.partitionController = partitionController; this.leaseContainer = leaseContainer; this.partitionLoadBalancingStrategy = partitionLoadBalancingStrategy; this.leaseAcquireInterval = leaseAcquireInterval; this.scheduler = scheduler; this.started = false; this.lock = new Object(); } @Override public Mono<Void> start() { synchronized (lock) { if (this.started) { throw new IllegalStateException("Partition load balancer already started"); } this.cancellationTokenSource = new CancellationTokenSource(); this.started = true; } return Mono.fromRunnable( () -> { scheduler.schedule(() -> this.run(this.cancellationTokenSource.getToken()).subscribe()); }); } @Override public Mono<Void> stop() { synchronized (lock) { this.started = false; this.cancellationTokenSource.cancel(); } return this.partitionController.shutdown(); } @Override public boolean isRunning() { return this.started; } }
lol yes will need it as we have two versions. So the same fix needs to apply in both versions
private Mono<Void> run(CancellationToken cancellationToken) { return Flux.just(this) .flatMap(value -> this.leaseContainer.getAllLeases()) .collectList() .flatMap(allLeases -> { if (cancellationToken.isCancellationRequested()) return Mono.empty(); List<Lease> leasesToTake = this.partitionLoadBalancingStrategy.selectLeasesToTake(allLeases); if (leasesToTake.size() > 0) { this.logger.info("Found {} total leases, taking ownership of {}", allLeases.size(), leasesToTake.size()); } if (cancellationToken.isCancellationRequested()) return Mono.empty(); return Flux.fromIterable(leasesToTake) .limitRate(1) .flatMap(lease -> { if (cancellationToken.isCancellationRequested()) return Mono.empty(); return this.partitionController.addOrUpdateLease(lease); }) .then(); }) .onErrorResume(throwable -> { logger.warn("Unexpected exception thrown while trying to acquire available leases", throwable); return Mono.empty(); }) .then( Mono.just(this) .flatMap(value -> { if (cancellationToken.isCancellationRequested()) { return Mono.empty(); } Instant stopTimer = Instant.now().plus(this.leaseAcquireInterval); return Mono.just(value) .delayElement(Duration.ofMillis(100), CosmosSchedulers.COSMOS_PARALLEL) .repeat(() -> { Instant currentTime = Instant.now(); return !cancellationToken.isCancellationRequested() && currentTime.isBefore(stopTimer); }) .then(); }) ) .repeat(() -> !cancellationToken.isCancellationRequested()) .then() .onErrorResume(throwable -> { logger.info("Partition load balancer task stopped."); return this.stop(); }); }
.then();
private Mono<Void> run(CancellationToken cancellationToken) { return Flux.just(this) .flatMap(value -> this.leaseContainer.getAllLeases()) .collectList() .flatMap(allLeases -> { if (cancellationToken.isCancellationRequested()) return Mono.empty(); List<Lease> leasesToTake = this.partitionLoadBalancingStrategy.selectLeasesToTake(allLeases); if (leasesToTake.size() > 0) { this.logger.info("Found {} total leases, taking ownership of {}", allLeases.size(), leasesToTake.size()); } if (cancellationToken.isCancellationRequested()) return Mono.empty(); return Flux.fromIterable(leasesToTake) .limitRate(1) .flatMap(lease -> { if (cancellationToken.isCancellationRequested()) return Mono.empty(); return this.partitionController.addOrUpdateLease(lease); }) .then(); }) .onErrorResume(throwable -> { logger.warn("Unexpected exception thrown while trying to acquire available leases", throwable); return Mono.empty(); }) .then( Mono.just(this) .flatMap(value -> { if (cancellationToken.isCancellationRequested()) { return Mono.empty(); } Instant stopTimer = Instant.now().plus(this.leaseAcquireInterval); return Mono.just(value) .delayElement(Duration.ofMillis(100), CosmosSchedulers.COSMOS_PARALLEL) .repeat(() -> { Instant currentTime = Instant.now(); return !cancellationToken.isCancellationRequested() && currentTime.isBefore(stopTimer); }) .then(); }) ) .repeat(() -> !cancellationToken.isCancellationRequested()) .then() .onErrorResume(throwable -> { logger.info("Partition load balancer task stopped."); return this.stop(); }); }
class PartitionLoadBalancerImpl implements PartitionLoadBalancer { private final Logger logger = LoggerFactory.getLogger(PartitionLoadBalancerImpl.class); private final PartitionController partitionController; private final LeaseContainer leaseContainer; private final PartitionLoadBalancingStrategy partitionLoadBalancingStrategy; private final Duration leaseAcquireInterval; private final Scheduler scheduler; private CancellationTokenSource cancellationTokenSource; private volatile boolean started; private final Object lock; public PartitionLoadBalancerImpl( PartitionController partitionController, LeaseContainer leaseContainer, PartitionLoadBalancingStrategy partitionLoadBalancingStrategy, Duration leaseAcquireInterval, Scheduler scheduler) { if (partitionController == null) { throw new IllegalArgumentException("partitionController"); } if (leaseContainer == null) { throw new IllegalArgumentException("leaseContainer"); } if (partitionLoadBalancingStrategy == null) { throw new IllegalArgumentException("partitionLoadBalancingStrategy"); } if (scheduler == null) { throw new IllegalArgumentException("executorService"); } this.partitionController = partitionController; this.leaseContainer = leaseContainer; this.partitionLoadBalancingStrategy = partitionLoadBalancingStrategy; this.leaseAcquireInterval = leaseAcquireInterval; this.scheduler = scheduler; this.started = false; this.lock = new Object(); } @Override public Mono<Void> start() { synchronized (lock) { if (this.started) { throw new IllegalStateException("Partition load balancer already started"); } this.cancellationTokenSource = new CancellationTokenSource(); this.started = true; } return Mono.fromRunnable( () -> { scheduler.schedule(() -> this.run(this.cancellationTokenSource.getToken()).subscribe()); }); } @Override public Mono<Void> stop() { synchronized (lock) { this.started = false; this.cancellationTokenSource.cancel(); } return this.partitionController.shutdown(); } @Override public boolean isRunning() { 
return this.started; } }
class PartitionLoadBalancerImpl implements PartitionLoadBalancer { private final Logger logger = LoggerFactory.getLogger(PartitionLoadBalancerImpl.class); private final PartitionController partitionController; private final LeaseContainer leaseContainer; private final PartitionLoadBalancingStrategy partitionLoadBalancingStrategy; private final Duration leaseAcquireInterval; private final Scheduler scheduler; private CancellationTokenSource cancellationTokenSource; private volatile boolean started; private final Object lock; public PartitionLoadBalancerImpl( PartitionController partitionController, LeaseContainer leaseContainer, PartitionLoadBalancingStrategy partitionLoadBalancingStrategy, Duration leaseAcquireInterval, Scheduler scheduler) { if (partitionController == null) { throw new IllegalArgumentException("partitionController"); } if (leaseContainer == null) { throw new IllegalArgumentException("leaseContainer"); } if (partitionLoadBalancingStrategy == null) { throw new IllegalArgumentException("partitionLoadBalancingStrategy"); } if (scheduler == null) { throw new IllegalArgumentException("executorService"); } this.partitionController = partitionController; this.leaseContainer = leaseContainer; this.partitionLoadBalancingStrategy = partitionLoadBalancingStrategy; this.leaseAcquireInterval = leaseAcquireInterval; this.scheduler = scheduler; this.started = false; this.lock = new Object(); } @Override public Mono<Void> start() { synchronized (lock) { if (this.started) { throw new IllegalStateException("Partition load balancer already started"); } this.cancellationTokenSource = new CancellationTokenSource(); this.started = true; } return Mono.fromRunnable( () -> { scheduler.schedule(() -> this.run(this.cancellationTokenSource.getToken()).subscribe()); }); } @Override public Mono<Void> stop() { synchronized (lock) { this.started = false; this.cancellationTokenSource.cancel(); } return this.partitionController.shutdown(); } @Override public boolean isRunning() { 
return this.started; } }
Discussed offline, will keep it
private Mono<Void> run(CancellationToken cancellationToken) { return Flux.just(this) .flatMap(value -> this.leaseContainer.getAllLeases()) .collectList() .flatMap(allLeases -> { if (cancellationToken.isCancellationRequested()) return Mono.empty(); List<Lease> leasesToTake = this.partitionLoadBalancingStrategy.selectLeasesToTake(allLeases); if (leasesToTake.size() > 0) { this.logger.info("Found {} total leases, taking ownership of {}", allLeases.size(), leasesToTake.size()); } if (cancellationToken.isCancellationRequested()) return Mono.empty(); return Flux.fromIterable(leasesToTake) .limitRate(1) .flatMap(lease -> { if (cancellationToken.isCancellationRequested()) return Mono.empty(); return this.partitionController.addOrUpdateLease(lease); }) .then(); }) .onErrorResume(throwable -> { logger.warn("Unexpected exception thrown while trying to acquire available leases", throwable); return Mono.empty(); }) .then( Mono.just(this) .flatMap(value -> { if (cancellationToken.isCancellationRequested()) { return Mono.empty(); } Instant stopTimer = Instant.now().plus(this.leaseAcquireInterval); return Mono.just(value) .delayElement(Duration.ofMillis(100), CosmosSchedulers.COSMOS_PARALLEL) .repeat(() -> { Instant currentTime = Instant.now(); return !cancellationToken.isCancellationRequested() && currentTime.isBefore(stopTimer); }) .then(); }) ) .repeat(() -> !cancellationToken.isCancellationRequested()) .then() .onErrorResume(throwable -> { logger.info("Partition load balancer task stopped."); return this.stop(); }); }
.then();
private Mono<Void> run(CancellationToken cancellationToken) { return Flux.just(this) .flatMap(value -> this.leaseContainer.getAllLeases()) .collectList() .flatMap(allLeases -> { if (cancellationToken.isCancellationRequested()) return Mono.empty(); List<Lease> leasesToTake = this.partitionLoadBalancingStrategy.selectLeasesToTake(allLeases); if (leasesToTake.size() > 0) { this.logger.info("Found {} total leases, taking ownership of {}", allLeases.size(), leasesToTake.size()); } if (cancellationToken.isCancellationRequested()) return Mono.empty(); return Flux.fromIterable(leasesToTake) .limitRate(1) .flatMap(lease -> { if (cancellationToken.isCancellationRequested()) return Mono.empty(); return this.partitionController.addOrUpdateLease(lease); }) .then(); }) .onErrorResume(throwable -> { logger.warn("Unexpected exception thrown while trying to acquire available leases", throwable); return Mono.empty(); }) .then( Mono.just(this) .flatMap(value -> { if (cancellationToken.isCancellationRequested()) { return Mono.empty(); } Instant stopTimer = Instant.now().plus(this.leaseAcquireInterval); return Mono.just(value) .delayElement(Duration.ofMillis(100), CosmosSchedulers.COSMOS_PARALLEL) .repeat(() -> { Instant currentTime = Instant.now(); return !cancellationToken.isCancellationRequested() && currentTime.isBefore(stopTimer); }) .then(); }) ) .repeat(() -> !cancellationToken.isCancellationRequested()) .then() .onErrorResume(throwable -> { logger.info("Partition load balancer task stopped."); return this.stop(); }); }
class PartitionLoadBalancerImpl implements PartitionLoadBalancer { private final Logger logger = LoggerFactory.getLogger(PartitionLoadBalancerImpl.class); private final PartitionController partitionController; private final LeaseContainer leaseContainer; private final PartitionLoadBalancingStrategy partitionLoadBalancingStrategy; private final Duration leaseAcquireInterval; private final Scheduler scheduler; private CancellationTokenSource cancellationTokenSource; private volatile boolean started; private final Object lock; public PartitionLoadBalancerImpl( PartitionController partitionController, LeaseContainer leaseContainer, PartitionLoadBalancingStrategy partitionLoadBalancingStrategy, Duration leaseAcquireInterval, Scheduler scheduler) { if (partitionController == null) { throw new IllegalArgumentException("partitionController"); } if (leaseContainer == null) { throw new IllegalArgumentException("leaseContainer"); } if (partitionLoadBalancingStrategy == null) { throw new IllegalArgumentException("partitionLoadBalancingStrategy"); } if (scheduler == null) { throw new IllegalArgumentException("executorService"); } this.partitionController = partitionController; this.leaseContainer = leaseContainer; this.partitionLoadBalancingStrategy = partitionLoadBalancingStrategy; this.leaseAcquireInterval = leaseAcquireInterval; this.scheduler = scheduler; this.started = false; this.lock = new Object(); } @Override public Mono<Void> start() { synchronized (lock) { if (this.started) { throw new IllegalStateException("Partition load balancer already started"); } this.cancellationTokenSource = new CancellationTokenSource(); this.started = true; } return Mono.fromRunnable( () -> { scheduler.schedule(() -> this.run(this.cancellationTokenSource.getToken()).subscribe()); }); } @Override public Mono<Void> stop() { synchronized (lock) { this.started = false; this.cancellationTokenSource.cancel(); } return this.partitionController.shutdown(); } @Override public boolean isRunning() { 
return this.started; } }
class PartitionLoadBalancerImpl implements PartitionLoadBalancer { private final Logger logger = LoggerFactory.getLogger(PartitionLoadBalancerImpl.class); private final PartitionController partitionController; private final LeaseContainer leaseContainer; private final PartitionLoadBalancingStrategy partitionLoadBalancingStrategy; private final Duration leaseAcquireInterval; private final Scheduler scheduler; private CancellationTokenSource cancellationTokenSource; private volatile boolean started; private final Object lock; public PartitionLoadBalancerImpl( PartitionController partitionController, LeaseContainer leaseContainer, PartitionLoadBalancingStrategy partitionLoadBalancingStrategy, Duration leaseAcquireInterval, Scheduler scheduler) { if (partitionController == null) { throw new IllegalArgumentException("partitionController"); } if (leaseContainer == null) { throw new IllegalArgumentException("leaseContainer"); } if (partitionLoadBalancingStrategy == null) { throw new IllegalArgumentException("partitionLoadBalancingStrategy"); } if (scheduler == null) { throw new IllegalArgumentException("executorService"); } this.partitionController = partitionController; this.leaseContainer = leaseContainer; this.partitionLoadBalancingStrategy = partitionLoadBalancingStrategy; this.leaseAcquireInterval = leaseAcquireInterval; this.scheduler = scheduler; this.started = false; this.lock = new Object(); } @Override public Mono<Void> start() { synchronized (lock) { if (this.started) { throw new IllegalStateException("Partition load balancer already started"); } this.cancellationTokenSource = new CancellationTokenSource(); this.started = true; } return Mono.fromRunnable( () -> { scheduler.schedule(() -> this.run(this.cancellationTokenSource.getToken()).subscribe()); }); } @Override public Mono<Void> stop() { synchronized (lock) { this.started = false; this.cancellationTokenSource.cancel(); } return this.partitionController.shutdown(); } @Override public boolean isRunning() { 
return this.started; } }
I combined both cases. Earlier we only do the delay on success case, now we will also need to do on error case. Since they are using the same logic, so I combined into one
private Mono<Void> run(CancellationToken cancellationToken) { return Flux.just(this) .flatMap(value -> this.leaseContainer.getAllLeases()) .collectList() .flatMap(allLeases -> { if (cancellationToken.isCancellationRequested()) return Mono.empty(); List<Lease> leasesToTake = this.partitionLoadBalancingStrategy.selectLeasesToTake(allLeases); if (leasesToTake.size() > 0) { this.logger.info("Found {} total leases, taking ownership of {}", allLeases.size(), leasesToTake.size()); } if (cancellationToken.isCancellationRequested()) return Mono.empty(); return Flux.fromIterable(leasesToTake) .limitRate(1) .flatMap(lease -> { if (cancellationToken.isCancellationRequested()) return Mono.empty(); return this.partitionController.addOrUpdateLease(lease); }) .then(); }) .onErrorResume(throwable -> { logger.warn("Unexpected exception thrown while trying to acquire available leases", throwable); return Mono.empty(); }) .then( Mono.just(this) .flatMap(value -> { if (cancellationToken.isCancellationRequested()) { return Mono.empty(); } Instant stopTimer = Instant.now().plus(this.leaseAcquireInterval); return Mono.just(value) .delayElement(Duration.ofMillis(100), CosmosSchedulers.COSMOS_PARALLEL) .repeat(() -> { Instant currentTime = Instant.now(); return !cancellationToken.isCancellationRequested() && currentTime.isBefore(stopTimer); }) .then(); }) ) .repeat(() -> !cancellationToken.isCancellationRequested()) .then() .onErrorResume(throwable -> { logger.info("Partition load balancer task stopped."); return this.stop(); }); }
.then();
private Mono<Void> run(CancellationToken cancellationToken) { return Flux.just(this) .flatMap(value -> this.leaseContainer.getAllLeases()) .collectList() .flatMap(allLeases -> { if (cancellationToken.isCancellationRequested()) return Mono.empty(); List<Lease> leasesToTake = this.partitionLoadBalancingStrategy.selectLeasesToTake(allLeases); if (leasesToTake.size() > 0) { this.logger.info("Found {} total leases, taking ownership of {}", allLeases.size(), leasesToTake.size()); } if (cancellationToken.isCancellationRequested()) return Mono.empty(); return Flux.fromIterable(leasesToTake) .limitRate(1) .flatMap(lease -> { if (cancellationToken.isCancellationRequested()) return Mono.empty(); return this.partitionController.addOrUpdateLease(lease); }) .then(); }) .onErrorResume(throwable -> { logger.warn("Unexpected exception thrown while trying to acquire available leases", throwable); return Mono.empty(); }) .then( Mono.just(this) .flatMap(value -> { if (cancellationToken.isCancellationRequested()) { return Mono.empty(); } Instant stopTimer = Instant.now().plus(this.leaseAcquireInterval); return Mono.just(value) .delayElement(Duration.ofMillis(100), CosmosSchedulers.COSMOS_PARALLEL) .repeat(() -> { Instant currentTime = Instant.now(); return !cancellationToken.isCancellationRequested() && currentTime.isBefore(stopTimer); }) .then(); }) ) .repeat(() -> !cancellationToken.isCancellationRequested()) .then() .onErrorResume(throwable -> { logger.info("Partition load balancer task stopped."); return this.stop(); }); }
class PartitionLoadBalancerImpl implements PartitionLoadBalancer { private final Logger logger = LoggerFactory.getLogger(PartitionLoadBalancerImpl.class); private final PartitionController partitionController; private final LeaseContainer leaseContainer; private final PartitionLoadBalancingStrategy partitionLoadBalancingStrategy; private final Duration leaseAcquireInterval; private final Scheduler scheduler; private CancellationTokenSource cancellationTokenSource; private volatile boolean started; private final Object lock; public PartitionLoadBalancerImpl( PartitionController partitionController, LeaseContainer leaseContainer, PartitionLoadBalancingStrategy partitionLoadBalancingStrategy, Duration leaseAcquireInterval, Scheduler scheduler) { checkNotNull(partitionController, "Argument 'partitionController' can not be null"); checkNotNull(leaseContainer, "Argument 'leaseContainer' can not be null"); checkNotNull(partitionLoadBalancingStrategy, "Argument 'partitionLoadBalancingStrategy' can not be null"); checkNotNull(scheduler, "Argument 'scheduler' can not be null"); this.partitionController = partitionController; this.leaseContainer = leaseContainer; this.partitionLoadBalancingStrategy = partitionLoadBalancingStrategy; this.leaseAcquireInterval = leaseAcquireInterval; this.scheduler = scheduler; this.started = false; this.lock = new Object(); } @Override public Mono<Void> start() { synchronized (lock) { if (this.started) { throw new IllegalStateException("Partition load balancer already started"); } this.cancellationTokenSource = new CancellationTokenSource(); this.started = true; } return Mono.fromRunnable( () -> { scheduler.schedule(() -> this.run(this.cancellationTokenSource.getToken()).subscribe()); }); } @Override public Mono<Void> stop() { synchronized (lock) { this.started = false; this.cancellationTokenSource.cancel(); } return this.partitionController.shutdown(); } @Override public boolean isRunning() { return this.started; } }
class PartitionLoadBalancerImpl implements PartitionLoadBalancer { private final Logger logger = LoggerFactory.getLogger(PartitionLoadBalancerImpl.class); private final PartitionController partitionController; private final LeaseContainer leaseContainer; private final PartitionLoadBalancingStrategy partitionLoadBalancingStrategy; private final Duration leaseAcquireInterval; private final Scheduler scheduler; private CancellationTokenSource cancellationTokenSource; private volatile boolean started; private final Object lock; public PartitionLoadBalancerImpl( PartitionController partitionController, LeaseContainer leaseContainer, PartitionLoadBalancingStrategy partitionLoadBalancingStrategy, Duration leaseAcquireInterval, Scheduler scheduler) { checkNotNull(partitionController, "Argument 'partitionController' can not be null"); checkNotNull(leaseContainer, "Argument 'leaseContainer' can not be null"); checkNotNull(partitionLoadBalancingStrategy, "Argument 'partitionLoadBalancingStrategy' can not be null"); checkNotNull(scheduler, "Argument 'scheduler' can not be null"); this.partitionController = partitionController; this.leaseContainer = leaseContainer; this.partitionLoadBalancingStrategy = partitionLoadBalancingStrategy; this.leaseAcquireInterval = leaseAcquireInterval; this.scheduler = scheduler; this.started = false; this.lock = new Object(); } @Override public Mono<Void> start() { synchronized (lock) { if (this.started) { throw new IllegalStateException("Partition load balancer already started"); } this.cancellationTokenSource = new CancellationTokenSource(); this.started = true; } return Mono.fromRunnable( () -> { scheduler.schedule(() -> this.run(this.cancellationTokenSource.getToken()).subscribe()); }); } @Override public Mono<Void> stop() { synchronized (lock) { this.started = false; this.cancellationTokenSource.cancel(); } return this.partitionController.shutdown(); } @Override public boolean isRunning() { return this.started; } }
recursive call?
public Mono<Lease> createLeaseIfNotExist(String leaseToken, String continuationToken) { return this.createLeaseIfNotExist(leaseToken, continuationToken); }
return this.createLeaseIfNotExist(leaseToken, continuationToken);
public Mono<Lease> createLeaseIfNotExist(String leaseToken, String continuationToken) { return this.createLeaseIfNotExist(leaseToken, continuationToken, null); }
class LeaseStoreManagerImpl implements LeaseStoreManager, LeaseStoreManager.LeaseStoreManagerBuilderDefinition { private final String LEASE_STORE_MANAGER_LEASE_SUFFIX = ".."; private final Logger logger = LoggerFactory.getLogger(LeaseStoreManagerImpl.class); private LeaseStoreManagerSettings settings; private ChangeFeedContextClient leaseDocumentClient; private RequestOptionsFactory requestOptionsFactory; private ServiceItemLeaseUpdater leaseUpdater; private LeaseStore leaseStore; public static LeaseStoreManagerBuilderDefinition builder() { return new LeaseStoreManagerImpl(); } public LeaseStoreManagerImpl() { this.settings = new LeaseStoreManagerSettings(); } @Override public LeaseStoreManagerBuilderDefinition leaseContextClient(ChangeFeedContextClient leaseContextClient) { checkNotNull(leaseContextClient, "Argument 'leaseContextClient' can not be null"); this.leaseDocumentClient = leaseContextClient; return this; } @Override public LeaseStoreManagerBuilderDefinition leasePrefix(String leasePrefix) { checkNotNull(leasePrefix, "Argument 'leasePrefix' can not be null"); this.settings.withContainerNamePrefix(leasePrefix); return this; } @Override public LeaseStoreManagerBuilderDefinition leaseCollectionLink(CosmosAsyncContainer leaseCollectionLink) { checkNotNull(leaseCollectionLink, "Argument 'leaseCollectionLink' can not be null"); this.settings.withLeaseCollectionLink(leaseCollectionLink); return this; } @Override public LeaseStoreManagerBuilderDefinition requestOptionsFactory(RequestOptionsFactory requestOptionsFactory) { checkNotNull(requestOptionsFactory, "Argument 'requestOptionsFactory' can not be null"); this.requestOptionsFactory = requestOptionsFactory; return this; } @Override public LeaseStoreManagerBuilderDefinition hostName(String hostName) { checkNotNull(hostName, "Argument 'hostName' can not be null"); this.settings.withHostName(hostName); return this; } @Override public LeaseStoreManager build() { checkNotNull(this.settings, "settings can not be 
null"); checkNotNull(this.settings.getContainerNamePrefix(), "settings.containerNamePrefix can not be null"); checkNotNull(this.settings.getLeaseCollectionLink(), "settings.leaseCollectionLink can not be null"); checkArgument(StringUtils.isNotEmpty(this.settings.getHostName()), "settings.getHostName can not be null nor empty"); checkNotNull(this.leaseDocumentClient, "leaseDocumentClient can not be null"); checkNotNull(this.requestOptionsFactory, "requestOptionsFactory can not be null"); if (this.leaseUpdater == null) { this.leaseUpdater = new DocumentServiceLeaseUpdaterImpl(leaseDocumentClient); } this.leaseStore = new LeaseStoreImpl( this.leaseDocumentClient, this.settings.getContainerNamePrefix(), this.settings.getLeaseCollectionLink(), this.requestOptionsFactory); return this; } @Override public Flux<Lease> getAllLeases() { return this.listDocuments(this.getPartitionLeasePrefix()) .map(documentServiceLease -> documentServiceLease); } @Override public Flux<Lease> getOwnedLeases() { return this.getAllLeases() .filter(lease -> lease.getOwner() != null && lease.getOwner().equalsIgnoreCase(this.settings.getHostName())); } @Override @Override public Mono<Lease> createLeaseIfNotExist(String leaseToken, String continuationToken, Map<String, String> properties) { throw new UnsupportedOperationException("partition key based leases are not supported for Change Feed V1 wire format"); } @Override public Mono<Lease> createLeaseIfNotExist(FeedRangeEpkImpl feedRange, String continuationToken) { return this.createLeaseIfNotExist(feedRange, continuationToken, null); } @Override public Mono<Lease> createLeaseIfNotExist(FeedRangeEpkImpl feedRange, String continuationToken, Map<String, String> properties) { checkNotNull(feedRange, "Argument 'feedRanges' should not be null"); String leaseToken = feedRange.getRange().getMin() + "-" + feedRange.getRange().getMax(); String leaseDocId = this.getDocumentId(leaseToken); ServiceItemLeaseV1 documentServiceLease = new ServiceItemLeaseV1() 
.withVersion(LeaseVersion.EPK_RANGE_BASED_LEASE) .withId(leaseDocId) .withLeaseToken(leaseToken) .withFeedRange(feedRange) .withContinuationToken(continuationToken) .withProperties(properties); return this.leaseDocumentClient.createItem(this.settings.getLeaseCollectionLink(), documentServiceLease, null, false) .onErrorResume( ex -> { if (ex instanceof CosmosException) { CosmosException e = (CosmosException) ex; if (Exceptions.isConflict(e)) { logger.info("Some other host created lease for {}.", leaseToken); return Mono.empty(); } } return Mono.error(ex); }) .map(documentResourceResponse -> { if (documentResourceResponse == null) { return null; } InternalObjectNode document = BridgeInternal.getProperties(documentResourceResponse); return documentServiceLease .withId(document.getId()) .withETag(document.getETag()) .withTs(ModelBridgeInternal.getStringFromJsonSerializable(document, Constants.Properties.LAST_MODIFIED)); }); } @Override public Mono<Void> delete(Lease lease) { if (lease == null || lease.getId() == null) { throw new IllegalArgumentException("lease"); } return this.leaseDocumentClient .deleteItem(lease.getId(), new PartitionKey(lease.getId()), this.requestOptionsFactory.createItemRequestOptions(lease)) .onErrorResume( ex -> { if (ex instanceof CosmosException) { CosmosException e = (CosmosException) ex; if (Exceptions.isNotFound(e)) { return Mono.empty(); } } return Mono.error(ex); }) .map( documentResourceResponse -> true) .then(); } @Override public Mono<Void> deleteAll(List<Lease> leases) { checkNotNull(leases, "Argument 'leases' can not be null"); logger.info("Deleting all leases"); Map<String, CosmosItemIdentity> cosmosIdentityMap = new HashMap<>(); for (Lease lease : leases) { cosmosIdentityMap.put(lease.getId(), new CosmosItemIdentity(new PartitionKey(lease.getId()), lease.getId())); } return Mono.defer(() -> Mono.just(cosmosIdentityMap)) .flatMapMany(itemIdentities -> 
this.leaseDocumentClient.deleteAllItems(cosmosIdentityMap.values().stream().collect(Collectors.toList()))) .flatMap(itemResponse -> { if (itemResponse.getResponse() != null && itemResponse.getResponse().isSuccessStatusCode()) { cosmosIdentityMap.remove(itemResponse.getOperation().getId()); } else { int effectiveStatusCode = 0; int effectiveSubStatusCode = 0; if (itemResponse.getResponse() != null) { effectiveStatusCode = itemResponse.getResponse().getStatusCode(); effectiveSubStatusCode = itemResponse.getResponse().getStatusCode(); } else if (itemResponse.getException() != null && itemResponse.getException() instanceof CosmosException) { CosmosException cosmosException = (CosmosException) itemResponse.getException(); effectiveStatusCode = cosmosException.getStatusCode(); effectiveSubStatusCode = cosmosException.getSubStatusCode(); } if (effectiveStatusCode == ChangeFeedHelper.HTTP_STATUS_CODE_NOT_FOUND && effectiveSubStatusCode == 0) { cosmosIdentityMap.remove(itemResponse.getOperation().getId()); } } return Mono.empty(); }) .repeat(() -> cosmosIdentityMap.size() != 0) .then(); } @Override public Mono<Lease> acquire(Lease lease) { if (lease == null) { throw new IllegalArgumentException("lease"); } String oldOwner = lease.getOwner(); return this.leaseUpdater.updateLease( lease, lease.getId(), new PartitionKey(lease.getId()), this.requestOptionsFactory.createItemRequestOptions(lease), serverLease -> { if (serverLease.getOwner() != null && !serverLease.getOwner().equalsIgnoreCase(oldOwner)) { logger.info("Lease with token {} : lease was acquired already by owner '{}'", lease.getLeaseToken(), serverLease.getOwner()); throw new LeaseLostException(lease); } serverLease.setOwner(this.settings.getHostName()); serverLease.setProperties(lease.getProperties()); return serverLease; }); } @Override public Mono<Void> release(Lease lease) { if (lease == null) { throw new IllegalArgumentException("lease"); } return this.leaseDocumentClient.readItem(lease.getId(), new 
PartitionKey(lease.getId()), this.requestOptionsFactory.createItemRequestOptions(lease), InternalObjectNode.class) .onErrorResume( ex -> { if (ex instanceof CosmosException) { CosmosException e = (CosmosException) ex; if (Exceptions.isNotFound(e)) { logger.info("Lease with token {} : failed to renew lease. The lease is gone already.", lease.getLeaseToken()); throw new LeaseLostException(lease); } } return Mono.error(ex); }) .map( documentResourceResponse -> ServiceItemLeaseV1.fromDocument(BridgeInternal.getProperties(documentResourceResponse))) .flatMap( refreshedLease -> this.leaseUpdater.updateLease( refreshedLease, lease.getId(), new PartitionKey(lease.getId()), this.requestOptionsFactory.createItemRequestOptions(lease), serverLease -> { if (serverLease.getOwner() != null && !serverLease.getOwner().equalsIgnoreCase(lease.getOwner())) { logger.info("Lease with token {} : no need to release lease. The lease was already taken by another host '{}'.", lease.getLeaseToken(), serverLease.getOwner()); throw new LeaseLostException(lease); } serverLease.setOwner(null); return serverLease; }) ).then(); } @Override public Mono<Lease> renew(Lease lease) { if (lease == null) { throw new IllegalArgumentException("lease"); } return this.leaseDocumentClient.readItem(lease.getId(), new PartitionKey(lease.getId()), this.requestOptionsFactory.createItemRequestOptions(lease), InternalObjectNode.class) .onErrorResume( ex -> { if (ex instanceof CosmosException) { CosmosException e = (CosmosException) ex; if (Exceptions.isNotFound(e)) { logger.info("Lease with token {} : failed to renew lease. 
The lease is gone already.", lease.getLeaseToken()); throw new LeaseLostException(lease); } } return Mono.error(ex); }) .map( documentResourceResponse -> ServiceItemLeaseV1.fromDocument(BridgeInternal.getProperties(documentResourceResponse))) .flatMap( refreshedLease -> this.leaseUpdater.updateLease( refreshedLease, lease.getId(), new PartitionKey(lease.getId()), this.requestOptionsFactory.createItemRequestOptions(lease), serverLease -> { if (serverLease.getOwner() == null) { logger.info("Lease with token {} : lease was taken over and released by a different owner", lease.getLeaseToken()); throw new LeaseLostException(lease); } else if (!serverLease.getOwner().equalsIgnoreCase(lease.getOwner())) { logger.info("Lease with token {} : lease was taken over by owner '{}'", lease.getLeaseToken(), serverLease.getOwner()); throw new LeaseLostException(lease); } return serverLease; }) ); } @Override public Mono<Lease> updateProperties(Lease lease) { if (lease == null) { throw new IllegalArgumentException("lease"); } if (lease.getOwner() != null && !lease.getOwner().equalsIgnoreCase(this.settings.getHostName())) { logger.info("Lease with token '{}' : lease was taken over by owner '{}' before lease item update", lease.getLeaseToken(), lease.getOwner()); throw new LeaseLostException(lease); } return this.leaseUpdater.updateLease( lease, lease.getId(), new PartitionKey(lease.getId()), this.requestOptionsFactory.createItemRequestOptions(lease), serverLease -> { if (serverLease.getOwner() != null && !serverLease.getOwner().equalsIgnoreCase(lease.getOwner())) { logger.info("Lease with token '{}' : lease was taken over by owner '{}'", lease.getLeaseToken(), serverLease.getOwner()); throw new LeaseLostException(lease); } serverLease.setProperties(lease.getProperties()); return serverLease; }); } @Override public Mono<Lease> checkpoint(Lease lease, String continuationToken, CancellationToken cancellationToken) { if (lease == null) { throw new IllegalArgumentException("lease"); } if 
(continuationToken == null || continuationToken.isEmpty()) { throw new IllegalArgumentException("continuationToken must be a non-empty string"); } if (cancellationToken.isCancellationRequested()) return Mono.error(new TaskCancelledException()); return this.leaseDocumentClient.readItem(lease.getId(), new PartitionKey(lease.getId()), this.requestOptionsFactory.createItemRequestOptions(lease), InternalObjectNode.class) .map(documentResourceResponse -> ServiceItemLeaseV1.fromDocument(BridgeInternal.getProperties(documentResourceResponse))) .flatMap(refreshedLease -> { if (cancellationToken.isCancellationRequested()) return Mono.error(new TaskCancelledException()); return this.leaseUpdater.updateLease( refreshedLease, lease.getId(), new PartitionKey(lease.getId()), this.requestOptionsFactory.createItemRequestOptions(lease), serverLease -> { if (serverLease.getOwner() == null) { logger.info("Lease with token {} : lease was taken over and released by a different owner", lease.getLeaseToken()); throw new LeaseLostException(lease); } else if (!serverLease.getOwner().equalsIgnoreCase(lease.getOwner())) { logger.info("Lease with token {} : lease was taken over by owner '{}'", lease.getLeaseToken(), serverLease.getOwner()); throw new LeaseLostException(lease); } serverLease.setContinuationToken(continuationToken); return serverLease; }); }) .doOnError(throwable -> { logger.info("Lease with token {} : lease with token '{}' failed to checkpoint for owner '{}' with continuation token '{}'", lease.getLeaseToken(), lease.getConcurrencyToken(), lease.getOwner(), lease.getReadableContinuationToken()); }); } @Override public Mono<Boolean> isInitialized() { return this.leaseStore.isInitialized(); } @Override public Mono<Boolean> markInitialized() { return this.leaseStore.markInitialized(); } @Override public Mono<Boolean> acquireInitializationLock(Duration lockExpirationTime) { return this.leaseStore.acquireInitializationLock(lockExpirationTime); } @Override public Mono<Boolean> 
releaseInitializationLock() { return this.leaseStore.releaseInitializationLock(); } private Flux<ServiceItemLeaseV1> listDocuments(String prefix) { if (prefix == null || prefix.isEmpty()) { throw new IllegalArgumentException("prefix"); } SqlParameter param = new SqlParameter(); param.setName("@PartitionLeasePrefix"); param.setValue(prefix); SqlQuerySpec querySpec = new SqlQuerySpec( "SELECT * FROM c WHERE STARTSWITH(c.id, @PartitionLeasePrefix)", Collections.singletonList(param)); Flux<FeedResponse<InternalObjectNode>> query = this.leaseDocumentClient.queryItems( this.settings.getLeaseCollectionLink(), querySpec, this.requestOptionsFactory.createQueryRequestOptions(), InternalObjectNode.class); return query.flatMap( documentFeedResponse -> Flux.fromIterable(documentFeedResponse.getResults())) .map(ServiceItemLeaseV1::fromDocument); } private String getDocumentId(String leaseToken) { return this.getPartitionLeasePrefix() + leaseToken; } private String getPartitionLeasePrefix() { return this.settings.getContainerNamePrefix() + LEASE_STORE_MANAGER_LEASE_SUFFIX; } }
class LeaseStoreManagerImpl implements LeaseStoreManager, LeaseStoreManager.LeaseStoreManagerBuilderDefinition { private final String LEASE_STORE_MANAGER_LEASE_SUFFIX = ".."; private final Logger logger = LoggerFactory.getLogger(LeaseStoreManagerImpl.class); private LeaseStoreManagerSettings settings; private ChangeFeedContextClient leaseDocumentClient; private RequestOptionsFactory requestOptionsFactory; private ServiceItemLeaseUpdater leaseUpdater; private LeaseStore leaseStore; public static LeaseStoreManagerBuilderDefinition builder() { return new LeaseStoreManagerImpl(); } public LeaseStoreManagerImpl() { this.settings = new LeaseStoreManagerSettings(); } @Override public LeaseStoreManagerBuilderDefinition leaseContextClient(ChangeFeedContextClient leaseContextClient) { checkNotNull(leaseContextClient, "Argument 'leaseContextClient' can not be null"); this.leaseDocumentClient = leaseContextClient; return this; } @Override public LeaseStoreManagerBuilderDefinition leasePrefix(String leasePrefix) { checkNotNull(leasePrefix, "Argument 'leasePrefix' can not be null"); this.settings.withContainerNamePrefix(leasePrefix); return this; } @Override public LeaseStoreManagerBuilderDefinition leaseCollectionLink(CosmosAsyncContainer leaseCollectionLink) { checkNotNull(leaseCollectionLink, "Argument 'leaseCollectionLink' can not be null"); this.settings.withLeaseCollectionLink(leaseCollectionLink); return this; } @Override public LeaseStoreManagerBuilderDefinition requestOptionsFactory(RequestOptionsFactory requestOptionsFactory) { checkNotNull(requestOptionsFactory, "Argument 'requestOptionsFactory' can not be null"); this.requestOptionsFactory = requestOptionsFactory; return this; } @Override public LeaseStoreManagerBuilderDefinition hostName(String hostName) { checkNotNull(hostName, "Argument 'hostName' can not be null"); this.settings.withHostName(hostName); return this; } @Override public LeaseStoreManager build() { checkNotNull(this.settings, "settings can not be 
null"); checkNotNull(this.settings.getContainerNamePrefix(), "settings.containerNamePrefix can not be null"); checkNotNull(this.settings.getLeaseCollectionLink(), "settings.leaseCollectionLink can not be null"); checkArgument(StringUtils.isNotEmpty(this.settings.getHostName()), "settings.getHostName can not be null nor empty"); checkNotNull(this.leaseDocumentClient, "leaseDocumentClient can not be null"); checkNotNull(this.requestOptionsFactory, "requestOptionsFactory can not be null"); if (this.leaseUpdater == null) { this.leaseUpdater = new DocumentServiceLeaseUpdaterImpl(leaseDocumentClient); } this.leaseStore = new LeaseStoreImpl( this.leaseDocumentClient, this.settings.getContainerNamePrefix(), this.settings.getLeaseCollectionLink(), this.requestOptionsFactory); return this; } @Override public Flux<Lease> getAllLeases() { return this.listDocuments(this.getPartitionLeasePrefix()) .map(documentServiceLease -> documentServiceLease); } @Override public Flux<Lease> getOwnedLeases() { return this.getAllLeases() .filter(lease -> lease.getOwner() != null && lease.getOwner().equalsIgnoreCase(this.settings.getHostName())); } @Override @Override public Mono<Lease> createLeaseIfNotExist(String leaseToken, String continuationToken, Map<String, String> properties) { throw new UnsupportedOperationException("partition key based leases are not supported for Change Feed V1 wire format"); } @Override public Mono<Lease> createLeaseIfNotExist(FeedRangeEpkImpl feedRange, String continuationToken) { return this.createLeaseIfNotExist(feedRange, continuationToken, null); } @Override public Mono<Lease> createLeaseIfNotExist(FeedRangeEpkImpl feedRange, String continuationToken, Map<String, String> properties) { checkNotNull(feedRange, "Argument 'feedRanges' should not be null"); String leaseToken = feedRange.getRange().getMin() + "-" + feedRange.getRange().getMax(); String leaseDocId = this.getDocumentId(leaseToken); ServiceItemLeaseV1 documentServiceLease = new ServiceItemLeaseV1() 
.withVersion(LeaseVersion.EPK_RANGE_BASED_LEASE) .withId(leaseDocId) .withLeaseToken(leaseToken) .withFeedRange(feedRange) .withContinuationToken(continuationToken) .withProperties(properties); return this.leaseDocumentClient.createItem(this.settings.getLeaseCollectionLink(), documentServiceLease, null, false) .onErrorResume( ex -> { if (ex instanceof CosmosException) { CosmosException e = (CosmosException) ex; if (Exceptions.isConflict(e)) { logger.info("Some other host created lease for {}.", leaseToken); return Mono.empty(); } } return Mono.error(ex); }) .map(documentResourceResponse -> { if (documentResourceResponse == null) { return null; } InternalObjectNode document = BridgeInternal.getProperties(documentResourceResponse); return documentServiceLease .withId(document.getId()) .withETag(document.getETag()) .withTs(ModelBridgeInternal.getStringFromJsonSerializable(document, Constants.Properties.LAST_MODIFIED)); }); } @Override public Mono<Void> delete(Lease lease) { if (lease == null || lease.getId() == null) { throw new IllegalArgumentException("lease"); } return this.leaseDocumentClient .deleteItem(lease.getId(), new PartitionKey(lease.getId()), this.requestOptionsFactory.createItemRequestOptions(lease)) .onErrorResume( ex -> { if (ex instanceof CosmosException) { CosmosException e = (CosmosException) ex; if (Exceptions.isNotFound(e)) { return Mono.empty(); } } return Mono.error(ex); }) .map( documentResourceResponse -> true) .then(); } @Override public Mono<Void> deleteAll(List<Lease> leases) { checkNotNull(leases, "Argument 'leases' can not be null"); logger.info("Deleting all leases"); Map<String, CosmosItemIdentity> cosmosIdentityMap = new HashMap<>(); for (Lease lease : leases) { cosmosIdentityMap.put(lease.getId(), new CosmosItemIdentity(new PartitionKey(lease.getId()), lease.getId())); } return Mono.defer(() -> Mono.just(cosmosIdentityMap)) .flatMapMany(itemIdentities -> 
this.leaseDocumentClient.deleteAllItems(cosmosIdentityMap.values().stream().collect(Collectors.toList()))) .flatMap(itemResponse -> { if (itemResponse.getResponse() != null && itemResponse.getResponse().isSuccessStatusCode()) { cosmosIdentityMap.remove(itemResponse.getOperation().getId()); } else { int effectiveStatusCode = 0; int effectiveSubStatusCode = 0; if (itemResponse.getResponse() != null) { effectiveStatusCode = itemResponse.getResponse().getStatusCode(); effectiveSubStatusCode = itemResponse.getResponse().getStatusCode(); } else if (itemResponse.getException() != null && itemResponse.getException() instanceof CosmosException) { CosmosException cosmosException = (CosmosException) itemResponse.getException(); effectiveStatusCode = cosmosException.getStatusCode(); effectiveSubStatusCode = cosmosException.getSubStatusCode(); } if (effectiveStatusCode == ChangeFeedHelper.HTTP_STATUS_CODE_NOT_FOUND && effectiveSubStatusCode == 0) { cosmosIdentityMap.remove(itemResponse.getOperation().getId()); } } return Mono.empty(); }) .repeat(() -> cosmosIdentityMap.size() != 0) .then(); } @Override public Mono<Lease> acquire(Lease lease) { if (lease == null) { throw new IllegalArgumentException("lease"); } String oldOwner = lease.getOwner(); return this.leaseUpdater.updateLease( lease, lease.getId(), new PartitionKey(lease.getId()), this.requestOptionsFactory.createItemRequestOptions(lease), serverLease -> { if (serverLease.getOwner() != null && !serverLease.getOwner().equalsIgnoreCase(oldOwner)) { logger.info("Lease with token {} : lease was acquired already by owner '{}'", lease.getLeaseToken(), serverLease.getOwner()); throw new LeaseLostException(lease); } serverLease.setOwner(this.settings.getHostName()); serverLease.setProperties(lease.getProperties()); return serverLease; }); } @Override public Mono<Void> release(Lease lease) { if (lease == null) { throw new IllegalArgumentException("lease"); } return this.leaseDocumentClient.readItem(lease.getId(), new 
PartitionKey(lease.getId()), this.requestOptionsFactory.createItemRequestOptions(lease), InternalObjectNode.class) .onErrorResume( ex -> { if (ex instanceof CosmosException) { CosmosException e = (CosmosException) ex; if (Exceptions.isNotFound(e)) { logger.info("Lease with token {} : failed to renew lease. The lease is gone already.", lease.getLeaseToken()); throw new LeaseLostException(lease); } } return Mono.error(ex); }) .map( documentResourceResponse -> ServiceItemLeaseV1.fromDocument(BridgeInternal.getProperties(documentResourceResponse))) .flatMap( refreshedLease -> this.leaseUpdater.updateLease( refreshedLease, lease.getId(), new PartitionKey(lease.getId()), this.requestOptionsFactory.createItemRequestOptions(lease), serverLease -> { if (serverLease.getOwner() != null && !serverLease.getOwner().equalsIgnoreCase(lease.getOwner())) { logger.info("Lease with token {} : no need to release lease. The lease was already taken by another host '{}'.", lease.getLeaseToken(), serverLease.getOwner()); throw new LeaseLostException(lease); } serverLease.setOwner(null); return serverLease; }) ).then(); } @Override public Mono<Lease> renew(Lease lease) { if (lease == null) { throw new IllegalArgumentException("lease"); } return this.leaseDocumentClient.readItem(lease.getId(), new PartitionKey(lease.getId()), this.requestOptionsFactory.createItemRequestOptions(lease), InternalObjectNode.class) .onErrorResume( ex -> { if (ex instanceof CosmosException) { CosmosException e = (CosmosException) ex; if (Exceptions.isNotFound(e)) { logger.info("Lease with token {} : failed to renew lease. 
The lease is gone already.", lease.getLeaseToken()); throw new LeaseLostException(lease); } } return Mono.error(ex); }) .map( documentResourceResponse -> ServiceItemLeaseV1.fromDocument(BridgeInternal.getProperties(documentResourceResponse))) .flatMap( refreshedLease -> this.leaseUpdater.updateLease( refreshedLease, lease.getId(), new PartitionKey(lease.getId()), this.requestOptionsFactory.createItemRequestOptions(lease), serverLease -> { if (serverLease.getOwner() == null) { logger.info("Lease with token {} : lease was taken over and released by a different owner", lease.getLeaseToken()); throw new LeaseLostException(lease); } else if (!serverLease.getOwner().equalsIgnoreCase(lease.getOwner())) { logger.info("Lease with token {} : lease was taken over by owner '{}'", lease.getLeaseToken(), serverLease.getOwner()); throw new LeaseLostException(lease); } return serverLease; }) ); } @Override public Mono<Lease> updateProperties(Lease lease) { if (lease == null) { throw new IllegalArgumentException("lease"); } if (lease.getOwner() != null && !lease.getOwner().equalsIgnoreCase(this.settings.getHostName())) { logger.info("Lease with token '{}' : lease was taken over by owner '{}' before lease item update", lease.getLeaseToken(), lease.getOwner()); throw new LeaseLostException(lease); } return this.leaseUpdater.updateLease( lease, lease.getId(), new PartitionKey(lease.getId()), this.requestOptionsFactory.createItemRequestOptions(lease), serverLease -> { if (serverLease.getOwner() != null && !serverLease.getOwner().equalsIgnoreCase(lease.getOwner())) { logger.info("Lease with token '{}' : lease was taken over by owner '{}'", lease.getLeaseToken(), serverLease.getOwner()); throw new LeaseLostException(lease); } serverLease.setProperties(lease.getProperties()); return serverLease; }); } @Override public Mono<Lease> checkpoint(Lease lease, String continuationToken, CancellationToken cancellationToken) { if (lease == null) { throw new IllegalArgumentException("lease"); } if 
(continuationToken == null || continuationToken.isEmpty()) { throw new IllegalArgumentException("continuationToken must be a non-empty string"); } if (cancellationToken.isCancellationRequested()) return Mono.error(new TaskCancelledException()); return this.leaseDocumentClient.readItem(lease.getId(), new PartitionKey(lease.getId()), this.requestOptionsFactory.createItemRequestOptions(lease), InternalObjectNode.class) .map(documentResourceResponse -> ServiceItemLeaseV1.fromDocument(BridgeInternal.getProperties(documentResourceResponse))) .flatMap(refreshedLease -> { if (cancellationToken.isCancellationRequested()) return Mono.error(new TaskCancelledException()); return this.leaseUpdater.updateLease( refreshedLease, lease.getId(), new PartitionKey(lease.getId()), this.requestOptionsFactory.createItemRequestOptions(lease), serverLease -> { if (serverLease.getOwner() == null) { logger.info("Lease with token {} : lease was taken over and released by a different owner", lease.getLeaseToken()); throw new LeaseLostException(lease); } else if (!serverLease.getOwner().equalsIgnoreCase(lease.getOwner())) { logger.info("Lease with token {} : lease was taken over by owner '{}'", lease.getLeaseToken(), serverLease.getOwner()); throw new LeaseLostException(lease); } serverLease.setContinuationToken(continuationToken); return serverLease; }); }) .doOnError(throwable -> { logger.info("Lease with token {} : lease with token '{}' failed to checkpoint for owner '{}' with continuation token '{}'", lease.getLeaseToken(), lease.getConcurrencyToken(), lease.getOwner(), lease.getReadableContinuationToken()); }); } @Override public Mono<Boolean> isInitialized() { return this.leaseStore.isInitialized(); } @Override public Mono<Boolean> markInitialized() { return this.leaseStore.markInitialized(); } @Override public Mono<Boolean> acquireInitializationLock(Duration lockExpirationTime) { return this.leaseStore.acquireInitializationLock(lockExpirationTime); } @Override public Mono<Boolean> 
releaseInitializationLock() { return this.leaseStore.releaseInitializationLock(); } private Flux<ServiceItemLeaseV1> listDocuments(String prefix) { if (prefix == null || prefix.isEmpty()) { throw new IllegalArgumentException("prefix"); } SqlParameter param = new SqlParameter(); param.setName("@PartitionLeasePrefix"); param.setValue(prefix); SqlQuerySpec querySpec = new SqlQuerySpec( "SELECT * FROM c WHERE STARTSWITH(c.id, @PartitionLeasePrefix)", Collections.singletonList(param)); Flux<FeedResponse<InternalObjectNode>> query = this.leaseDocumentClient.queryItems( this.settings.getLeaseCollectionLink(), querySpec, this.requestOptionsFactory.createQueryRequestOptions(), InternalObjectNode.class); return query.flatMap( documentFeedResponse -> Flux.fromIterable(documentFeedResponse.getResults())) .map(ServiceItemLeaseV1::fromDocument); } private String getDocumentId(String leaseToken) { return this.getPartitionLeasePrefix() + leaseToken; } private String getPartitionLeasePrefix() { return this.settings.getContainerNamePrefix() + LEASE_STORE_MANAGER_LEASE_SUFFIX; } }
Good catch, updated
/**
 * Convenience overload without custom lease properties.
 *
 * FIX: the original called itself with the same two arguments — unbounded
 * recursion that would throw StackOverflowError at runtime. It must delegate
 * to the three-argument overload with {@code null} properties.
 */
public Mono<Lease> createLeaseIfNotExist(String leaseToken, String continuationToken) {
    return this.createLeaseIfNotExist(leaseToken, continuationToken, null);
}
return this.createLeaseIfNotExist(leaseToken, continuationToken);
/**
 * Convenience overload without custom lease properties; forwards to the
 * three-argument variant, passing {@code null} for the properties map.
 */
public Mono<Lease> createLeaseIfNotExist(String leaseToken, String continuationToken) {
    return this.createLeaseIfNotExist(
        leaseToken,
        continuationToken,
        null);
}
class LeaseStoreManagerImpl implements LeaseStoreManager, LeaseStoreManager.LeaseStoreManagerBuilderDefinition { private final String LEASE_STORE_MANAGER_LEASE_SUFFIX = ".."; private final Logger logger = LoggerFactory.getLogger(LeaseStoreManagerImpl.class); private LeaseStoreManagerSettings settings; private ChangeFeedContextClient leaseDocumentClient; private RequestOptionsFactory requestOptionsFactory; private ServiceItemLeaseUpdater leaseUpdater; private LeaseStore leaseStore; public static LeaseStoreManagerBuilderDefinition builder() { return new LeaseStoreManagerImpl(); } public LeaseStoreManagerImpl() { this.settings = new LeaseStoreManagerSettings(); } @Override public LeaseStoreManagerBuilderDefinition leaseContextClient(ChangeFeedContextClient leaseContextClient) { checkNotNull(leaseContextClient, "Argument 'leaseContextClient' can not be null"); this.leaseDocumentClient = leaseContextClient; return this; } @Override public LeaseStoreManagerBuilderDefinition leasePrefix(String leasePrefix) { checkNotNull(leasePrefix, "Argument 'leasePrefix' can not be null"); this.settings.withContainerNamePrefix(leasePrefix); return this; } @Override public LeaseStoreManagerBuilderDefinition leaseCollectionLink(CosmosAsyncContainer leaseCollectionLink) { checkNotNull(leaseCollectionLink, "Argument 'leaseCollectionLink' can not be null"); this.settings.withLeaseCollectionLink(leaseCollectionLink); return this; } @Override public LeaseStoreManagerBuilderDefinition requestOptionsFactory(RequestOptionsFactory requestOptionsFactory) { checkNotNull(requestOptionsFactory, "Argument 'requestOptionsFactory' can not be null"); this.requestOptionsFactory = requestOptionsFactory; return this; } @Override public LeaseStoreManagerBuilderDefinition hostName(String hostName) { checkNotNull(hostName, "Argument 'hostName' can not be null"); this.settings.withHostName(hostName); return this; } @Override public LeaseStoreManager build() { checkNotNull(this.settings, "settings can not be 
null"); checkNotNull(this.settings.getContainerNamePrefix(), "settings.containerNamePrefix can not be null"); checkNotNull(this.settings.getLeaseCollectionLink(), "settings.leaseCollectionLink can not be null"); checkArgument(StringUtils.isNotEmpty(this.settings.getHostName()), "settings.getHostName can not be null nor empty"); checkNotNull(this.leaseDocumentClient, "leaseDocumentClient can not be null"); checkNotNull(this.requestOptionsFactory, "requestOptionsFactory can not be null"); if (this.leaseUpdater == null) { this.leaseUpdater = new DocumentServiceLeaseUpdaterImpl(leaseDocumentClient); } this.leaseStore = new LeaseStoreImpl( this.leaseDocumentClient, this.settings.getContainerNamePrefix(), this.settings.getLeaseCollectionLink(), this.requestOptionsFactory); return this; } @Override public Flux<Lease> getAllLeases() { return this.listDocuments(this.getPartitionLeasePrefix()) .map(documentServiceLease -> documentServiceLease); } @Override public Flux<Lease> getOwnedLeases() { return this.getAllLeases() .filter(lease -> lease.getOwner() != null && lease.getOwner().equalsIgnoreCase(this.settings.getHostName())); } @Override @Override public Mono<Lease> createLeaseIfNotExist(String leaseToken, String continuationToken, Map<String, String> properties) { throw new UnsupportedOperationException("partition key based leases are not supported for Change Feed V1 wire format"); } @Override public Mono<Lease> createLeaseIfNotExist(FeedRangeEpkImpl feedRange, String continuationToken) { return this.createLeaseIfNotExist(feedRange, continuationToken, null); } @Override public Mono<Lease> createLeaseIfNotExist(FeedRangeEpkImpl feedRange, String continuationToken, Map<String, String> properties) { checkNotNull(feedRange, "Argument 'feedRanges' should not be null"); String leaseToken = feedRange.getRange().getMin() + "-" + feedRange.getRange().getMax(); String leaseDocId = this.getDocumentId(leaseToken); ServiceItemLeaseV1 documentServiceLease = new ServiceItemLeaseV1() 
.withVersion(LeaseVersion.EPK_RANGE_BASED_LEASE) .withId(leaseDocId) .withLeaseToken(leaseToken) .withFeedRange(feedRange) .withContinuationToken(continuationToken) .withProperties(properties); return this.leaseDocumentClient.createItem(this.settings.getLeaseCollectionLink(), documentServiceLease, null, false) .onErrorResume( ex -> { if (ex instanceof CosmosException) { CosmosException e = (CosmosException) ex; if (Exceptions.isConflict(e)) { logger.info("Some other host created lease for {}.", leaseToken); return Mono.empty(); } } return Mono.error(ex); }) .map(documentResourceResponse -> { if (documentResourceResponse == null) { return null; } InternalObjectNode document = BridgeInternal.getProperties(documentResourceResponse); return documentServiceLease .withId(document.getId()) .withETag(document.getETag()) .withTs(ModelBridgeInternal.getStringFromJsonSerializable(document, Constants.Properties.LAST_MODIFIED)); }); } @Override public Mono<Void> delete(Lease lease) { if (lease == null || lease.getId() == null) { throw new IllegalArgumentException("lease"); } return this.leaseDocumentClient .deleteItem(lease.getId(), new PartitionKey(lease.getId()), this.requestOptionsFactory.createItemRequestOptions(lease)) .onErrorResume( ex -> { if (ex instanceof CosmosException) { CosmosException e = (CosmosException) ex; if (Exceptions.isNotFound(e)) { return Mono.empty(); } } return Mono.error(ex); }) .map( documentResourceResponse -> true) .then(); } @Override public Mono<Void> deleteAll(List<Lease> leases) { checkNotNull(leases, "Argument 'leases' can not be null"); logger.info("Deleting all leases"); Map<String, CosmosItemIdentity> cosmosIdentityMap = new HashMap<>(); for (Lease lease : leases) { cosmosIdentityMap.put(lease.getId(), new CosmosItemIdentity(new PartitionKey(lease.getId()), lease.getId())); } return Mono.defer(() -> Mono.just(cosmosIdentityMap)) .flatMapMany(itemIdentities -> 
this.leaseDocumentClient.deleteAllItems(cosmosIdentityMap.values().stream().collect(Collectors.toList()))) .flatMap(itemResponse -> { if (itemResponse.getResponse() != null && itemResponse.getResponse().isSuccessStatusCode()) { cosmosIdentityMap.remove(itemResponse.getOperation().getId()); } else { int effectiveStatusCode = 0; int effectiveSubStatusCode = 0; if (itemResponse.getResponse() != null) { effectiveStatusCode = itemResponse.getResponse().getStatusCode(); effectiveSubStatusCode = itemResponse.getResponse().getStatusCode(); } else if (itemResponse.getException() != null && itemResponse.getException() instanceof CosmosException) { CosmosException cosmosException = (CosmosException) itemResponse.getException(); effectiveStatusCode = cosmosException.getStatusCode(); effectiveSubStatusCode = cosmosException.getSubStatusCode(); } if (effectiveStatusCode == ChangeFeedHelper.HTTP_STATUS_CODE_NOT_FOUND && effectiveSubStatusCode == 0) { cosmosIdentityMap.remove(itemResponse.getOperation().getId()); } } return Mono.empty(); }) .repeat(() -> cosmosIdentityMap.size() != 0) .then(); } @Override public Mono<Lease> acquire(Lease lease) { if (lease == null) { throw new IllegalArgumentException("lease"); } String oldOwner = lease.getOwner(); return this.leaseUpdater.updateLease( lease, lease.getId(), new PartitionKey(lease.getId()), this.requestOptionsFactory.createItemRequestOptions(lease), serverLease -> { if (serverLease.getOwner() != null && !serverLease.getOwner().equalsIgnoreCase(oldOwner)) { logger.info("Lease with token {} : lease was acquired already by owner '{}'", lease.getLeaseToken(), serverLease.getOwner()); throw new LeaseLostException(lease); } serverLease.setOwner(this.settings.getHostName()); serverLease.setProperties(lease.getProperties()); return serverLease; }); } @Override public Mono<Void> release(Lease lease) { if (lease == null) { throw new IllegalArgumentException("lease"); } return this.leaseDocumentClient.readItem(lease.getId(), new 
PartitionKey(lease.getId()), this.requestOptionsFactory.createItemRequestOptions(lease), InternalObjectNode.class) .onErrorResume( ex -> { if (ex instanceof CosmosException) { CosmosException e = (CosmosException) ex; if (Exceptions.isNotFound(e)) { logger.info("Lease with token {} : failed to renew lease. The lease is gone already.", lease.getLeaseToken()); throw new LeaseLostException(lease); } } return Mono.error(ex); }) .map( documentResourceResponse -> ServiceItemLeaseV1.fromDocument(BridgeInternal.getProperties(documentResourceResponse))) .flatMap( refreshedLease -> this.leaseUpdater.updateLease( refreshedLease, lease.getId(), new PartitionKey(lease.getId()), this.requestOptionsFactory.createItemRequestOptions(lease), serverLease -> { if (serverLease.getOwner() != null && !serverLease.getOwner().equalsIgnoreCase(lease.getOwner())) { logger.info("Lease with token {} : no need to release lease. The lease was already taken by another host '{}'.", lease.getLeaseToken(), serverLease.getOwner()); throw new LeaseLostException(lease); } serverLease.setOwner(null); return serverLease; }) ).then(); } @Override public Mono<Lease> renew(Lease lease) { if (lease == null) { throw new IllegalArgumentException("lease"); } return this.leaseDocumentClient.readItem(lease.getId(), new PartitionKey(lease.getId()), this.requestOptionsFactory.createItemRequestOptions(lease), InternalObjectNode.class) .onErrorResume( ex -> { if (ex instanceof CosmosException) { CosmosException e = (CosmosException) ex; if (Exceptions.isNotFound(e)) { logger.info("Lease with token {} : failed to renew lease. 
The lease is gone already.", lease.getLeaseToken()); throw new LeaseLostException(lease); } } return Mono.error(ex); }) .map( documentResourceResponse -> ServiceItemLeaseV1.fromDocument(BridgeInternal.getProperties(documentResourceResponse))) .flatMap( refreshedLease -> this.leaseUpdater.updateLease( refreshedLease, lease.getId(), new PartitionKey(lease.getId()), this.requestOptionsFactory.createItemRequestOptions(lease), serverLease -> { if (serverLease.getOwner() == null) { logger.info("Lease with token {} : lease was taken over and released by a different owner", lease.getLeaseToken()); throw new LeaseLostException(lease); } else if (!serverLease.getOwner().equalsIgnoreCase(lease.getOwner())) { logger.info("Lease with token {} : lease was taken over by owner '{}'", lease.getLeaseToken(), serverLease.getOwner()); throw new LeaseLostException(lease); } return serverLease; }) ); } @Override public Mono<Lease> updateProperties(Lease lease) { if (lease == null) { throw new IllegalArgumentException("lease"); } if (lease.getOwner() != null && !lease.getOwner().equalsIgnoreCase(this.settings.getHostName())) { logger.info("Lease with token '{}' : lease was taken over by owner '{}' before lease item update", lease.getLeaseToken(), lease.getOwner()); throw new LeaseLostException(lease); } return this.leaseUpdater.updateLease( lease, lease.getId(), new PartitionKey(lease.getId()), this.requestOptionsFactory.createItemRequestOptions(lease), serverLease -> { if (serverLease.getOwner() != null && !serverLease.getOwner().equalsIgnoreCase(lease.getOwner())) { logger.info("Lease with token '{}' : lease was taken over by owner '{}'", lease.getLeaseToken(), serverLease.getOwner()); throw new LeaseLostException(lease); } serverLease.setProperties(lease.getProperties()); return serverLease; }); } @Override public Mono<Lease> checkpoint(Lease lease, String continuationToken, CancellationToken cancellationToken) { if (lease == null) { throw new IllegalArgumentException("lease"); } if 
(continuationToken == null || continuationToken.isEmpty()) { throw new IllegalArgumentException("continuationToken must be a non-empty string"); } if (cancellationToken.isCancellationRequested()) return Mono.error(new TaskCancelledException()); return this.leaseDocumentClient.readItem(lease.getId(), new PartitionKey(lease.getId()), this.requestOptionsFactory.createItemRequestOptions(lease), InternalObjectNode.class) .map(documentResourceResponse -> ServiceItemLeaseV1.fromDocument(BridgeInternal.getProperties(documentResourceResponse))) .flatMap(refreshedLease -> { if (cancellationToken.isCancellationRequested()) return Mono.error(new TaskCancelledException()); return this.leaseUpdater.updateLease( refreshedLease, lease.getId(), new PartitionKey(lease.getId()), this.requestOptionsFactory.createItemRequestOptions(lease), serverLease -> { if (serverLease.getOwner() == null) { logger.info("Lease with token {} : lease was taken over and released by a different owner", lease.getLeaseToken()); throw new LeaseLostException(lease); } else if (!serverLease.getOwner().equalsIgnoreCase(lease.getOwner())) { logger.info("Lease with token {} : lease was taken over by owner '{}'", lease.getLeaseToken(), serverLease.getOwner()); throw new LeaseLostException(lease); } serverLease.setContinuationToken(continuationToken); return serverLease; }); }) .doOnError(throwable -> { logger.info("Lease with token {} : lease with token '{}' failed to checkpoint for owner '{}' with continuation token '{}'", lease.getLeaseToken(), lease.getConcurrencyToken(), lease.getOwner(), lease.getReadableContinuationToken()); }); } @Override public Mono<Boolean> isInitialized() { return this.leaseStore.isInitialized(); } @Override public Mono<Boolean> markInitialized() { return this.leaseStore.markInitialized(); } @Override public Mono<Boolean> acquireInitializationLock(Duration lockExpirationTime) { return this.leaseStore.acquireInitializationLock(lockExpirationTime); } @Override public Mono<Boolean> 
releaseInitializationLock() { return this.leaseStore.releaseInitializationLock(); } private Flux<ServiceItemLeaseV1> listDocuments(String prefix) { if (prefix == null || prefix.isEmpty()) { throw new IllegalArgumentException("prefix"); } SqlParameter param = new SqlParameter(); param.setName("@PartitionLeasePrefix"); param.setValue(prefix); SqlQuerySpec querySpec = new SqlQuerySpec( "SELECT * FROM c WHERE STARTSWITH(c.id, @PartitionLeasePrefix)", Collections.singletonList(param)); Flux<FeedResponse<InternalObjectNode>> query = this.leaseDocumentClient.queryItems( this.settings.getLeaseCollectionLink(), querySpec, this.requestOptionsFactory.createQueryRequestOptions(), InternalObjectNode.class); return query.flatMap( documentFeedResponse -> Flux.fromIterable(documentFeedResponse.getResults())) .map(ServiceItemLeaseV1::fromDocument); } private String getDocumentId(String leaseToken) { return this.getPartitionLeasePrefix() + leaseToken; } private String getPartitionLeasePrefix() { return this.settings.getContainerNamePrefix() + LEASE_STORE_MANAGER_LEASE_SUFFIX; } }
class LeaseStoreManagerImpl implements LeaseStoreManager, LeaseStoreManager.LeaseStoreManagerBuilderDefinition { private final String LEASE_STORE_MANAGER_LEASE_SUFFIX = ".."; private final Logger logger = LoggerFactory.getLogger(LeaseStoreManagerImpl.class); private LeaseStoreManagerSettings settings; private ChangeFeedContextClient leaseDocumentClient; private RequestOptionsFactory requestOptionsFactory; private ServiceItemLeaseUpdater leaseUpdater; private LeaseStore leaseStore; public static LeaseStoreManagerBuilderDefinition builder() { return new LeaseStoreManagerImpl(); } public LeaseStoreManagerImpl() { this.settings = new LeaseStoreManagerSettings(); } @Override public LeaseStoreManagerBuilderDefinition leaseContextClient(ChangeFeedContextClient leaseContextClient) { checkNotNull(leaseContextClient, "Argument 'leaseContextClient' can not be null"); this.leaseDocumentClient = leaseContextClient; return this; } @Override public LeaseStoreManagerBuilderDefinition leasePrefix(String leasePrefix) { checkNotNull(leasePrefix, "Argument 'leasePrefix' can not be null"); this.settings.withContainerNamePrefix(leasePrefix); return this; } @Override public LeaseStoreManagerBuilderDefinition leaseCollectionLink(CosmosAsyncContainer leaseCollectionLink) { checkNotNull(leaseCollectionLink, "Argument 'leaseCollectionLink' can not be null"); this.settings.withLeaseCollectionLink(leaseCollectionLink); return this; } @Override public LeaseStoreManagerBuilderDefinition requestOptionsFactory(RequestOptionsFactory requestOptionsFactory) { checkNotNull(requestOptionsFactory, "Argument 'requestOptionsFactory' can not be null"); this.requestOptionsFactory = requestOptionsFactory; return this; } @Override public LeaseStoreManagerBuilderDefinition hostName(String hostName) { checkNotNull(hostName, "Argument 'hostName' can not be null"); this.settings.withHostName(hostName); return this; } @Override public LeaseStoreManager build() { checkNotNull(this.settings, "settings can not be 
null"); checkNotNull(this.settings.getContainerNamePrefix(), "settings.containerNamePrefix can not be null"); checkNotNull(this.settings.getLeaseCollectionLink(), "settings.leaseCollectionLink can not be null"); checkArgument(StringUtils.isNotEmpty(this.settings.getHostName()), "settings.getHostName can not be null nor empty"); checkNotNull(this.leaseDocumentClient, "leaseDocumentClient can not be null"); checkNotNull(this.requestOptionsFactory, "requestOptionsFactory can not be null"); if (this.leaseUpdater == null) { this.leaseUpdater = new DocumentServiceLeaseUpdaterImpl(leaseDocumentClient); } this.leaseStore = new LeaseStoreImpl( this.leaseDocumentClient, this.settings.getContainerNamePrefix(), this.settings.getLeaseCollectionLink(), this.requestOptionsFactory); return this; } @Override public Flux<Lease> getAllLeases() { return this.listDocuments(this.getPartitionLeasePrefix()) .map(documentServiceLease -> documentServiceLease); } @Override public Flux<Lease> getOwnedLeases() { return this.getAllLeases() .filter(lease -> lease.getOwner() != null && lease.getOwner().equalsIgnoreCase(this.settings.getHostName())); } @Override @Override public Mono<Lease> createLeaseIfNotExist(String leaseToken, String continuationToken, Map<String, String> properties) { throw new UnsupportedOperationException("partition key based leases are not supported for Change Feed V1 wire format"); } @Override public Mono<Lease> createLeaseIfNotExist(FeedRangeEpkImpl feedRange, String continuationToken) { return this.createLeaseIfNotExist(feedRange, continuationToken, null); } @Override public Mono<Lease> createLeaseIfNotExist(FeedRangeEpkImpl feedRange, String continuationToken, Map<String, String> properties) { checkNotNull(feedRange, "Argument 'feedRanges' should not be null"); String leaseToken = feedRange.getRange().getMin() + "-" + feedRange.getRange().getMax(); String leaseDocId = this.getDocumentId(leaseToken); ServiceItemLeaseV1 documentServiceLease = new ServiceItemLeaseV1() 
.withVersion(LeaseVersion.EPK_RANGE_BASED_LEASE) .withId(leaseDocId) .withLeaseToken(leaseToken) .withFeedRange(feedRange) .withContinuationToken(continuationToken) .withProperties(properties); return this.leaseDocumentClient.createItem(this.settings.getLeaseCollectionLink(), documentServiceLease, null, false) .onErrorResume( ex -> { if (ex instanceof CosmosException) { CosmosException e = (CosmosException) ex; if (Exceptions.isConflict(e)) { logger.info("Some other host created lease for {}.", leaseToken); return Mono.empty(); } } return Mono.error(ex); }) .map(documentResourceResponse -> { if (documentResourceResponse == null) { return null; } InternalObjectNode document = BridgeInternal.getProperties(documentResourceResponse); return documentServiceLease .withId(document.getId()) .withETag(document.getETag()) .withTs(ModelBridgeInternal.getStringFromJsonSerializable(document, Constants.Properties.LAST_MODIFIED)); }); } @Override public Mono<Void> delete(Lease lease) { if (lease == null || lease.getId() == null) { throw new IllegalArgumentException("lease"); } return this.leaseDocumentClient .deleteItem(lease.getId(), new PartitionKey(lease.getId()), this.requestOptionsFactory.createItemRequestOptions(lease)) .onErrorResume( ex -> { if (ex instanceof CosmosException) { CosmosException e = (CosmosException) ex; if (Exceptions.isNotFound(e)) { return Mono.empty(); } } return Mono.error(ex); }) .map( documentResourceResponse -> true) .then(); } @Override public Mono<Void> deleteAll(List<Lease> leases) { checkNotNull(leases, "Argument 'leases' can not be null"); logger.info("Deleting all leases"); Map<String, CosmosItemIdentity> cosmosIdentityMap = new HashMap<>(); for (Lease lease : leases) { cosmosIdentityMap.put(lease.getId(), new CosmosItemIdentity(new PartitionKey(lease.getId()), lease.getId())); } return Mono.defer(() -> Mono.just(cosmosIdentityMap)) .flatMapMany(itemIdentities -> 
this.leaseDocumentClient.deleteAllItems(cosmosIdentityMap.values().stream().collect(Collectors.toList()))) .flatMap(itemResponse -> { if (itemResponse.getResponse() != null && itemResponse.getResponse().isSuccessStatusCode()) { cosmosIdentityMap.remove(itemResponse.getOperation().getId()); } else { int effectiveStatusCode = 0; int effectiveSubStatusCode = 0; if (itemResponse.getResponse() != null) { effectiveStatusCode = itemResponse.getResponse().getStatusCode(); effectiveSubStatusCode = itemResponse.getResponse().getStatusCode(); } else if (itemResponse.getException() != null && itemResponse.getException() instanceof CosmosException) { CosmosException cosmosException = (CosmosException) itemResponse.getException(); effectiveStatusCode = cosmosException.getStatusCode(); effectiveSubStatusCode = cosmosException.getSubStatusCode(); } if (effectiveStatusCode == ChangeFeedHelper.HTTP_STATUS_CODE_NOT_FOUND && effectiveSubStatusCode == 0) { cosmosIdentityMap.remove(itemResponse.getOperation().getId()); } } return Mono.empty(); }) .repeat(() -> cosmosIdentityMap.size() != 0) .then(); } @Override public Mono<Lease> acquire(Lease lease) { if (lease == null) { throw new IllegalArgumentException("lease"); } String oldOwner = lease.getOwner(); return this.leaseUpdater.updateLease( lease, lease.getId(), new PartitionKey(lease.getId()), this.requestOptionsFactory.createItemRequestOptions(lease), serverLease -> { if (serverLease.getOwner() != null && !serverLease.getOwner().equalsIgnoreCase(oldOwner)) { logger.info("Lease with token {} : lease was acquired already by owner '{}'", lease.getLeaseToken(), serverLease.getOwner()); throw new LeaseLostException(lease); } serverLease.setOwner(this.settings.getHostName()); serverLease.setProperties(lease.getProperties()); return serverLease; }); } @Override public Mono<Void> release(Lease lease) { if (lease == null) { throw new IllegalArgumentException("lease"); } return this.leaseDocumentClient.readItem(lease.getId(), new 
PartitionKey(lease.getId()), this.requestOptionsFactory.createItemRequestOptions(lease), InternalObjectNode.class) .onErrorResume( ex -> { if (ex instanceof CosmosException) { CosmosException e = (CosmosException) ex; if (Exceptions.isNotFound(e)) { logger.info("Lease with token {} : failed to renew lease. The lease is gone already.", lease.getLeaseToken()); throw new LeaseLostException(lease); } } return Mono.error(ex); }) .map( documentResourceResponse -> ServiceItemLeaseV1.fromDocument(BridgeInternal.getProperties(documentResourceResponse))) .flatMap( refreshedLease -> this.leaseUpdater.updateLease( refreshedLease, lease.getId(), new PartitionKey(lease.getId()), this.requestOptionsFactory.createItemRequestOptions(lease), serverLease -> { if (serverLease.getOwner() != null && !serverLease.getOwner().equalsIgnoreCase(lease.getOwner())) { logger.info("Lease with token {} : no need to release lease. The lease was already taken by another host '{}'.", lease.getLeaseToken(), serverLease.getOwner()); throw new LeaseLostException(lease); } serverLease.setOwner(null); return serverLease; }) ).then(); } @Override public Mono<Lease> renew(Lease lease) { if (lease == null) { throw new IllegalArgumentException("lease"); } return this.leaseDocumentClient.readItem(lease.getId(), new PartitionKey(lease.getId()), this.requestOptionsFactory.createItemRequestOptions(lease), InternalObjectNode.class) .onErrorResume( ex -> { if (ex instanceof CosmosException) { CosmosException e = (CosmosException) ex; if (Exceptions.isNotFound(e)) { logger.info("Lease with token {} : failed to renew lease. 
The lease is gone already.", lease.getLeaseToken()); throw new LeaseLostException(lease); } } return Mono.error(ex); }) .map( documentResourceResponse -> ServiceItemLeaseV1.fromDocument(BridgeInternal.getProperties(documentResourceResponse))) .flatMap( refreshedLease -> this.leaseUpdater.updateLease( refreshedLease, lease.getId(), new PartitionKey(lease.getId()), this.requestOptionsFactory.createItemRequestOptions(lease), serverLease -> { if (serverLease.getOwner() == null) { logger.info("Lease with token {} : lease was taken over and released by a different owner", lease.getLeaseToken()); throw new LeaseLostException(lease); } else if (!serverLease.getOwner().equalsIgnoreCase(lease.getOwner())) { logger.info("Lease with token {} : lease was taken over by owner '{}'", lease.getLeaseToken(), serverLease.getOwner()); throw new LeaseLostException(lease); } return serverLease; }) ); } @Override public Mono<Lease> updateProperties(Lease lease) { if (lease == null) { throw new IllegalArgumentException("lease"); } if (lease.getOwner() != null && !lease.getOwner().equalsIgnoreCase(this.settings.getHostName())) { logger.info("Lease with token '{}' : lease was taken over by owner '{}' before lease item update", lease.getLeaseToken(), lease.getOwner()); throw new LeaseLostException(lease); } return this.leaseUpdater.updateLease( lease, lease.getId(), new PartitionKey(lease.getId()), this.requestOptionsFactory.createItemRequestOptions(lease), serverLease -> { if (serverLease.getOwner() != null && !serverLease.getOwner().equalsIgnoreCase(lease.getOwner())) { logger.info("Lease with token '{}' : lease was taken over by owner '{}'", lease.getLeaseToken(), serverLease.getOwner()); throw new LeaseLostException(lease); } serverLease.setProperties(lease.getProperties()); return serverLease; }); } @Override public Mono<Lease> checkpoint(Lease lease, String continuationToken, CancellationToken cancellationToken) { if (lease == null) { throw new IllegalArgumentException("lease"); } if 
(continuationToken == null || continuationToken.isEmpty()) { throw new IllegalArgumentException("continuationToken must be a non-empty string"); } if (cancellationToken.isCancellationRequested()) return Mono.error(new TaskCancelledException()); return this.leaseDocumentClient.readItem(lease.getId(), new PartitionKey(lease.getId()), this.requestOptionsFactory.createItemRequestOptions(lease), InternalObjectNode.class) .map(documentResourceResponse -> ServiceItemLeaseV1.fromDocument(BridgeInternal.getProperties(documentResourceResponse))) .flatMap(refreshedLease -> { if (cancellationToken.isCancellationRequested()) return Mono.error(new TaskCancelledException()); return this.leaseUpdater.updateLease( refreshedLease, lease.getId(), new PartitionKey(lease.getId()), this.requestOptionsFactory.createItemRequestOptions(lease), serverLease -> { if (serverLease.getOwner() == null) { logger.info("Lease with token {} : lease was taken over and released by a different owner", lease.getLeaseToken()); throw new LeaseLostException(lease); } else if (!serverLease.getOwner().equalsIgnoreCase(lease.getOwner())) { logger.info("Lease with token {} : lease was taken over by owner '{}'", lease.getLeaseToken(), serverLease.getOwner()); throw new LeaseLostException(lease); } serverLease.setContinuationToken(continuationToken); return serverLease; }); }) .doOnError(throwable -> { logger.info("Lease with token {} : lease with token '{}' failed to checkpoint for owner '{}' with continuation token '{}'", lease.getLeaseToken(), lease.getConcurrencyToken(), lease.getOwner(), lease.getReadableContinuationToken()); }); } @Override public Mono<Boolean> isInitialized() { return this.leaseStore.isInitialized(); } @Override public Mono<Boolean> markInitialized() { return this.leaseStore.markInitialized(); } @Override public Mono<Boolean> acquireInitializationLock(Duration lockExpirationTime) { return this.leaseStore.acquireInitializationLock(lockExpirationTime); } @Override public Mono<Boolean> 
releaseInitializationLock() { return this.leaseStore.releaseInitializationLock(); } private Flux<ServiceItemLeaseV1> listDocuments(String prefix) { if (prefix == null || prefix.isEmpty()) { throw new IllegalArgumentException("prefix"); } SqlParameter param = new SqlParameter(); param.setName("@PartitionLeasePrefix"); param.setValue(prefix); SqlQuerySpec querySpec = new SqlQuerySpec( "SELECT * FROM c WHERE STARTSWITH(c.id, @PartitionLeasePrefix)", Collections.singletonList(param)); Flux<FeedResponse<InternalObjectNode>> query = this.leaseDocumentClient.queryItems( this.settings.getLeaseCollectionLink(), querySpec, this.requestOptionsFactory.createQueryRequestOptions(), InternalObjectNode.class); return query.flatMap( documentFeedResponse -> Flux.fromIterable(documentFeedResponse.getResults())) .map(ServiceItemLeaseV1::fromDocument); } private String getDocumentId(String leaseToken) { return this.getPartitionLeasePrefix() + leaseToken; } private String getPartitionLeasePrefix() { return this.settings.getContainerNamePrefix() + LEASE_STORE_MANAGER_LEASE_SUFFIX; } }
Is there an option to record the checksum of the request body instead of the whole request body? We still want to validate the integrity of the request body but just not store it in the recording.
public static void changeHeaders(HttpRequest request, URL proxyUrl, String xRecordingId, String mode, boolean skipRecordingRequestBody) { HttpHeader upstreamUri = request.getHeaders().get(X_RECORDING_UPSTREAM_BASE_URI); UrlBuilder proxyUrlBuilder = UrlBuilder.parse(request.getUrl()); proxyUrlBuilder.setScheme(proxyUrl.getProtocol()); proxyUrlBuilder.setHost(proxyUrl.getHost()); if (proxyUrl.getPort() != -1) { proxyUrlBuilder.setPort(proxyUrl.getPort()); } UrlBuilder originalUrlBuilder = UrlBuilder.parse(request.getUrl()); originalUrlBuilder.setPath(""); originalUrlBuilder.setQuery(""); try { URL originalUrl = originalUrlBuilder.toUrl(); HttpHeaders headers = request.getHeaders(); if (upstreamUri == null) { headers.set(X_RECORDING_UPSTREAM_BASE_URI, originalUrl.toString()); headers.set(X_RECORDING_MODE, mode); headers.set(X_RECORDING_ID, xRecordingId); if (mode.equals(RECORD_MODE) && skipRecordingRequestBody) { headers.set(X_RECORDING_SKIP, "request-body"); } } request.setUrl(proxyUrlBuilder.toUrl()); } catch (MalformedURLException e) { throw new RuntimeException(e); } }
headers.set(X_RECORDING_SKIP, "request-body");
public static void changeHeaders(HttpRequest request, URL proxyUrl, String xRecordingId, String mode, boolean skipRecordingRequestBody) { HttpHeader upstreamUri = request.getHeaders().get(X_RECORDING_UPSTREAM_BASE_URI); UrlBuilder proxyUrlBuilder = UrlBuilder.parse(request.getUrl()); proxyUrlBuilder.setScheme(proxyUrl.getProtocol()); proxyUrlBuilder.setHost(proxyUrl.getHost()); if (proxyUrl.getPort() != -1) { proxyUrlBuilder.setPort(proxyUrl.getPort()); } UrlBuilder originalUrlBuilder = UrlBuilder.parse(request.getUrl()); originalUrlBuilder.setPath(""); originalUrlBuilder.setQuery(""); try { URL originalUrl = originalUrlBuilder.toUrl(); HttpHeaders headers = request.getHeaders(); if (upstreamUri == null) { headers.set(X_RECORDING_UPSTREAM_BASE_URI, originalUrl.toString()); headers.set(X_RECORDING_MODE, mode); headers.set(X_RECORDING_ID, xRecordingId); if (mode.equals(RECORD_MODE) && skipRecordingRequestBody) { headers.set(X_RECORDING_SKIP, "request-body"); } } request.setUrl(proxyUrlBuilder.toUrl()); } catch (MalformedURLException e) { throw new RuntimeException(e); } }
class TestProxyUtils { private static final ClientLogger LOGGER = new ClientLogger(TestProxyUtils.class); private static final List<String> JSON_PROPERTIES_TO_REDACT = new ArrayList<String>( Arrays.asList("authHeader", "accountKey", "accessToken", "accountName", "applicationId", "apiKey", "connectionString", "url", "host", "password", "userName")); private static final Map<String, String> HEADER_KEY_REGEX_TO_REDACT = new HashMap<String, String>() {{ put("Operation-Location", URL_REGEX); put("operation-location", URL_REGEX); put("Location", URL_REGEX); }}; private static final List<String> BODY_REGEX_TO_REDACT = new ArrayList<>(Arrays.asList("(?:<Value>)(?<secret>.*)(?:</Value>)", "(?:Password=)(?<secret>.*)(?:;)", "(?:User ID=)(?<secret>.*)(?:;)", "(?:<PrimaryKey>)(?<secret>.*)(?:</PrimaryKey>)", "(?:<SecondaryKey>)(?<secret>.*)(?:</SecondaryKey>)")); private static final String URL_REGEX = "(?<=http: private static final List<String> HEADER_KEYS_TO_REDACT = new ArrayList<>(Arrays.asList("Ocp-Apim-Subscription-Key", "api-key", "x-api-key")); private static final String REDACTED_VALUE = "REDACTED"; private static final String DELEGATION_KEY_CLIENTID_REGEX = "(?:<SignedOid>)(?<secret>.*)(?:</SignedOid>)"; private static final String DELEGATION_KEY_TENANTID_REGEX = "(?:<SignedTid>)(?<secret>.*)(?:</SignedTid>)"; private static final HttpHeaderName X_RECORDING_UPSTREAM_BASE_URI = HttpHeaderName.fromString("x-recording-upstream-base-uri"); private static final HttpHeaderName X_RECORDING_MODE = HttpHeaderName.fromString("x-recording-mode"); private static final HttpHeaderName X_REQUEST_MISMATCH_ERROR = HttpHeaderName.fromString("x-request-mismatch-error"); private static final HttpHeaderName X_REQUEST_KNOWN_EXCEPTION_ERROR = HttpHeaderName.fromString("x-request-known-exception-error"); private static final HttpHeaderName X_REQUEST_EXCEPTION_EXCEPTION_ERROR = HttpHeaderName.fromString("x-request-exception-exception-error"); private static final HttpHeaderName 
X_ABSTRACTION_IDENTIFIER = HttpHeaderName.fromString("x-abstraction-identifier"); private static volatile URL proxyUrl; /** * Adds headers required for communication with the test proxy. * * @param request The request to add headers to. * @param proxyUrl The {@link URL} the proxy lives at. * @param xRecordingId The x-recording-id value for the current session. * @param mode The current test proxy mode. * @param skipRecordingRequestBody Flag indicating to skip recording request bodies when tests run in Record mode. * @throws RuntimeException Construction of one of the URLs failed. */ /** * Sets the response URL back to the original URL before returning it through the pipeline. * @param response The {@link HttpResponse} to modify. * @return The modified response. * @throws RuntimeException Construction of one of the URLs failed. */ public static HttpResponse revertUrl(HttpResponse response) { try { URL originalUrl = UrlBuilder.parse(response.getRequest().getHeaders() .getValue(X_RECORDING_UPSTREAM_BASE_URI)) .toUrl(); UrlBuilder currentUrl = UrlBuilder.parse(response.getRequest().getUrl()); currentUrl.setScheme(originalUrl.getProtocol()); currentUrl.setHost(originalUrl.getHost()); int port = originalUrl.getPort(); if (port == -1) { currentUrl.setPort(""); } else { currentUrl.setPort(port); } response.getRequest().setUrl(currentUrl.toUrl()); return response; } catch (MalformedURLException e) { throw new RuntimeException(e); } } /** * Gets the process name of the test proxy binary. * @return The platform specific process name. * @throws UnsupportedOperationException The current OS is not recognized. 
*/ public static String getProxyProcessName() { String osName = System.getProperty("os.name").toLowerCase(Locale.ROOT); if (osName.contains("windows")) { return "Azure.Sdk.Tools.TestProxy.exe"; } else if (osName.contains("linux")) { return "Azure.Sdk.Tools.TestProxy"; } else if (osName.contains("mac os x")) { return "Azure.Sdk.Tools.TestProxy"; } else { throw new UnsupportedOperationException(); } } /** * Checks the return from a request through the test proxy for special error headers. * @param httpResponse The {@link HttpResponse} from the test proxy. */ public static void checkForTestProxyErrors(HttpResponse httpResponse) { String error = httpResponse.getHeaderValue(X_REQUEST_MISMATCH_ERROR); if (error == null) { error = httpResponse.getHeaderValue(X_REQUEST_KNOWN_EXCEPTION_ERROR); } if (error == null) { error = httpResponse.getHeaderValue(X_REQUEST_EXCEPTION_EXCEPTION_ERROR); } if (error != null) { throw LOGGER.logExceptionAsError(new RuntimeException("Test proxy exception: " + new String(Base64.getDecoder().decode(error), StandardCharsets.UTF_8))); } } /** * Finds the test proxy version in the source tree. * @return The version string to use. * @throws RuntimeException The eng folder could not be located in the repo. * @throws UncheckedIOException The version file could not be read properly. */ public static String getTestProxyVersion() { Path rootPath = TestUtils.getRepoRoot(); Path versionFile = Paths.get("eng", "common", "testproxy", "target_version.txt"); rootPath = rootPath.resolve(versionFile); try { return Files.readAllLines(rootPath).get(0).replace(System.getProperty("line.separator"), ""); } catch (IOException e) { throw new UncheckedIOException(e); } } /** * Gets the current URL for the test proxy. * @return The {@link URL} location of the test proxy. * @throws RuntimeException The URL could not be constructed. 
*/ public static URL getProxyUrl() { if (proxyUrl != null) { return proxyUrl; } UrlBuilder builder = new UrlBuilder(); builder.setHost("localhost"); builder.setScheme("http"); builder.setPort(5000); try { proxyUrl = builder.toUrl(); return proxyUrl; } catch (MalformedURLException e) { throw new RuntimeException(e); } } /** * Registers the default set of sanitizers for sanitizing request and responses * @return the list of default sanitizers to be added. */ public static List<TestProxySanitizer> loadSanitizers() { List<TestProxySanitizer> sanitizers = new ArrayList<>(addDefaultRegexSanitizers()); sanitizers.add(addDefaultUrlSanitizer()); sanitizers.addAll(addDefaultBodySanitizers()); sanitizers.addAll(addDefaultHeaderKeySanitizers()); return sanitizers; } private static String createCustomMatcherRequestBody(CustomMatcher customMatcher) { return String.format("{\"ignoredHeaders\":\"%s\",\"excludedHeaders\":\"%s\",\"compareBodies\":%s,\"ignoredQueryParameters\":\"%s\", \"ignoreQueryOrdering\":%s}", getCommaSeperatedString(customMatcher.getHeadersKeyOnlyMatch()), getCommaSeperatedString(customMatcher.getExcludedHeaders()), customMatcher.isComparingBodies(), getCommaSeperatedString(customMatcher.getIgnoredQueryParameters()), customMatcher.isQueryOrderingIgnored()); } private static String getCommaSeperatedString(List<String> stringList) { if (stringList == null) { return null; } return stringList.stream() .filter(s -> s != null && !s.isEmpty()) .collect(Collectors.joining(",")); } private static String createBodyJsonKeyRequestBody(String jsonKey, String regex, String redactedValue) { if (regex == null) { return String.format("{\"value\":\"%s\",\"jsonPath\":\"%s\"}", redactedValue, jsonKey); } else { return String.format("{\"value\":\"%s\",\"jsonPath\":\"%s\",\"regex\":\"%s\"}", redactedValue, jsonKey, regex); } } private static String createRegexRequestBody(String key, String regex, String value, String groupForReplace) { if (key == null) { if (groupForReplace == null) 
{ return String.format("{\"value\":\"%s\",\"regex\":\"%s\"}", value, regex); } else { return String.format("{\"value\":\"%s\",\"regex\":\"%s\",\"groupForReplace\":\"%s\"}", value, regex, groupForReplace); } } else if (regex == null) { return String.format("{\"key\":\"%s\",\"value\":\"%s\"}", key, value); } if (groupForReplace == null) { return String.format("{\"key\":\"%s\",\"value\":\"%s\",\"regex\":\"%s\"}", key, value, regex); } else { return String.format("{\"key\":\"%s\",\"value\":\"%s\",\"regex\":\"%s\",\"groupForReplace\":\"%s\"}", key, value, regex, groupForReplace); } } /** * Creates a list of sanitizer requests to be sent to the test proxy server. * * @param sanitizers the list of sanitizers to be added. * @param proxyUrl The proxyUrl to use when constructing requests. * @return the list of sanitizer {@link HttpRequest requests} to be sent. * @throws RuntimeException if {@link TestProxySanitizerType} is not supported. */ public static List<HttpRequest> getSanitizerRequests(List<TestProxySanitizer> sanitizers, URL proxyUrl) { return sanitizers.stream().map(testProxySanitizer -> { String requestBody; String sanitizerType; switch (testProxySanitizer.getType()) { case URL: sanitizerType = TestProxySanitizerType.URL.getName(); requestBody = createRegexRequestBody(null, testProxySanitizer.getRegex(), testProxySanitizer.getRedactedValue(), testProxySanitizer.getGroupForReplace()); return createHttpRequest(requestBody, sanitizerType, proxyUrl); case BODY_REGEX: sanitizerType = TestProxySanitizerType.BODY_REGEX.getName(); requestBody = createRegexRequestBody(null, testProxySanitizer.getRegex(), testProxySanitizer.getRedactedValue(), testProxySanitizer.getGroupForReplace()); return createHttpRequest(requestBody, sanitizerType, proxyUrl); case BODY_KEY: sanitizerType = TestProxySanitizerType.BODY_KEY.getName(); requestBody = createBodyJsonKeyRequestBody(testProxySanitizer.getKey(), testProxySanitizer.getRegex(), testProxySanitizer.getRedactedValue()); return 
createHttpRequest(requestBody, sanitizerType, proxyUrl); case HEADER: sanitizerType = HEADER.getName(); if (testProxySanitizer.getKey() == null && testProxySanitizer.getRegex() == null) { throw new RuntimeException( String.format("Missing regexKey and/or headerKey for sanitizer type {%s}", sanitizerType)); } requestBody = createRegexRequestBody(testProxySanitizer.getKey(), testProxySanitizer.getRegex(), testProxySanitizer.getRedactedValue(), testProxySanitizer.getGroupForReplace()); return createHttpRequest(requestBody, sanitizerType, proxyUrl); default: throw new RuntimeException( String.format("Sanitizer type {%s} not supported", testProxySanitizer.getType())); } }).collect(Collectors.toList()); } private static HttpRequest createHttpRequest(String requestBody, String sanitizerType, URL proxyUrl) { HttpRequest request = new HttpRequest(HttpMethod.POST, String.format("%s/Admin/AddSanitizer", proxyUrl.toString())) .setBody(requestBody); request.setHeader(X_ABSTRACTION_IDENTIFIER, sanitizerType); return request; } /** * Creates a {@link List} of {@link HttpRequest} to be sent to the test proxy to register matchers. * @param matchers The {@link TestProxyRequestMatcher}s to encode into requests. * @param proxyUrl The proxyUrl to use when constructing requests. * @return The {@link HttpRequest}s to send to the proxy. * @throws RuntimeException The {@link TestProxyRequestMatcher.TestProxyRequestMatcherType} is unsupported. 
*/ public static List<HttpRequest> getMatcherRequests(List<TestProxyRequestMatcher> matchers, URL proxyUrl) { return matchers.stream().map(testProxyMatcher -> { HttpRequest request; String matcherType; switch (testProxyMatcher.getType()) { case HEADERLESS: matcherType = TestProxyRequestMatcher.TestProxyRequestMatcherType.HEADERLESS.getName(); request = new HttpRequest(HttpMethod.POST, String.format("%s/Admin/setmatcher", proxyUrl.toString())); break; case BODILESS: request = new HttpRequest(HttpMethod.POST, String.format("%s/Admin/setmatcher", proxyUrl.toString())); matcherType = TestProxyRequestMatcher.TestProxyRequestMatcherType.BODILESS.getName(); break; case CUSTOM: CustomMatcher customMatcher = (CustomMatcher) testProxyMatcher; String requestBody = createCustomMatcherRequestBody(customMatcher); matcherType = TestProxyRequestMatcher.TestProxyRequestMatcherType.CUSTOM.getName(); request = new HttpRequest(HttpMethod.POST, String.format("%s/Admin/setmatcher", proxyUrl.toString())).setBody(requestBody); break; default: throw new RuntimeException(String.format("Matcher type {%s} not supported", testProxyMatcher.getType())); } request.setHeader(X_ABSTRACTION_IDENTIFIER, matcherType); return request; }).collect(Collectors.toList()); } private static TestProxySanitizer addDefaultUrlSanitizer() { return new TestProxySanitizer(URL_REGEX, REDACTED_VALUE, TestProxySanitizerType.URL); } private static List<TestProxySanitizer> addDefaultBodySanitizers() { return JSON_PROPERTIES_TO_REDACT.stream() .map(jsonProperty -> new TestProxySanitizer(String.format("$..%s", jsonProperty), null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY)) .collect(Collectors.toList()); } private static List<TestProxySanitizer> addDefaultRegexSanitizers() { List<TestProxySanitizer> regexSanitizers = getUserDelegationSanitizers(); regexSanitizers.addAll(BODY_REGEX_TO_REDACT.stream() .map(bodyRegex -> new TestProxySanitizer(bodyRegex, REDACTED_VALUE, 
TestProxySanitizerType.BODY_REGEX).setGroupForReplace("secret")) .collect(Collectors.toList())); List<TestProxySanitizer> keyRegexSanitizers = new ArrayList<>(); HEADER_KEY_REGEX_TO_REDACT.forEach((key, regex) -> keyRegexSanitizers.add(new TestProxySanitizer(key, regex, REDACTED_VALUE, HEADER))); regexSanitizers.addAll(keyRegexSanitizers); return regexSanitizers; } private static List<TestProxySanitizer> addDefaultHeaderKeySanitizers() { return HEADER_KEYS_TO_REDACT.stream() .map(headerKey -> new TestProxySanitizer(headerKey, null, REDACTED_VALUE, HEADER)) .collect(Collectors.toList()); } private static List<TestProxySanitizer> getUserDelegationSanitizers() { List<TestProxySanitizer> userDelegationSanitizers = new ArrayList<>(); userDelegationSanitizers.add(new TestProxySanitizer(DELEGATION_KEY_CLIENTID_REGEX, REDACTED_VALUE, TestProxySanitizerType.BODY_REGEX).setGroupForReplace("secret")); userDelegationSanitizers.add(new TestProxySanitizer(DELEGATION_KEY_TENANTID_REGEX, REDACTED_VALUE, TestProxySanitizerType.BODY_REGEX).setGroupForReplace("secret")); return userDelegationSanitizers; } }
class TestProxyUtils { private static final ClientLogger LOGGER = new ClientLogger(TestProxyUtils.class); private static final HttpHeaderName X_RECORDING_SKIP = HttpHeaderName.fromString("x-recording-skip"); private static final List<String> JSON_PROPERTIES_TO_REDACT = new ArrayList<String>( Arrays.asList("authHeader", "accountKey", "accessToken", "accountName", "applicationId", "apiKey", "connectionString", "url", "host", "password", "userName")); private static final Map<String, String> HEADER_KEY_REGEX_TO_REDACT = new HashMap<String, String>() {{ put("Operation-Location", URL_REGEX); put("operation-location", URL_REGEX); put("Location", URL_REGEX); }}; private static final List<String> BODY_REGEX_TO_REDACT = new ArrayList<>(Arrays.asList("(?:<Value>)(?<secret>.*)(?:</Value>)", "(?:Password=)(?<secret>.*)(?:;)", "(?:User ID=)(?<secret>.*)(?:;)", "(?:<PrimaryKey>)(?<secret>.*)(?:</PrimaryKey>)", "(?:<SecondaryKey>)(?<secret>.*)(?:</SecondaryKey>)")); private static final String URL_REGEX = "(?<=http: private static final List<String> HEADER_KEYS_TO_REDACT = new ArrayList<>(Arrays.asList("Ocp-Apim-Subscription-Key", "api-key", "x-api-key")); private static final String REDACTED_VALUE = "REDACTED"; private static final String DELEGATION_KEY_CLIENTID_REGEX = "(?:<SignedOid>)(?<secret>.*)(?:</SignedOid>)"; private static final String DELEGATION_KEY_TENANTID_REGEX = "(?:<SignedTid>)(?<secret>.*)(?:</SignedTid>)"; private static final HttpHeaderName X_RECORDING_UPSTREAM_BASE_URI = HttpHeaderName.fromString("x-recording-upstream-base-uri"); private static final HttpHeaderName X_RECORDING_MODE = HttpHeaderName.fromString("x-recording-mode"); private static final HttpHeaderName X_REQUEST_MISMATCH_ERROR = HttpHeaderName.fromString("x-request-mismatch-error"); private static final HttpHeaderName X_REQUEST_KNOWN_EXCEPTION_ERROR = HttpHeaderName.fromString("x-request-known-exception-error"); private static final HttpHeaderName X_REQUEST_EXCEPTION_EXCEPTION_ERROR = 
HttpHeaderName.fromString("x-request-exception-exception-error"); private static final HttpHeaderName X_ABSTRACTION_IDENTIFIER = HttpHeaderName.fromString("x-abstraction-identifier"); private static volatile URL proxyUrl; /** * Adds headers required for communication with the test proxy. * * @param request The request to add headers to. * @param proxyUrl The {@link URL} the proxy lives at. * @param xRecordingId The x-recording-id value for the current session. * @param mode The current test proxy mode. * @param skipRecordingRequestBody Flag indicating to skip recording request bodies when tests run in Record mode. * @throws RuntimeException Construction of one of the URLs failed. */ /** * Sets the response URL back to the original URL before returning it through the pipeline. * @param response The {@link HttpResponse} to modify. * @return The modified response. * @throws RuntimeException Construction of one of the URLs failed. */ public static HttpResponse revertUrl(HttpResponse response) { try { URL originalUrl = UrlBuilder.parse(response.getRequest().getHeaders() .getValue(X_RECORDING_UPSTREAM_BASE_URI)) .toUrl(); UrlBuilder currentUrl = UrlBuilder.parse(response.getRequest().getUrl()); currentUrl.setScheme(originalUrl.getProtocol()); currentUrl.setHost(originalUrl.getHost()); int port = originalUrl.getPort(); if (port == -1) { currentUrl.setPort(""); } else { currentUrl.setPort(port); } response.getRequest().setUrl(currentUrl.toUrl()); return response; } catch (MalformedURLException e) { throw new RuntimeException(e); } } /** * Gets the process name of the test proxy binary. * @return The platform specific process name. * @throws UnsupportedOperationException The current OS is not recognized. 
*/ public static String getProxyProcessName() { String osName = System.getProperty("os.name").toLowerCase(Locale.ROOT); if (osName.contains("windows")) { return "Azure.Sdk.Tools.TestProxy.exe"; } else if (osName.contains("linux")) { return "Azure.Sdk.Tools.TestProxy"; } else if (osName.contains("mac os x")) { return "Azure.Sdk.Tools.TestProxy"; } else { throw new UnsupportedOperationException(); } } /** * Checks the return from a request through the test proxy for special error headers. * @param httpResponse The {@link HttpResponse} from the test proxy. */ public static void checkForTestProxyErrors(HttpResponse httpResponse) { String error = httpResponse.getHeaderValue(X_REQUEST_MISMATCH_ERROR); if (error == null) { error = httpResponse.getHeaderValue(X_REQUEST_KNOWN_EXCEPTION_ERROR); } if (error == null) { error = httpResponse.getHeaderValue(X_REQUEST_EXCEPTION_EXCEPTION_ERROR); } if (error != null) { throw LOGGER.logExceptionAsError(new RuntimeException("Test proxy exception: " + new String(Base64.getDecoder().decode(error), StandardCharsets.UTF_8))); } } /** * Finds the test proxy version in the source tree. * @return The version string to use. * @throws RuntimeException The eng folder could not be located in the repo. * @throws UncheckedIOException The version file could not be read properly. */ public static String getTestProxyVersion() { Path rootPath = TestUtils.getRepoRoot(); Path versionFile = Paths.get("eng", "common", "testproxy", "target_version.txt"); rootPath = rootPath.resolve(versionFile); try { return Files.readAllLines(rootPath).get(0).replace(System.getProperty("line.separator"), ""); } catch (IOException e) { throw new UncheckedIOException(e); } } /** * Gets the current URL for the test proxy. * @return The {@link URL} location of the test proxy. * @throws RuntimeException The URL could not be constructed. 
*/ public static URL getProxyUrl() { if (proxyUrl != null) { return proxyUrl; } UrlBuilder builder = new UrlBuilder(); builder.setHost("localhost"); builder.setScheme("http"); builder.setPort(5000); try { proxyUrl = builder.toUrl(); return proxyUrl; } catch (MalformedURLException e) { throw new RuntimeException(e); } } /** * Registers the default set of sanitizers for sanitizing request and responses * @return the list of default sanitizers to be added. */ public static List<TestProxySanitizer> loadSanitizers() { List<TestProxySanitizer> sanitizers = new ArrayList<>(addDefaultRegexSanitizers()); sanitizers.add(addDefaultUrlSanitizer()); sanitizers.addAll(addDefaultBodySanitizers()); sanitizers.addAll(addDefaultHeaderKeySanitizers()); return sanitizers; } private static String createCustomMatcherRequestBody(CustomMatcher customMatcher) { return String.format("{\"ignoredHeaders\":\"%s\",\"excludedHeaders\":\"%s\",\"compareBodies\":%s,\"ignoredQueryParameters\":\"%s\", \"ignoreQueryOrdering\":%s}", getCommaSeperatedString(customMatcher.getHeadersKeyOnlyMatch()), getCommaSeperatedString(customMatcher.getExcludedHeaders()), customMatcher.isComparingBodies(), getCommaSeperatedString(customMatcher.getIgnoredQueryParameters()), customMatcher.isQueryOrderingIgnored()); } private static String getCommaSeperatedString(List<String> stringList) { if (stringList == null) { return null; } return stringList.stream() .filter(s -> s != null && !s.isEmpty()) .collect(Collectors.joining(",")); } private static String createBodyJsonKeyRequestBody(String jsonKey, String regex, String redactedValue) { if (regex == null) { return String.format("{\"value\":\"%s\",\"jsonPath\":\"%s\"}", redactedValue, jsonKey); } else { return String.format("{\"value\":\"%s\",\"jsonPath\":\"%s\",\"regex\":\"%s\"}", redactedValue, jsonKey, regex); } } private static String createRegexRequestBody(String key, String regex, String value, String groupForReplace) { if (key == null) { if (groupForReplace == null) 
{ return String.format("{\"value\":\"%s\",\"regex\":\"%s\"}", value, regex); } else { return String.format("{\"value\":\"%s\",\"regex\":\"%s\",\"groupForReplace\":\"%s\"}", value, regex, groupForReplace); } } else if (regex == null) { return String.format("{\"key\":\"%s\",\"value\":\"%s\"}", key, value); } if (groupForReplace == null) { return String.format("{\"key\":\"%s\",\"value\":\"%s\",\"regex\":\"%s\"}", key, value, regex); } else { return String.format("{\"key\":\"%s\",\"value\":\"%s\",\"regex\":\"%s\",\"groupForReplace\":\"%s\"}", key, value, regex, groupForReplace); } } /** * Creates a list of sanitizer requests to be sent to the test proxy server. * * @param sanitizers the list of sanitizers to be added. * @param proxyUrl The proxyUrl to use when constructing requests. * @return the list of sanitizer {@link HttpRequest requests} to be sent. * @throws RuntimeException if {@link TestProxySanitizerType} is not supported. */ public static List<HttpRequest> getSanitizerRequests(List<TestProxySanitizer> sanitizers, URL proxyUrl) { return sanitizers.stream().map(testProxySanitizer -> { String requestBody; String sanitizerType; switch (testProxySanitizer.getType()) { case URL: sanitizerType = TestProxySanitizerType.URL.getName(); requestBody = createRegexRequestBody(null, testProxySanitizer.getRegex(), testProxySanitizer.getRedactedValue(), testProxySanitizer.getGroupForReplace()); return createHttpRequest(requestBody, sanitizerType, proxyUrl); case BODY_REGEX: sanitizerType = TestProxySanitizerType.BODY_REGEX.getName(); requestBody = createRegexRequestBody(null, testProxySanitizer.getRegex(), testProxySanitizer.getRedactedValue(), testProxySanitizer.getGroupForReplace()); return createHttpRequest(requestBody, sanitizerType, proxyUrl); case BODY_KEY: sanitizerType = TestProxySanitizerType.BODY_KEY.getName(); requestBody = createBodyJsonKeyRequestBody(testProxySanitizer.getKey(), testProxySanitizer.getRegex(), testProxySanitizer.getRedactedValue()); return 
createHttpRequest(requestBody, sanitizerType, proxyUrl); case HEADER: sanitizerType = HEADER.getName(); if (testProxySanitizer.getKey() == null && testProxySanitizer.getRegex() == null) { throw new RuntimeException( String.format("Missing regexKey and/or headerKey for sanitizer type {%s}", sanitizerType)); } requestBody = createRegexRequestBody(testProxySanitizer.getKey(), testProxySanitizer.getRegex(), testProxySanitizer.getRedactedValue(), testProxySanitizer.getGroupForReplace()); return createHttpRequest(requestBody, sanitizerType, proxyUrl); default: throw new RuntimeException( String.format("Sanitizer type {%s} not supported", testProxySanitizer.getType())); } }).collect(Collectors.toList()); } private static HttpRequest createHttpRequest(String requestBody, String sanitizerType, URL proxyUrl) { HttpRequest request = new HttpRequest(HttpMethod.POST, String.format("%s/Admin/AddSanitizer", proxyUrl.toString())) .setBody(requestBody); request.setHeader(X_ABSTRACTION_IDENTIFIER, sanitizerType); return request; } /** * Creates a {@link List} of {@link HttpRequest} to be sent to the test proxy to register matchers. * @param matchers The {@link TestProxyRequestMatcher}s to encode into requests. * @param proxyUrl The proxyUrl to use when constructing requests. * @return The {@link HttpRequest}s to send to the proxy. * @throws RuntimeException The {@link TestProxyRequestMatcher.TestProxyRequestMatcherType} is unsupported. 
*/ public static List<HttpRequest> getMatcherRequests(List<TestProxyRequestMatcher> matchers, URL proxyUrl) { return matchers.stream().map(testProxyMatcher -> { HttpRequest request; String matcherType; switch (testProxyMatcher.getType()) { case HEADERLESS: matcherType = TestProxyRequestMatcher.TestProxyRequestMatcherType.HEADERLESS.getName(); request = new HttpRequest(HttpMethod.POST, String.format("%s/Admin/setmatcher", proxyUrl.toString())); break; case BODILESS: request = new HttpRequest(HttpMethod.POST, String.format("%s/Admin/setmatcher", proxyUrl.toString())); matcherType = TestProxyRequestMatcher.TestProxyRequestMatcherType.BODILESS.getName(); break; case CUSTOM: CustomMatcher customMatcher = (CustomMatcher) testProxyMatcher; String requestBody = createCustomMatcherRequestBody(customMatcher); matcherType = TestProxyRequestMatcher.TestProxyRequestMatcherType.CUSTOM.getName(); request = new HttpRequest(HttpMethod.POST, String.format("%s/Admin/setmatcher", proxyUrl.toString())).setBody(requestBody); break; default: throw new RuntimeException(String.format("Matcher type {%s} not supported", testProxyMatcher.getType())); } request.setHeader(X_ABSTRACTION_IDENTIFIER, matcherType); return request; }).collect(Collectors.toList()); } private static TestProxySanitizer addDefaultUrlSanitizer() { return new TestProxySanitizer(URL_REGEX, REDACTED_VALUE, TestProxySanitizerType.URL); } private static List<TestProxySanitizer> addDefaultBodySanitizers() { return JSON_PROPERTIES_TO_REDACT.stream() .map(jsonProperty -> new TestProxySanitizer(String.format("$..%s", jsonProperty), null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY)) .collect(Collectors.toList()); } private static List<TestProxySanitizer> addDefaultRegexSanitizers() { List<TestProxySanitizer> regexSanitizers = getUserDelegationSanitizers(); regexSanitizers.addAll(BODY_REGEX_TO_REDACT.stream() .map(bodyRegex -> new TestProxySanitizer(bodyRegex, REDACTED_VALUE, 
TestProxySanitizerType.BODY_REGEX).setGroupForReplace("secret")) .collect(Collectors.toList())); List<TestProxySanitizer> keyRegexSanitizers = new ArrayList<>(); HEADER_KEY_REGEX_TO_REDACT.forEach((key, regex) -> keyRegexSanitizers.add(new TestProxySanitizer(key, regex, REDACTED_VALUE, HEADER))); regexSanitizers.addAll(keyRegexSanitizers); return regexSanitizers; } private static List<TestProxySanitizer> addDefaultHeaderKeySanitizers() { return HEADER_KEYS_TO_REDACT.stream() .map(headerKey -> new TestProxySanitizer(headerKey, null, REDACTED_VALUE, HEADER)) .collect(Collectors.toList()); } private static List<TestProxySanitizer> getUserDelegationSanitizers() { List<TestProxySanitizer> userDelegationSanitizers = new ArrayList<>(); userDelegationSanitizers.add(new TestProxySanitizer(DELEGATION_KEY_CLIENTID_REGEX, REDACTED_VALUE, TestProxySanitizerType.BODY_REGEX).setGroupForReplace("secret")); userDelegationSanitizers.add(new TestProxySanitizer(DELEGATION_KEY_TENANTID_REGEX, REDACTED_VALUE, TestProxySanitizerType.BODY_REGEX).setGroupForReplace("secret")); return userDelegationSanitizers; } }
I don't think we currently have that ability with Test Proxy. @scbedd - do you know whether we support this in Test Proxy yet?
public static void changeHeaders(HttpRequest request, URL proxyUrl, String xRecordingId, String mode, boolean skipRecordingRequestBody) { HttpHeader upstreamUri = request.getHeaders().get(X_RECORDING_UPSTREAM_BASE_URI); UrlBuilder proxyUrlBuilder = UrlBuilder.parse(request.getUrl()); proxyUrlBuilder.setScheme(proxyUrl.getProtocol()); proxyUrlBuilder.setHost(proxyUrl.getHost()); if (proxyUrl.getPort() != -1) { proxyUrlBuilder.setPort(proxyUrl.getPort()); } UrlBuilder originalUrlBuilder = UrlBuilder.parse(request.getUrl()); originalUrlBuilder.setPath(""); originalUrlBuilder.setQuery(""); try { URL originalUrl = originalUrlBuilder.toUrl(); HttpHeaders headers = request.getHeaders(); if (upstreamUri == null) { headers.set(X_RECORDING_UPSTREAM_BASE_URI, originalUrl.toString()); headers.set(X_RECORDING_MODE, mode); headers.set(X_RECORDING_ID, xRecordingId); if (mode.equals(RECORD_MODE) && skipRecordingRequestBody) { headers.set(X_RECORDING_SKIP, "request-body"); } } request.setUrl(proxyUrlBuilder.toUrl()); } catch (MalformedURLException e) { throw new RuntimeException(e); } }
headers.set(X_RECORDING_SKIP, "request-body");
public static void changeHeaders(HttpRequest request, URL proxyUrl, String xRecordingId, String mode, boolean skipRecordingRequestBody) { HttpHeader upstreamUri = request.getHeaders().get(X_RECORDING_UPSTREAM_BASE_URI); UrlBuilder proxyUrlBuilder = UrlBuilder.parse(request.getUrl()); proxyUrlBuilder.setScheme(proxyUrl.getProtocol()); proxyUrlBuilder.setHost(proxyUrl.getHost()); if (proxyUrl.getPort() != -1) { proxyUrlBuilder.setPort(proxyUrl.getPort()); } UrlBuilder originalUrlBuilder = UrlBuilder.parse(request.getUrl()); originalUrlBuilder.setPath(""); originalUrlBuilder.setQuery(""); try { URL originalUrl = originalUrlBuilder.toUrl(); HttpHeaders headers = request.getHeaders(); if (upstreamUri == null) { headers.set(X_RECORDING_UPSTREAM_BASE_URI, originalUrl.toString()); headers.set(X_RECORDING_MODE, mode); headers.set(X_RECORDING_ID, xRecordingId); if (mode.equals(RECORD_MODE) && skipRecordingRequestBody) { headers.set(X_RECORDING_SKIP, "request-body"); } } request.setUrl(proxyUrlBuilder.toUrl()); } catch (MalformedURLException e) { throw new RuntimeException(e); } }
class TestProxyUtils { private static final ClientLogger LOGGER = new ClientLogger(TestProxyUtils.class); private static final List<String> JSON_PROPERTIES_TO_REDACT = new ArrayList<String>( Arrays.asList("authHeader", "accountKey", "accessToken", "accountName", "applicationId", "apiKey", "connectionString", "url", "host", "password", "userName")); private static final Map<String, String> HEADER_KEY_REGEX_TO_REDACT = new HashMap<String, String>() {{ put("Operation-Location", URL_REGEX); put("operation-location", URL_REGEX); put("Location", URL_REGEX); }}; private static final List<String> BODY_REGEX_TO_REDACT = new ArrayList<>(Arrays.asList("(?:<Value>)(?<secret>.*)(?:</Value>)", "(?:Password=)(?<secret>.*)(?:;)", "(?:User ID=)(?<secret>.*)(?:;)", "(?:<PrimaryKey>)(?<secret>.*)(?:</PrimaryKey>)", "(?:<SecondaryKey>)(?<secret>.*)(?:</SecondaryKey>)")); private static final String URL_REGEX = "(?<=http: private static final List<String> HEADER_KEYS_TO_REDACT = new ArrayList<>(Arrays.asList("Ocp-Apim-Subscription-Key", "api-key", "x-api-key")); private static final String REDACTED_VALUE = "REDACTED"; private static final String DELEGATION_KEY_CLIENTID_REGEX = "(?:<SignedOid>)(?<secret>.*)(?:</SignedOid>)"; private static final String DELEGATION_KEY_TENANTID_REGEX = "(?:<SignedTid>)(?<secret>.*)(?:</SignedTid>)"; private static final HttpHeaderName X_RECORDING_UPSTREAM_BASE_URI = HttpHeaderName.fromString("x-recording-upstream-base-uri"); private static final HttpHeaderName X_RECORDING_MODE = HttpHeaderName.fromString("x-recording-mode"); private static final HttpHeaderName X_REQUEST_MISMATCH_ERROR = HttpHeaderName.fromString("x-request-mismatch-error"); private static final HttpHeaderName X_REQUEST_KNOWN_EXCEPTION_ERROR = HttpHeaderName.fromString("x-request-known-exception-error"); private static final HttpHeaderName X_REQUEST_EXCEPTION_EXCEPTION_ERROR = HttpHeaderName.fromString("x-request-exception-exception-error"); private static final HttpHeaderName 
X_ABSTRACTION_IDENTIFIER = HttpHeaderName.fromString("x-abstraction-identifier"); private static volatile URL proxyUrl; /** * Adds headers required for communication with the test proxy. * * @param request The request to add headers to. * @param proxyUrl The {@link URL} the proxy lives at. * @param xRecordingId The x-recording-id value for the current session. * @param mode The current test proxy mode. * @param skipRecordingRequestBody Flag indicating to skip recording request bodies when tests run in Record mode. * @throws RuntimeException Construction of one of the URLs failed. */ /** * Sets the response URL back to the original URL before returning it through the pipeline. * @param response The {@link HttpResponse} to modify. * @return The modified response. * @throws RuntimeException Construction of one of the URLs failed. */ public static HttpResponse revertUrl(HttpResponse response) { try { URL originalUrl = UrlBuilder.parse(response.getRequest().getHeaders() .getValue(X_RECORDING_UPSTREAM_BASE_URI)) .toUrl(); UrlBuilder currentUrl = UrlBuilder.parse(response.getRequest().getUrl()); currentUrl.setScheme(originalUrl.getProtocol()); currentUrl.setHost(originalUrl.getHost()); int port = originalUrl.getPort(); if (port == -1) { currentUrl.setPort(""); } else { currentUrl.setPort(port); } response.getRequest().setUrl(currentUrl.toUrl()); return response; } catch (MalformedURLException e) { throw new RuntimeException(e); } } /** * Gets the process name of the test proxy binary. * @return The platform specific process name. * @throws UnsupportedOperationException The current OS is not recognized. 
*/ public static String getProxyProcessName() { String osName = System.getProperty("os.name").toLowerCase(Locale.ROOT); if (osName.contains("windows")) { return "Azure.Sdk.Tools.TestProxy.exe"; } else if (osName.contains("linux")) { return "Azure.Sdk.Tools.TestProxy"; } else if (osName.contains("mac os x")) { return "Azure.Sdk.Tools.TestProxy"; } else { throw new UnsupportedOperationException(); } } /** * Checks the return from a request through the test proxy for special error headers. * @param httpResponse The {@link HttpResponse} from the test proxy. */ public static void checkForTestProxyErrors(HttpResponse httpResponse) { String error = httpResponse.getHeaderValue(X_REQUEST_MISMATCH_ERROR); if (error == null) { error = httpResponse.getHeaderValue(X_REQUEST_KNOWN_EXCEPTION_ERROR); } if (error == null) { error = httpResponse.getHeaderValue(X_REQUEST_EXCEPTION_EXCEPTION_ERROR); } if (error != null) { throw LOGGER.logExceptionAsError(new RuntimeException("Test proxy exception: " + new String(Base64.getDecoder().decode(error), StandardCharsets.UTF_8))); } } /** * Finds the test proxy version in the source tree. * @return The version string to use. * @throws RuntimeException The eng folder could not be located in the repo. * @throws UncheckedIOException The version file could not be read properly. */ public static String getTestProxyVersion() { Path rootPath = TestUtils.getRepoRoot(); Path versionFile = Paths.get("eng", "common", "testproxy", "target_version.txt"); rootPath = rootPath.resolve(versionFile); try { return Files.readAllLines(rootPath).get(0).replace(System.getProperty("line.separator"), ""); } catch (IOException e) { throw new UncheckedIOException(e); } } /** * Gets the current URL for the test proxy. * @return The {@link URL} location of the test proxy. * @throws RuntimeException The URL could not be constructed. 
*/ public static URL getProxyUrl() { if (proxyUrl != null) { return proxyUrl; } UrlBuilder builder = new UrlBuilder(); builder.setHost("localhost"); builder.setScheme("http"); builder.setPort(5000); try { proxyUrl = builder.toUrl(); return proxyUrl; } catch (MalformedURLException e) { throw new RuntimeException(e); } } /** * Registers the default set of sanitizers for sanitizing request and responses * @return the list of default sanitizers to be added. */ public static List<TestProxySanitizer> loadSanitizers() { List<TestProxySanitizer> sanitizers = new ArrayList<>(addDefaultRegexSanitizers()); sanitizers.add(addDefaultUrlSanitizer()); sanitizers.addAll(addDefaultBodySanitizers()); sanitizers.addAll(addDefaultHeaderKeySanitizers()); return sanitizers; } private static String createCustomMatcherRequestBody(CustomMatcher customMatcher) { return String.format("{\"ignoredHeaders\":\"%s\",\"excludedHeaders\":\"%s\",\"compareBodies\":%s,\"ignoredQueryParameters\":\"%s\", \"ignoreQueryOrdering\":%s}", getCommaSeperatedString(customMatcher.getHeadersKeyOnlyMatch()), getCommaSeperatedString(customMatcher.getExcludedHeaders()), customMatcher.isComparingBodies(), getCommaSeperatedString(customMatcher.getIgnoredQueryParameters()), customMatcher.isQueryOrderingIgnored()); } private static String getCommaSeperatedString(List<String> stringList) { if (stringList == null) { return null; } return stringList.stream() .filter(s -> s != null && !s.isEmpty()) .collect(Collectors.joining(",")); } private static String createBodyJsonKeyRequestBody(String jsonKey, String regex, String redactedValue) { if (regex == null) { return String.format("{\"value\":\"%s\",\"jsonPath\":\"%s\"}", redactedValue, jsonKey); } else { return String.format("{\"value\":\"%s\",\"jsonPath\":\"%s\",\"regex\":\"%s\"}", redactedValue, jsonKey, regex); } } private static String createRegexRequestBody(String key, String regex, String value, String groupForReplace) { if (key == null) { if (groupForReplace == null) 
{ return String.format("{\"value\":\"%s\",\"regex\":\"%s\"}", value, regex); } else { return String.format("{\"value\":\"%s\",\"regex\":\"%s\",\"groupForReplace\":\"%s\"}", value, regex, groupForReplace); } } else if (regex == null) { return String.format("{\"key\":\"%s\",\"value\":\"%s\"}", key, value); } if (groupForReplace == null) { return String.format("{\"key\":\"%s\",\"value\":\"%s\",\"regex\":\"%s\"}", key, value, regex); } else { return String.format("{\"key\":\"%s\",\"value\":\"%s\",\"regex\":\"%s\",\"groupForReplace\":\"%s\"}", key, value, regex, groupForReplace); } } /** * Creates a list of sanitizer requests to be sent to the test proxy server. * * @param sanitizers the list of sanitizers to be added. * @param proxyUrl The proxyUrl to use when constructing requests. * @return the list of sanitizer {@link HttpRequest requests} to be sent. * @throws RuntimeException if {@link TestProxySanitizerType} is not supported. */ public static List<HttpRequest> getSanitizerRequests(List<TestProxySanitizer> sanitizers, URL proxyUrl) { return sanitizers.stream().map(testProxySanitizer -> { String requestBody; String sanitizerType; switch (testProxySanitizer.getType()) { case URL: sanitizerType = TestProxySanitizerType.URL.getName(); requestBody = createRegexRequestBody(null, testProxySanitizer.getRegex(), testProxySanitizer.getRedactedValue(), testProxySanitizer.getGroupForReplace()); return createHttpRequest(requestBody, sanitizerType, proxyUrl); case BODY_REGEX: sanitizerType = TestProxySanitizerType.BODY_REGEX.getName(); requestBody = createRegexRequestBody(null, testProxySanitizer.getRegex(), testProxySanitizer.getRedactedValue(), testProxySanitizer.getGroupForReplace()); return createHttpRequest(requestBody, sanitizerType, proxyUrl); case BODY_KEY: sanitizerType = TestProxySanitizerType.BODY_KEY.getName(); requestBody = createBodyJsonKeyRequestBody(testProxySanitizer.getKey(), testProxySanitizer.getRegex(), testProxySanitizer.getRedactedValue()); return 
createHttpRequest(requestBody, sanitizerType, proxyUrl); case HEADER: sanitizerType = HEADER.getName(); if (testProxySanitizer.getKey() == null && testProxySanitizer.getRegex() == null) { throw new RuntimeException( String.format("Missing regexKey and/or headerKey for sanitizer type {%s}", sanitizerType)); } requestBody = createRegexRequestBody(testProxySanitizer.getKey(), testProxySanitizer.getRegex(), testProxySanitizer.getRedactedValue(), testProxySanitizer.getGroupForReplace()); return createHttpRequest(requestBody, sanitizerType, proxyUrl); default: throw new RuntimeException( String.format("Sanitizer type {%s} not supported", testProxySanitizer.getType())); } }).collect(Collectors.toList()); } private static HttpRequest createHttpRequest(String requestBody, String sanitizerType, URL proxyUrl) { HttpRequest request = new HttpRequest(HttpMethod.POST, String.format("%s/Admin/AddSanitizer", proxyUrl.toString())) .setBody(requestBody); request.setHeader(X_ABSTRACTION_IDENTIFIER, sanitizerType); return request; } /** * Creates a {@link List} of {@link HttpRequest} to be sent to the test proxy to register matchers. * @param matchers The {@link TestProxyRequestMatcher}s to encode into requests. * @param proxyUrl The proxyUrl to use when constructing requests. * @return The {@link HttpRequest}s to send to the proxy. * @throws RuntimeException The {@link TestProxyRequestMatcher.TestProxyRequestMatcherType} is unsupported. 
*/ public static List<HttpRequest> getMatcherRequests(List<TestProxyRequestMatcher> matchers, URL proxyUrl) { return matchers.stream().map(testProxyMatcher -> { HttpRequest request; String matcherType; switch (testProxyMatcher.getType()) { case HEADERLESS: matcherType = TestProxyRequestMatcher.TestProxyRequestMatcherType.HEADERLESS.getName(); request = new HttpRequest(HttpMethod.POST, String.format("%s/Admin/setmatcher", proxyUrl.toString())); break; case BODILESS: request = new HttpRequest(HttpMethod.POST, String.format("%s/Admin/setmatcher", proxyUrl.toString())); matcherType = TestProxyRequestMatcher.TestProxyRequestMatcherType.BODILESS.getName(); break; case CUSTOM: CustomMatcher customMatcher = (CustomMatcher) testProxyMatcher; String requestBody = createCustomMatcherRequestBody(customMatcher); matcherType = TestProxyRequestMatcher.TestProxyRequestMatcherType.CUSTOM.getName(); request = new HttpRequest(HttpMethod.POST, String.format("%s/Admin/setmatcher", proxyUrl.toString())).setBody(requestBody); break; default: throw new RuntimeException(String.format("Matcher type {%s} not supported", testProxyMatcher.getType())); } request.setHeader(X_ABSTRACTION_IDENTIFIER, matcherType); return request; }).collect(Collectors.toList()); } private static TestProxySanitizer addDefaultUrlSanitizer() { return new TestProxySanitizer(URL_REGEX, REDACTED_VALUE, TestProxySanitizerType.URL); } private static List<TestProxySanitizer> addDefaultBodySanitizers() { return JSON_PROPERTIES_TO_REDACT.stream() .map(jsonProperty -> new TestProxySanitizer(String.format("$..%s", jsonProperty), null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY)) .collect(Collectors.toList()); } private static List<TestProxySanitizer> addDefaultRegexSanitizers() { List<TestProxySanitizer> regexSanitizers = getUserDelegationSanitizers(); regexSanitizers.addAll(BODY_REGEX_TO_REDACT.stream() .map(bodyRegex -> new TestProxySanitizer(bodyRegex, REDACTED_VALUE, 
TestProxySanitizerType.BODY_REGEX).setGroupForReplace("secret")) .collect(Collectors.toList())); List<TestProxySanitizer> keyRegexSanitizers = new ArrayList<>(); HEADER_KEY_REGEX_TO_REDACT.forEach((key, regex) -> keyRegexSanitizers.add(new TestProxySanitizer(key, regex, REDACTED_VALUE, HEADER))); regexSanitizers.addAll(keyRegexSanitizers); return regexSanitizers; } private static List<TestProxySanitizer> addDefaultHeaderKeySanitizers() { return HEADER_KEYS_TO_REDACT.stream() .map(headerKey -> new TestProxySanitizer(headerKey, null, REDACTED_VALUE, HEADER)) .collect(Collectors.toList()); } private static List<TestProxySanitizer> getUserDelegationSanitizers() { List<TestProxySanitizer> userDelegationSanitizers = new ArrayList<>(); userDelegationSanitizers.add(new TestProxySanitizer(DELEGATION_KEY_CLIENTID_REGEX, REDACTED_VALUE, TestProxySanitizerType.BODY_REGEX).setGroupForReplace("secret")); userDelegationSanitizers.add(new TestProxySanitizer(DELEGATION_KEY_TENANTID_REGEX, REDACTED_VALUE, TestProxySanitizerType.BODY_REGEX).setGroupForReplace("secret")); return userDelegationSanitizers; } }
class TestProxyUtils { private static final ClientLogger LOGGER = new ClientLogger(TestProxyUtils.class); private static final HttpHeaderName X_RECORDING_SKIP = HttpHeaderName.fromString("x-recording-skip"); private static final List<String> JSON_PROPERTIES_TO_REDACT = new ArrayList<String>( Arrays.asList("authHeader", "accountKey", "accessToken", "accountName", "applicationId", "apiKey", "connectionString", "url", "host", "password", "userName")); private static final Map<String, String> HEADER_KEY_REGEX_TO_REDACT = new HashMap<String, String>() {{ put("Operation-Location", URL_REGEX); put("operation-location", URL_REGEX); put("Location", URL_REGEX); }}; private static final List<String> BODY_REGEX_TO_REDACT = new ArrayList<>(Arrays.asList("(?:<Value>)(?<secret>.*)(?:</Value>)", "(?:Password=)(?<secret>.*)(?:;)", "(?:User ID=)(?<secret>.*)(?:;)", "(?:<PrimaryKey>)(?<secret>.*)(?:</PrimaryKey>)", "(?:<SecondaryKey>)(?<secret>.*)(?:</SecondaryKey>)")); private static final String URL_REGEX = "(?<=http: private static final List<String> HEADER_KEYS_TO_REDACT = new ArrayList<>(Arrays.asList("Ocp-Apim-Subscription-Key", "api-key", "x-api-key")); private static final String REDACTED_VALUE = "REDACTED"; private static final String DELEGATION_KEY_CLIENTID_REGEX = "(?:<SignedOid>)(?<secret>.*)(?:</SignedOid>)"; private static final String DELEGATION_KEY_TENANTID_REGEX = "(?:<SignedTid>)(?<secret>.*)(?:</SignedTid>)"; private static final HttpHeaderName X_RECORDING_UPSTREAM_BASE_URI = HttpHeaderName.fromString("x-recording-upstream-base-uri"); private static final HttpHeaderName X_RECORDING_MODE = HttpHeaderName.fromString("x-recording-mode"); private static final HttpHeaderName X_REQUEST_MISMATCH_ERROR = HttpHeaderName.fromString("x-request-mismatch-error"); private static final HttpHeaderName X_REQUEST_KNOWN_EXCEPTION_ERROR = HttpHeaderName.fromString("x-request-known-exception-error"); private static final HttpHeaderName X_REQUEST_EXCEPTION_EXCEPTION_ERROR = 
HttpHeaderName.fromString("x-request-exception-exception-error"); private static final HttpHeaderName X_ABSTRACTION_IDENTIFIER = HttpHeaderName.fromString("x-abstraction-identifier"); private static volatile URL proxyUrl; /** * Adds headers required for communication with the test proxy. * * @param request The request to add headers to. * @param proxyUrl The {@link URL} the proxy lives at. * @param xRecordingId The x-recording-id value for the current session. * @param mode The current test proxy mode. * @param skipRecordingRequestBody Flag indicating to skip recording request bodies when tests run in Record mode. * @throws RuntimeException Construction of one of the URLs failed. */ /** * Sets the response URL back to the original URL before returning it through the pipeline. * @param response The {@link HttpResponse} to modify. * @return The modified response. * @throws RuntimeException Construction of one of the URLs failed. */ public static HttpResponse revertUrl(HttpResponse response) { try { URL originalUrl = UrlBuilder.parse(response.getRequest().getHeaders() .getValue(X_RECORDING_UPSTREAM_BASE_URI)) .toUrl(); UrlBuilder currentUrl = UrlBuilder.parse(response.getRequest().getUrl()); currentUrl.setScheme(originalUrl.getProtocol()); currentUrl.setHost(originalUrl.getHost()); int port = originalUrl.getPort(); if (port == -1) { currentUrl.setPort(""); } else { currentUrl.setPort(port); } response.getRequest().setUrl(currentUrl.toUrl()); return response; } catch (MalformedURLException e) { throw new RuntimeException(e); } } /** * Gets the process name of the test proxy binary. * @return The platform specific process name. * @throws UnsupportedOperationException The current OS is not recognized. 
*/ public static String getProxyProcessName() { String osName = System.getProperty("os.name").toLowerCase(Locale.ROOT); if (osName.contains("windows")) { return "Azure.Sdk.Tools.TestProxy.exe"; } else if (osName.contains("linux")) { return "Azure.Sdk.Tools.TestProxy"; } else if (osName.contains("mac os x")) { return "Azure.Sdk.Tools.TestProxy"; } else { throw new UnsupportedOperationException(); } } /** * Checks the return from a request through the test proxy for special error headers. * @param httpResponse The {@link HttpResponse} from the test proxy. */ public static void checkForTestProxyErrors(HttpResponse httpResponse) { String error = httpResponse.getHeaderValue(X_REQUEST_MISMATCH_ERROR); if (error == null) { error = httpResponse.getHeaderValue(X_REQUEST_KNOWN_EXCEPTION_ERROR); } if (error == null) { error = httpResponse.getHeaderValue(X_REQUEST_EXCEPTION_EXCEPTION_ERROR); } if (error != null) { throw LOGGER.logExceptionAsError(new RuntimeException("Test proxy exception: " + new String(Base64.getDecoder().decode(error), StandardCharsets.UTF_8))); } } /** * Finds the test proxy version in the source tree. * @return The version string to use. * @throws RuntimeException The eng folder could not be located in the repo. * @throws UncheckedIOException The version file could not be read properly. */ public static String getTestProxyVersion() { Path rootPath = TestUtils.getRepoRoot(); Path versionFile = Paths.get("eng", "common", "testproxy", "target_version.txt"); rootPath = rootPath.resolve(versionFile); try { return Files.readAllLines(rootPath).get(0).replace(System.getProperty("line.separator"), ""); } catch (IOException e) { throw new UncheckedIOException(e); } } /** * Gets the current URL for the test proxy. * @return The {@link URL} location of the test proxy. * @throws RuntimeException The URL could not be constructed. 
*/ public static URL getProxyUrl() { if (proxyUrl != null) { return proxyUrl; } UrlBuilder builder = new UrlBuilder(); builder.setHost("localhost"); builder.setScheme("http"); builder.setPort(5000); try { proxyUrl = builder.toUrl(); return proxyUrl; } catch (MalformedURLException e) { throw new RuntimeException(e); } } /** * Registers the default set of sanitizers for sanitizing request and responses * @return the list of default sanitizers to be added. */ public static List<TestProxySanitizer> loadSanitizers() { List<TestProxySanitizer> sanitizers = new ArrayList<>(addDefaultRegexSanitizers()); sanitizers.add(addDefaultUrlSanitizer()); sanitizers.addAll(addDefaultBodySanitizers()); sanitizers.addAll(addDefaultHeaderKeySanitizers()); return sanitizers; } private static String createCustomMatcherRequestBody(CustomMatcher customMatcher) { return String.format("{\"ignoredHeaders\":\"%s\",\"excludedHeaders\":\"%s\",\"compareBodies\":%s,\"ignoredQueryParameters\":\"%s\", \"ignoreQueryOrdering\":%s}", getCommaSeperatedString(customMatcher.getHeadersKeyOnlyMatch()), getCommaSeperatedString(customMatcher.getExcludedHeaders()), customMatcher.isComparingBodies(), getCommaSeperatedString(customMatcher.getIgnoredQueryParameters()), customMatcher.isQueryOrderingIgnored()); } private static String getCommaSeperatedString(List<String> stringList) { if (stringList == null) { return null; } return stringList.stream() .filter(s -> s != null && !s.isEmpty()) .collect(Collectors.joining(",")); } private static String createBodyJsonKeyRequestBody(String jsonKey, String regex, String redactedValue) { if (regex == null) { return String.format("{\"value\":\"%s\",\"jsonPath\":\"%s\"}", redactedValue, jsonKey); } else { return String.format("{\"value\":\"%s\",\"jsonPath\":\"%s\",\"regex\":\"%s\"}", redactedValue, jsonKey, regex); } } private static String createRegexRequestBody(String key, String regex, String value, String groupForReplace) { if (key == null) { if (groupForReplace == null) 
{ return String.format("{\"value\":\"%s\",\"regex\":\"%s\"}", value, regex); } else { return String.format("{\"value\":\"%s\",\"regex\":\"%s\",\"groupForReplace\":\"%s\"}", value, regex, groupForReplace); } } else if (regex == null) { return String.format("{\"key\":\"%s\",\"value\":\"%s\"}", key, value); } if (groupForReplace == null) { return String.format("{\"key\":\"%s\",\"value\":\"%s\",\"regex\":\"%s\"}", key, value, regex); } else { return String.format("{\"key\":\"%s\",\"value\":\"%s\",\"regex\":\"%s\",\"groupForReplace\":\"%s\"}", key, value, regex, groupForReplace); } } /** * Creates a list of sanitizer requests to be sent to the test proxy server. * * @param sanitizers the list of sanitizers to be added. * @param proxyUrl The proxyUrl to use when constructing requests. * @return the list of sanitizer {@link HttpRequest requests} to be sent. * @throws RuntimeException if {@link TestProxySanitizerType} is not supported. */ public static List<HttpRequest> getSanitizerRequests(List<TestProxySanitizer> sanitizers, URL proxyUrl) { return sanitizers.stream().map(testProxySanitizer -> { String requestBody; String sanitizerType; switch (testProxySanitizer.getType()) { case URL: sanitizerType = TestProxySanitizerType.URL.getName(); requestBody = createRegexRequestBody(null, testProxySanitizer.getRegex(), testProxySanitizer.getRedactedValue(), testProxySanitizer.getGroupForReplace()); return createHttpRequest(requestBody, sanitizerType, proxyUrl); case BODY_REGEX: sanitizerType = TestProxySanitizerType.BODY_REGEX.getName(); requestBody = createRegexRequestBody(null, testProxySanitizer.getRegex(), testProxySanitizer.getRedactedValue(), testProxySanitizer.getGroupForReplace()); return createHttpRequest(requestBody, sanitizerType, proxyUrl); case BODY_KEY: sanitizerType = TestProxySanitizerType.BODY_KEY.getName(); requestBody = createBodyJsonKeyRequestBody(testProxySanitizer.getKey(), testProxySanitizer.getRegex(), testProxySanitizer.getRedactedValue()); return 
createHttpRequest(requestBody, sanitizerType, proxyUrl); case HEADER: sanitizerType = HEADER.getName(); if (testProxySanitizer.getKey() == null && testProxySanitizer.getRegex() == null) { throw new RuntimeException( String.format("Missing regexKey and/or headerKey for sanitizer type {%s}", sanitizerType)); } requestBody = createRegexRequestBody(testProxySanitizer.getKey(), testProxySanitizer.getRegex(), testProxySanitizer.getRedactedValue(), testProxySanitizer.getGroupForReplace()); return createHttpRequest(requestBody, sanitizerType, proxyUrl); default: throw new RuntimeException( String.format("Sanitizer type {%s} not supported", testProxySanitizer.getType())); } }).collect(Collectors.toList()); } private static HttpRequest createHttpRequest(String requestBody, String sanitizerType, URL proxyUrl) { HttpRequest request = new HttpRequest(HttpMethod.POST, String.format("%s/Admin/AddSanitizer", proxyUrl.toString())) .setBody(requestBody); request.setHeader(X_ABSTRACTION_IDENTIFIER, sanitizerType); return request; } /** * Creates a {@link List} of {@link HttpRequest} to be sent to the test proxy to register matchers. * @param matchers The {@link TestProxyRequestMatcher}s to encode into requests. * @param proxyUrl The proxyUrl to use when constructing requests. * @return The {@link HttpRequest}s to send to the proxy. * @throws RuntimeException The {@link TestProxyRequestMatcher.TestProxyRequestMatcherType} is unsupported. 
*/ public static List<HttpRequest> getMatcherRequests(List<TestProxyRequestMatcher> matchers, URL proxyUrl) { return matchers.stream().map(testProxyMatcher -> { HttpRequest request; String matcherType; switch (testProxyMatcher.getType()) { case HEADERLESS: matcherType = TestProxyRequestMatcher.TestProxyRequestMatcherType.HEADERLESS.getName(); request = new HttpRequest(HttpMethod.POST, String.format("%s/Admin/setmatcher", proxyUrl.toString())); break; case BODILESS: request = new HttpRequest(HttpMethod.POST, String.format("%s/Admin/setmatcher", proxyUrl.toString())); matcherType = TestProxyRequestMatcher.TestProxyRequestMatcherType.BODILESS.getName(); break; case CUSTOM: CustomMatcher customMatcher = (CustomMatcher) testProxyMatcher; String requestBody = createCustomMatcherRequestBody(customMatcher); matcherType = TestProxyRequestMatcher.TestProxyRequestMatcherType.CUSTOM.getName(); request = new HttpRequest(HttpMethod.POST, String.format("%s/Admin/setmatcher", proxyUrl.toString())).setBody(requestBody); break; default: throw new RuntimeException(String.format("Matcher type {%s} not supported", testProxyMatcher.getType())); } request.setHeader(X_ABSTRACTION_IDENTIFIER, matcherType); return request; }).collect(Collectors.toList()); } private static TestProxySanitizer addDefaultUrlSanitizer() { return new TestProxySanitizer(URL_REGEX, REDACTED_VALUE, TestProxySanitizerType.URL); } private static List<TestProxySanitizer> addDefaultBodySanitizers() { return JSON_PROPERTIES_TO_REDACT.stream() .map(jsonProperty -> new TestProxySanitizer(String.format("$..%s", jsonProperty), null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY)) .collect(Collectors.toList()); } private static List<TestProxySanitizer> addDefaultRegexSanitizers() { List<TestProxySanitizer> regexSanitizers = getUserDelegationSanitizers(); regexSanitizers.addAll(BODY_REGEX_TO_REDACT.stream() .map(bodyRegex -> new TestProxySanitizer(bodyRegex, REDACTED_VALUE, 
TestProxySanitizerType.BODY_REGEX).setGroupForReplace("secret")) .collect(Collectors.toList())); List<TestProxySanitizer> keyRegexSanitizers = new ArrayList<>(); HEADER_KEY_REGEX_TO_REDACT.forEach((key, regex) -> keyRegexSanitizers.add(new TestProxySanitizer(key, regex, REDACTED_VALUE, HEADER))); regexSanitizers.addAll(keyRegexSanitizers); return regexSanitizers; } private static List<TestProxySanitizer> addDefaultHeaderKeySanitizers() { return HEADER_KEYS_TO_REDACT.stream() .map(headerKey -> new TestProxySanitizer(headerKey, null, REDACTED_VALUE, HEADER)) .collect(Collectors.toList()); } private static List<TestProxySanitizer> getUserDelegationSanitizers() { List<TestProxySanitizer> userDelegationSanitizers = new ArrayList<>(); userDelegationSanitizers.add(new TestProxySanitizer(DELEGATION_KEY_CLIENTID_REGEX, REDACTED_VALUE, TestProxySanitizerType.BODY_REGEX).setGroupForReplace("secret")); userDelegationSanitizers.add(new TestProxySanitizer(DELEGATION_KEY_TENANTID_REGEX, REDACTED_VALUE, TestProxySanitizerType.BODY_REGEX).setGroupForReplace("secret")); return userDelegationSanitizers; } }
We do not yet. https://github.com/Azure/azure-sdk-tools/issues/6074
public static void changeHeaders(HttpRequest request, URL proxyUrl, String xRecordingId, String mode, boolean skipRecordingRequestBody) { HttpHeader upstreamUri = request.getHeaders().get(X_RECORDING_UPSTREAM_BASE_URI); UrlBuilder proxyUrlBuilder = UrlBuilder.parse(request.getUrl()); proxyUrlBuilder.setScheme(proxyUrl.getProtocol()); proxyUrlBuilder.setHost(proxyUrl.getHost()); if (proxyUrl.getPort() != -1) { proxyUrlBuilder.setPort(proxyUrl.getPort()); } UrlBuilder originalUrlBuilder = UrlBuilder.parse(request.getUrl()); originalUrlBuilder.setPath(""); originalUrlBuilder.setQuery(""); try { URL originalUrl = originalUrlBuilder.toUrl(); HttpHeaders headers = request.getHeaders(); if (upstreamUri == null) { headers.set(X_RECORDING_UPSTREAM_BASE_URI, originalUrl.toString()); headers.set(X_RECORDING_MODE, mode); headers.set(X_RECORDING_ID, xRecordingId); if (mode.equals(RECORD_MODE) && skipRecordingRequestBody) { headers.set(X_RECORDING_SKIP, "request-body"); } } request.setUrl(proxyUrlBuilder.toUrl()); } catch (MalformedURLException e) { throw new RuntimeException(e); } }
headers.set(X_RECORDING_SKIP, "request-body");
public static void changeHeaders(HttpRequest request, URL proxyUrl, String xRecordingId, String mode, boolean skipRecordingRequestBody) { HttpHeader upstreamUri = request.getHeaders().get(X_RECORDING_UPSTREAM_BASE_URI); UrlBuilder proxyUrlBuilder = UrlBuilder.parse(request.getUrl()); proxyUrlBuilder.setScheme(proxyUrl.getProtocol()); proxyUrlBuilder.setHost(proxyUrl.getHost()); if (proxyUrl.getPort() != -1) { proxyUrlBuilder.setPort(proxyUrl.getPort()); } UrlBuilder originalUrlBuilder = UrlBuilder.parse(request.getUrl()); originalUrlBuilder.setPath(""); originalUrlBuilder.setQuery(""); try { URL originalUrl = originalUrlBuilder.toUrl(); HttpHeaders headers = request.getHeaders(); if (upstreamUri == null) { headers.set(X_RECORDING_UPSTREAM_BASE_URI, originalUrl.toString()); headers.set(X_RECORDING_MODE, mode); headers.set(X_RECORDING_ID, xRecordingId); if (mode.equals(RECORD_MODE) && skipRecordingRequestBody) { headers.set(X_RECORDING_SKIP, "request-body"); } } request.setUrl(proxyUrlBuilder.toUrl()); } catch (MalformedURLException e) { throw new RuntimeException(e); } }
class TestProxyUtils { private static final ClientLogger LOGGER = new ClientLogger(TestProxyUtils.class); private static final List<String> JSON_PROPERTIES_TO_REDACT = new ArrayList<String>( Arrays.asList("authHeader", "accountKey", "accessToken", "accountName", "applicationId", "apiKey", "connectionString", "url", "host", "password", "userName")); private static final Map<String, String> HEADER_KEY_REGEX_TO_REDACT = new HashMap<String, String>() {{ put("Operation-Location", URL_REGEX); put("operation-location", URL_REGEX); put("Location", URL_REGEX); }}; private static final List<String> BODY_REGEX_TO_REDACT = new ArrayList<>(Arrays.asList("(?:<Value>)(?<secret>.*)(?:</Value>)", "(?:Password=)(?<secret>.*)(?:;)", "(?:User ID=)(?<secret>.*)(?:;)", "(?:<PrimaryKey>)(?<secret>.*)(?:</PrimaryKey>)", "(?:<SecondaryKey>)(?<secret>.*)(?:</SecondaryKey>)")); private static final String URL_REGEX = "(?<=http: private static final List<String> HEADER_KEYS_TO_REDACT = new ArrayList<>(Arrays.asList("Ocp-Apim-Subscription-Key", "api-key", "x-api-key")); private static final String REDACTED_VALUE = "REDACTED"; private static final String DELEGATION_KEY_CLIENTID_REGEX = "(?:<SignedOid>)(?<secret>.*)(?:</SignedOid>)"; private static final String DELEGATION_KEY_TENANTID_REGEX = "(?:<SignedTid>)(?<secret>.*)(?:</SignedTid>)"; private static final HttpHeaderName X_RECORDING_UPSTREAM_BASE_URI = HttpHeaderName.fromString("x-recording-upstream-base-uri"); private static final HttpHeaderName X_RECORDING_MODE = HttpHeaderName.fromString("x-recording-mode"); private static final HttpHeaderName X_REQUEST_MISMATCH_ERROR = HttpHeaderName.fromString("x-request-mismatch-error"); private static final HttpHeaderName X_REQUEST_KNOWN_EXCEPTION_ERROR = HttpHeaderName.fromString("x-request-known-exception-error"); private static final HttpHeaderName X_REQUEST_EXCEPTION_EXCEPTION_ERROR = HttpHeaderName.fromString("x-request-exception-exception-error"); private static final HttpHeaderName 
X_ABSTRACTION_IDENTIFIER = HttpHeaderName.fromString("x-abstraction-identifier"); private static volatile URL proxyUrl; /** * Adds headers required for communication with the test proxy. * * @param request The request to add headers to. * @param proxyUrl The {@link URL} the proxy lives at. * @param xRecordingId The x-recording-id value for the current session. * @param mode The current test proxy mode. * @param skipRecordingRequestBody Flag indicating to skip recording request bodies when tests run in Record mode. * @throws RuntimeException Construction of one of the URLs failed. */ /** * Sets the response URL back to the original URL before returning it through the pipeline. * @param response The {@link HttpResponse} to modify. * @return The modified response. * @throws RuntimeException Construction of one of the URLs failed. */ public static HttpResponse revertUrl(HttpResponse response) { try { URL originalUrl = UrlBuilder.parse(response.getRequest().getHeaders() .getValue(X_RECORDING_UPSTREAM_BASE_URI)) .toUrl(); UrlBuilder currentUrl = UrlBuilder.parse(response.getRequest().getUrl()); currentUrl.setScheme(originalUrl.getProtocol()); currentUrl.setHost(originalUrl.getHost()); int port = originalUrl.getPort(); if (port == -1) { currentUrl.setPort(""); } else { currentUrl.setPort(port); } response.getRequest().setUrl(currentUrl.toUrl()); return response; } catch (MalformedURLException e) { throw new RuntimeException(e); } } /** * Gets the process name of the test proxy binary. * @return The platform specific process name. * @throws UnsupportedOperationException The current OS is not recognized. 
*/ public static String getProxyProcessName() { String osName = System.getProperty("os.name").toLowerCase(Locale.ROOT); if (osName.contains("windows")) { return "Azure.Sdk.Tools.TestProxy.exe"; } else if (osName.contains("linux")) { return "Azure.Sdk.Tools.TestProxy"; } else if (osName.contains("mac os x")) { return "Azure.Sdk.Tools.TestProxy"; } else { throw new UnsupportedOperationException(); } } /** * Checks the return from a request through the test proxy for special error headers. * @param httpResponse The {@link HttpResponse} from the test proxy. */ public static void checkForTestProxyErrors(HttpResponse httpResponse) { String error = httpResponse.getHeaderValue(X_REQUEST_MISMATCH_ERROR); if (error == null) { error = httpResponse.getHeaderValue(X_REQUEST_KNOWN_EXCEPTION_ERROR); } if (error == null) { error = httpResponse.getHeaderValue(X_REQUEST_EXCEPTION_EXCEPTION_ERROR); } if (error != null) { throw LOGGER.logExceptionAsError(new RuntimeException("Test proxy exception: " + new String(Base64.getDecoder().decode(error), StandardCharsets.UTF_8))); } } /** * Finds the test proxy version in the source tree. * @return The version string to use. * @throws RuntimeException The eng folder could not be located in the repo. * @throws UncheckedIOException The version file could not be read properly. */ public static String getTestProxyVersion() { Path rootPath = TestUtils.getRepoRoot(); Path versionFile = Paths.get("eng", "common", "testproxy", "target_version.txt"); rootPath = rootPath.resolve(versionFile); try { return Files.readAllLines(rootPath).get(0).replace(System.getProperty("line.separator"), ""); } catch (IOException e) { throw new UncheckedIOException(e); } } /** * Gets the current URL for the test proxy. * @return The {@link URL} location of the test proxy. * @throws RuntimeException The URL could not be constructed. 
*/ public static URL getProxyUrl() { if (proxyUrl != null) { return proxyUrl; } UrlBuilder builder = new UrlBuilder(); builder.setHost("localhost"); builder.setScheme("http"); builder.setPort(5000); try { proxyUrl = builder.toUrl(); return proxyUrl; } catch (MalformedURLException e) { throw new RuntimeException(e); } } /** * Registers the default set of sanitizers for sanitizing request and responses * @return the list of default sanitizers to be added. */ public static List<TestProxySanitizer> loadSanitizers() { List<TestProxySanitizer> sanitizers = new ArrayList<>(addDefaultRegexSanitizers()); sanitizers.add(addDefaultUrlSanitizer()); sanitizers.addAll(addDefaultBodySanitizers()); sanitizers.addAll(addDefaultHeaderKeySanitizers()); return sanitizers; } private static String createCustomMatcherRequestBody(CustomMatcher customMatcher) { return String.format("{\"ignoredHeaders\":\"%s\",\"excludedHeaders\":\"%s\",\"compareBodies\":%s,\"ignoredQueryParameters\":\"%s\", \"ignoreQueryOrdering\":%s}", getCommaSeperatedString(customMatcher.getHeadersKeyOnlyMatch()), getCommaSeperatedString(customMatcher.getExcludedHeaders()), customMatcher.isComparingBodies(), getCommaSeperatedString(customMatcher.getIgnoredQueryParameters()), customMatcher.isQueryOrderingIgnored()); } private static String getCommaSeperatedString(List<String> stringList) { if (stringList == null) { return null; } return stringList.stream() .filter(s -> s != null && !s.isEmpty()) .collect(Collectors.joining(",")); } private static String createBodyJsonKeyRequestBody(String jsonKey, String regex, String redactedValue) { if (regex == null) { return String.format("{\"value\":\"%s\",\"jsonPath\":\"%s\"}", redactedValue, jsonKey); } else { return String.format("{\"value\":\"%s\",\"jsonPath\":\"%s\",\"regex\":\"%s\"}", redactedValue, jsonKey, regex); } } private static String createRegexRequestBody(String key, String regex, String value, String groupForReplace) { if (key == null) { if (groupForReplace == null) 
{ return String.format("{\"value\":\"%s\",\"regex\":\"%s\"}", value, regex); } else { return String.format("{\"value\":\"%s\",\"regex\":\"%s\",\"groupForReplace\":\"%s\"}", value, regex, groupForReplace); } } else if (regex == null) { return String.format("{\"key\":\"%s\",\"value\":\"%s\"}", key, value); } if (groupForReplace == null) { return String.format("{\"key\":\"%s\",\"value\":\"%s\",\"regex\":\"%s\"}", key, value, regex); } else { return String.format("{\"key\":\"%s\",\"value\":\"%s\",\"regex\":\"%s\",\"groupForReplace\":\"%s\"}", key, value, regex, groupForReplace); } } /** * Creates a list of sanitizer requests to be sent to the test proxy server. * * @param sanitizers the list of sanitizers to be added. * @param proxyUrl The proxyUrl to use when constructing requests. * @return the list of sanitizer {@link HttpRequest requests} to be sent. * @throws RuntimeException if {@link TestProxySanitizerType} is not supported. */ public static List<HttpRequest> getSanitizerRequests(List<TestProxySanitizer> sanitizers, URL proxyUrl) { return sanitizers.stream().map(testProxySanitizer -> { String requestBody; String sanitizerType; switch (testProxySanitizer.getType()) { case URL: sanitizerType = TestProxySanitizerType.URL.getName(); requestBody = createRegexRequestBody(null, testProxySanitizer.getRegex(), testProxySanitizer.getRedactedValue(), testProxySanitizer.getGroupForReplace()); return createHttpRequest(requestBody, sanitizerType, proxyUrl); case BODY_REGEX: sanitizerType = TestProxySanitizerType.BODY_REGEX.getName(); requestBody = createRegexRequestBody(null, testProxySanitizer.getRegex(), testProxySanitizer.getRedactedValue(), testProxySanitizer.getGroupForReplace()); return createHttpRequest(requestBody, sanitizerType, proxyUrl); case BODY_KEY: sanitizerType = TestProxySanitizerType.BODY_KEY.getName(); requestBody = createBodyJsonKeyRequestBody(testProxySanitizer.getKey(), testProxySanitizer.getRegex(), testProxySanitizer.getRedactedValue()); return 
createHttpRequest(requestBody, sanitizerType, proxyUrl); case HEADER: sanitizerType = HEADER.getName(); if (testProxySanitizer.getKey() == null && testProxySanitizer.getRegex() == null) { throw new RuntimeException( String.format("Missing regexKey and/or headerKey for sanitizer type {%s}", sanitizerType)); } requestBody = createRegexRequestBody(testProxySanitizer.getKey(), testProxySanitizer.getRegex(), testProxySanitizer.getRedactedValue(), testProxySanitizer.getGroupForReplace()); return createHttpRequest(requestBody, sanitizerType, proxyUrl); default: throw new RuntimeException( String.format("Sanitizer type {%s} not supported", testProxySanitizer.getType())); } }).collect(Collectors.toList()); } private static HttpRequest createHttpRequest(String requestBody, String sanitizerType, URL proxyUrl) { HttpRequest request = new HttpRequest(HttpMethod.POST, String.format("%s/Admin/AddSanitizer", proxyUrl.toString())) .setBody(requestBody); request.setHeader(X_ABSTRACTION_IDENTIFIER, sanitizerType); return request; } /** * Creates a {@link List} of {@link HttpRequest} to be sent to the test proxy to register matchers. * @param matchers The {@link TestProxyRequestMatcher}s to encode into requests. * @param proxyUrl The proxyUrl to use when constructing requests. * @return The {@link HttpRequest}s to send to the proxy. * @throws RuntimeException The {@link TestProxyRequestMatcher.TestProxyRequestMatcherType} is unsupported. 
*/ public static List<HttpRequest> getMatcherRequests(List<TestProxyRequestMatcher> matchers, URL proxyUrl) { return matchers.stream().map(testProxyMatcher -> { HttpRequest request; String matcherType; switch (testProxyMatcher.getType()) { case HEADERLESS: matcherType = TestProxyRequestMatcher.TestProxyRequestMatcherType.HEADERLESS.getName(); request = new HttpRequest(HttpMethod.POST, String.format("%s/Admin/setmatcher", proxyUrl.toString())); break; case BODILESS: request = new HttpRequest(HttpMethod.POST, String.format("%s/Admin/setmatcher", proxyUrl.toString())); matcherType = TestProxyRequestMatcher.TestProxyRequestMatcherType.BODILESS.getName(); break; case CUSTOM: CustomMatcher customMatcher = (CustomMatcher) testProxyMatcher; String requestBody = createCustomMatcherRequestBody(customMatcher); matcherType = TestProxyRequestMatcher.TestProxyRequestMatcherType.CUSTOM.getName(); request = new HttpRequest(HttpMethod.POST, String.format("%s/Admin/setmatcher", proxyUrl.toString())).setBody(requestBody); break; default: throw new RuntimeException(String.format("Matcher type {%s} not supported", testProxyMatcher.getType())); } request.setHeader(X_ABSTRACTION_IDENTIFIER, matcherType); return request; }).collect(Collectors.toList()); } private static TestProxySanitizer addDefaultUrlSanitizer() { return new TestProxySanitizer(URL_REGEX, REDACTED_VALUE, TestProxySanitizerType.URL); } private static List<TestProxySanitizer> addDefaultBodySanitizers() { return JSON_PROPERTIES_TO_REDACT.stream() .map(jsonProperty -> new TestProxySanitizer(String.format("$..%s", jsonProperty), null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY)) .collect(Collectors.toList()); } private static List<TestProxySanitizer> addDefaultRegexSanitizers() { List<TestProxySanitizer> regexSanitizers = getUserDelegationSanitizers(); regexSanitizers.addAll(BODY_REGEX_TO_REDACT.stream() .map(bodyRegex -> new TestProxySanitizer(bodyRegex, REDACTED_VALUE, 
TestProxySanitizerType.BODY_REGEX).setGroupForReplace("secret")) .collect(Collectors.toList())); List<TestProxySanitizer> keyRegexSanitizers = new ArrayList<>(); HEADER_KEY_REGEX_TO_REDACT.forEach((key, regex) -> keyRegexSanitizers.add(new TestProxySanitizer(key, regex, REDACTED_VALUE, HEADER))); regexSanitizers.addAll(keyRegexSanitizers); return regexSanitizers; } private static List<TestProxySanitizer> addDefaultHeaderKeySanitizers() { return HEADER_KEYS_TO_REDACT.stream() .map(headerKey -> new TestProxySanitizer(headerKey, null, REDACTED_VALUE, HEADER)) .collect(Collectors.toList()); } private static List<TestProxySanitizer> getUserDelegationSanitizers() { List<TestProxySanitizer> userDelegationSanitizers = new ArrayList<>(); userDelegationSanitizers.add(new TestProxySanitizer(DELEGATION_KEY_CLIENTID_REGEX, REDACTED_VALUE, TestProxySanitizerType.BODY_REGEX).setGroupForReplace("secret")); userDelegationSanitizers.add(new TestProxySanitizer(DELEGATION_KEY_TENANTID_REGEX, REDACTED_VALUE, TestProxySanitizerType.BODY_REGEX).setGroupForReplace("secret")); return userDelegationSanitizers; } }
class TestProxyUtils { private static final ClientLogger LOGGER = new ClientLogger(TestProxyUtils.class); private static final HttpHeaderName X_RECORDING_SKIP = HttpHeaderName.fromString("x-recording-skip"); private static final List<String> JSON_PROPERTIES_TO_REDACT = new ArrayList<String>( Arrays.asList("authHeader", "accountKey", "accessToken", "accountName", "applicationId", "apiKey", "connectionString", "url", "host", "password", "userName")); private static final Map<String, String> HEADER_KEY_REGEX_TO_REDACT = new HashMap<String, String>() {{ put("Operation-Location", URL_REGEX); put("operation-location", URL_REGEX); put("Location", URL_REGEX); }}; private static final List<String> BODY_REGEX_TO_REDACT = new ArrayList<>(Arrays.asList("(?:<Value>)(?<secret>.*)(?:</Value>)", "(?:Password=)(?<secret>.*)(?:;)", "(?:User ID=)(?<secret>.*)(?:;)", "(?:<PrimaryKey>)(?<secret>.*)(?:</PrimaryKey>)", "(?:<SecondaryKey>)(?<secret>.*)(?:</SecondaryKey>)")); private static final String URL_REGEX = "(?<=http: private static final List<String> HEADER_KEYS_TO_REDACT = new ArrayList<>(Arrays.asList("Ocp-Apim-Subscription-Key", "api-key", "x-api-key")); private static final String REDACTED_VALUE = "REDACTED"; private static final String DELEGATION_KEY_CLIENTID_REGEX = "(?:<SignedOid>)(?<secret>.*)(?:</SignedOid>)"; private static final String DELEGATION_KEY_TENANTID_REGEX = "(?:<SignedTid>)(?<secret>.*)(?:</SignedTid>)"; private static final HttpHeaderName X_RECORDING_UPSTREAM_BASE_URI = HttpHeaderName.fromString("x-recording-upstream-base-uri"); private static final HttpHeaderName X_RECORDING_MODE = HttpHeaderName.fromString("x-recording-mode"); private static final HttpHeaderName X_REQUEST_MISMATCH_ERROR = HttpHeaderName.fromString("x-request-mismatch-error"); private static final HttpHeaderName X_REQUEST_KNOWN_EXCEPTION_ERROR = HttpHeaderName.fromString("x-request-known-exception-error"); private static final HttpHeaderName X_REQUEST_EXCEPTION_EXCEPTION_ERROR = 
HttpHeaderName.fromString("x-request-exception-exception-error"); private static final HttpHeaderName X_ABSTRACTION_IDENTIFIER = HttpHeaderName.fromString("x-abstraction-identifier"); private static volatile URL proxyUrl; /** * Adds headers required for communication with the test proxy. * * @param request The request to add headers to. * @param proxyUrl The {@link URL} the proxy lives at. * @param xRecordingId The x-recording-id value for the current session. * @param mode The current test proxy mode. * @param skipRecordingRequestBody Flag indicating to skip recording request bodies when tests run in Record mode. * @throws RuntimeException Construction of one of the URLs failed. */ /** * Sets the response URL back to the original URL before returning it through the pipeline. * @param response The {@link HttpResponse} to modify. * @return The modified response. * @throws RuntimeException Construction of one of the URLs failed. */ public static HttpResponse revertUrl(HttpResponse response) { try { URL originalUrl = UrlBuilder.parse(response.getRequest().getHeaders() .getValue(X_RECORDING_UPSTREAM_BASE_URI)) .toUrl(); UrlBuilder currentUrl = UrlBuilder.parse(response.getRequest().getUrl()); currentUrl.setScheme(originalUrl.getProtocol()); currentUrl.setHost(originalUrl.getHost()); int port = originalUrl.getPort(); if (port == -1) { currentUrl.setPort(""); } else { currentUrl.setPort(port); } response.getRequest().setUrl(currentUrl.toUrl()); return response; } catch (MalformedURLException e) { throw new RuntimeException(e); } } /** * Gets the process name of the test proxy binary. * @return The platform specific process name. * @throws UnsupportedOperationException The current OS is not recognized. 
*/ public static String getProxyProcessName() { String osName = System.getProperty("os.name").toLowerCase(Locale.ROOT); if (osName.contains("windows")) { return "Azure.Sdk.Tools.TestProxy.exe"; } else if (osName.contains("linux")) { return "Azure.Sdk.Tools.TestProxy"; } else if (osName.contains("mac os x")) { return "Azure.Sdk.Tools.TestProxy"; } else { throw new UnsupportedOperationException(); } } /** * Checks the return from a request through the test proxy for special error headers. * @param httpResponse The {@link HttpResponse} from the test proxy. */ public static void checkForTestProxyErrors(HttpResponse httpResponse) { String error = httpResponse.getHeaderValue(X_REQUEST_MISMATCH_ERROR); if (error == null) { error = httpResponse.getHeaderValue(X_REQUEST_KNOWN_EXCEPTION_ERROR); } if (error == null) { error = httpResponse.getHeaderValue(X_REQUEST_EXCEPTION_EXCEPTION_ERROR); } if (error != null) { throw LOGGER.logExceptionAsError(new RuntimeException("Test proxy exception: " + new String(Base64.getDecoder().decode(error), StandardCharsets.UTF_8))); } } /** * Finds the test proxy version in the source tree. * @return The version string to use. * @throws RuntimeException The eng folder could not be located in the repo. * @throws UncheckedIOException The version file could not be read properly. */ public static String getTestProxyVersion() { Path rootPath = TestUtils.getRepoRoot(); Path versionFile = Paths.get("eng", "common", "testproxy", "target_version.txt"); rootPath = rootPath.resolve(versionFile); try { return Files.readAllLines(rootPath).get(0).replace(System.getProperty("line.separator"), ""); } catch (IOException e) { throw new UncheckedIOException(e); } } /** * Gets the current URL for the test proxy. * @return The {@link URL} location of the test proxy. * @throws RuntimeException The URL could not be constructed. 
*/ public static URL getProxyUrl() { if (proxyUrl != null) { return proxyUrl; } UrlBuilder builder = new UrlBuilder(); builder.setHost("localhost"); builder.setScheme("http"); builder.setPort(5000); try { proxyUrl = builder.toUrl(); return proxyUrl; } catch (MalformedURLException e) { throw new RuntimeException(e); } } /** * Registers the default set of sanitizers for sanitizing request and responses * @return the list of default sanitizers to be added. */ public static List<TestProxySanitizer> loadSanitizers() { List<TestProxySanitizer> sanitizers = new ArrayList<>(addDefaultRegexSanitizers()); sanitizers.add(addDefaultUrlSanitizer()); sanitizers.addAll(addDefaultBodySanitizers()); sanitizers.addAll(addDefaultHeaderKeySanitizers()); return sanitizers; } private static String createCustomMatcherRequestBody(CustomMatcher customMatcher) { return String.format("{\"ignoredHeaders\":\"%s\",\"excludedHeaders\":\"%s\",\"compareBodies\":%s,\"ignoredQueryParameters\":\"%s\", \"ignoreQueryOrdering\":%s}", getCommaSeperatedString(customMatcher.getHeadersKeyOnlyMatch()), getCommaSeperatedString(customMatcher.getExcludedHeaders()), customMatcher.isComparingBodies(), getCommaSeperatedString(customMatcher.getIgnoredQueryParameters()), customMatcher.isQueryOrderingIgnored()); } private static String getCommaSeperatedString(List<String> stringList) { if (stringList == null) { return null; } return stringList.stream() .filter(s -> s != null && !s.isEmpty()) .collect(Collectors.joining(",")); } private static String createBodyJsonKeyRequestBody(String jsonKey, String regex, String redactedValue) { if (regex == null) { return String.format("{\"value\":\"%s\",\"jsonPath\":\"%s\"}", redactedValue, jsonKey); } else { return String.format("{\"value\":\"%s\",\"jsonPath\":\"%s\",\"regex\":\"%s\"}", redactedValue, jsonKey, regex); } } private static String createRegexRequestBody(String key, String regex, String value, String groupForReplace) { if (key == null) { if (groupForReplace == null) 
{ return String.format("{\"value\":\"%s\",\"regex\":\"%s\"}", value, regex); } else { return String.format("{\"value\":\"%s\",\"regex\":\"%s\",\"groupForReplace\":\"%s\"}", value, regex, groupForReplace); } } else if (regex == null) { return String.format("{\"key\":\"%s\",\"value\":\"%s\"}", key, value); } if (groupForReplace == null) { return String.format("{\"key\":\"%s\",\"value\":\"%s\",\"regex\":\"%s\"}", key, value, regex); } else { return String.format("{\"key\":\"%s\",\"value\":\"%s\",\"regex\":\"%s\",\"groupForReplace\":\"%s\"}", key, value, regex, groupForReplace); } } /** * Creates a list of sanitizer requests to be sent to the test proxy server. * * @param sanitizers the list of sanitizers to be added. * @param proxyUrl The proxyUrl to use when constructing requests. * @return the list of sanitizer {@link HttpRequest requests} to be sent. * @throws RuntimeException if {@link TestProxySanitizerType} is not supported. */ public static List<HttpRequest> getSanitizerRequests(List<TestProxySanitizer> sanitizers, URL proxyUrl) { return sanitizers.stream().map(testProxySanitizer -> { String requestBody; String sanitizerType; switch (testProxySanitizer.getType()) { case URL: sanitizerType = TestProxySanitizerType.URL.getName(); requestBody = createRegexRequestBody(null, testProxySanitizer.getRegex(), testProxySanitizer.getRedactedValue(), testProxySanitizer.getGroupForReplace()); return createHttpRequest(requestBody, sanitizerType, proxyUrl); case BODY_REGEX: sanitizerType = TestProxySanitizerType.BODY_REGEX.getName(); requestBody = createRegexRequestBody(null, testProxySanitizer.getRegex(), testProxySanitizer.getRedactedValue(), testProxySanitizer.getGroupForReplace()); return createHttpRequest(requestBody, sanitizerType, proxyUrl); case BODY_KEY: sanitizerType = TestProxySanitizerType.BODY_KEY.getName(); requestBody = createBodyJsonKeyRequestBody(testProxySanitizer.getKey(), testProxySanitizer.getRegex(), testProxySanitizer.getRedactedValue()); return 
createHttpRequest(requestBody, sanitizerType, proxyUrl); case HEADER: sanitizerType = HEADER.getName(); if (testProxySanitizer.getKey() == null && testProxySanitizer.getRegex() == null) { throw new RuntimeException( String.format("Missing regexKey and/or headerKey for sanitizer type {%s}", sanitizerType)); } requestBody = createRegexRequestBody(testProxySanitizer.getKey(), testProxySanitizer.getRegex(), testProxySanitizer.getRedactedValue(), testProxySanitizer.getGroupForReplace()); return createHttpRequest(requestBody, sanitizerType, proxyUrl); default: throw new RuntimeException( String.format("Sanitizer type {%s} not supported", testProxySanitizer.getType())); } }).collect(Collectors.toList()); } private static HttpRequest createHttpRequest(String requestBody, String sanitizerType, URL proxyUrl) { HttpRequest request = new HttpRequest(HttpMethod.POST, String.format("%s/Admin/AddSanitizer", proxyUrl.toString())) .setBody(requestBody); request.setHeader(X_ABSTRACTION_IDENTIFIER, sanitizerType); return request; } /** * Creates a {@link List} of {@link HttpRequest} to be sent to the test proxy to register matchers. * @param matchers The {@link TestProxyRequestMatcher}s to encode into requests. * @param proxyUrl The proxyUrl to use when constructing requests. * @return The {@link HttpRequest}s to send to the proxy. * @throws RuntimeException The {@link TestProxyRequestMatcher.TestProxyRequestMatcherType} is unsupported. 
*/ public static List<HttpRequest> getMatcherRequests(List<TestProxyRequestMatcher> matchers, URL proxyUrl) { return matchers.stream().map(testProxyMatcher -> { HttpRequest request; String matcherType; switch (testProxyMatcher.getType()) { case HEADERLESS: matcherType = TestProxyRequestMatcher.TestProxyRequestMatcherType.HEADERLESS.getName(); request = new HttpRequest(HttpMethod.POST, String.format("%s/Admin/setmatcher", proxyUrl.toString())); break; case BODILESS: request = new HttpRequest(HttpMethod.POST, String.format("%s/Admin/setmatcher", proxyUrl.toString())); matcherType = TestProxyRequestMatcher.TestProxyRequestMatcherType.BODILESS.getName(); break; case CUSTOM: CustomMatcher customMatcher = (CustomMatcher) testProxyMatcher; String requestBody = createCustomMatcherRequestBody(customMatcher); matcherType = TestProxyRequestMatcher.TestProxyRequestMatcherType.CUSTOM.getName(); request = new HttpRequest(HttpMethod.POST, String.format("%s/Admin/setmatcher", proxyUrl.toString())).setBody(requestBody); break; default: throw new RuntimeException(String.format("Matcher type {%s} not supported", testProxyMatcher.getType())); } request.setHeader(X_ABSTRACTION_IDENTIFIER, matcherType); return request; }).collect(Collectors.toList()); } private static TestProxySanitizer addDefaultUrlSanitizer() { return new TestProxySanitizer(URL_REGEX, REDACTED_VALUE, TestProxySanitizerType.URL); } private static List<TestProxySanitizer> addDefaultBodySanitizers() { return JSON_PROPERTIES_TO_REDACT.stream() .map(jsonProperty -> new TestProxySanitizer(String.format("$..%s", jsonProperty), null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY)) .collect(Collectors.toList()); } private static List<TestProxySanitizer> addDefaultRegexSanitizers() { List<TestProxySanitizer> regexSanitizers = getUserDelegationSanitizers(); regexSanitizers.addAll(BODY_REGEX_TO_REDACT.stream() .map(bodyRegex -> new TestProxySanitizer(bodyRegex, REDACTED_VALUE, 
TestProxySanitizerType.BODY_REGEX).setGroupForReplace("secret")) .collect(Collectors.toList())); List<TestProxySanitizer> keyRegexSanitizers = new ArrayList<>(); HEADER_KEY_REGEX_TO_REDACT.forEach((key, regex) -> keyRegexSanitizers.add(new TestProxySanitizer(key, regex, REDACTED_VALUE, HEADER))); regexSanitizers.addAll(keyRegexSanitizers); return regexSanitizers; } private static List<TestProxySanitizer> addDefaultHeaderKeySanitizers() { return HEADER_KEYS_TO_REDACT.stream() .map(headerKey -> new TestProxySanitizer(headerKey, null, REDACTED_VALUE, HEADER)) .collect(Collectors.toList()); } private static List<TestProxySanitizer> getUserDelegationSanitizers() { List<TestProxySanitizer> userDelegationSanitizers = new ArrayList<>(); userDelegationSanitizers.add(new TestProxySanitizer(DELEGATION_KEY_CLIENTID_REGEX, REDACTED_VALUE, TestProxySanitizerType.BODY_REGEX).setGroupForReplace("secret")); userDelegationSanitizers.add(new TestProxySanitizer(DELEGATION_KEY_TENANTID_REGEX, REDACTED_VALUE, TestProxySanitizerType.BODY_REGEX).setGroupForReplace("secret")); return userDelegationSanitizers; } }
```suggestion String userName = "<USERNAME>"; ```
public static void main(String[] args) { DefaultAzureCredential defaultAzureCredential = new DefaultAzureCredentialBuilder().build(); TokenRequestContext trc = new TokenRequestContext().addScopes("acca5fbb-b7e4-4009-81f1-37e38fd66d78/.default"); TokenRefreshCache tokenRefreshCache = new TokenRefreshCache(defaultAzureCredential, trc); AccessToken accessToken = tokenRefreshCache.getAccessToken(); String hostName = "<HOST_NAME>"; String userName = "<USER_NAME>"; RedisClient client = createLettuceRedisClient(hostName, 6380, userName, accessToken); StatefulRedisConnection<String, String> connection = client.connect(StringCodec.UTF8); int maxTries = 3; int i = 0; while (i < maxTries) { RedisStringCommands<String, String> sync = connection.sync(); try { sync.set("Az:testKey", "testVal"); System.out.println(sync.get("Az:testKey")); break; } catch (RedisException e) { e.printStackTrace(); if (!connection.isOpen()) { client = createLettuceRedisClient(hostName, 6380, userName, tokenRefreshCache.getAccessToken()); connection = client.connect(StringCodec.UTF8); sync = connection.sync(); } } catch (Exception e) { e.printStackTrace(); } i++; } }
String userName = "<USER_NAME>";
public static void main(String[] args) { DefaultAzureCredential defaultAzureCredential = new DefaultAzureCredentialBuilder().build(); TokenRequestContext trc = new TokenRequestContext().addScopes("acca5fbb-b7e4-4009-81f1-37e38fd66d78/.default"); TokenRefreshCache tokenRefreshCache = new TokenRefreshCache(defaultAzureCredential, trc); AccessToken accessToken = tokenRefreshCache.getAccessToken(); String hostName = "<HOST_NAME>"; String userName = "<USERNAME>"; RedisClient client = createLettuceRedisClient(hostName, 6380, userName, accessToken); StatefulRedisConnection<String, String> connection = client.connect(StringCodec.UTF8); RedisCommands<String, String> sync = connection.sync(); tokenRefreshCache .setLettuceInstanceToAuthenticate(sync) .setUsername(userName); int maxTries = 3; int i = 0; while (i < maxTries) { try { sync.set("Az:testKey", "testVal"); System.out.println(sync.get("Az:testKey")); break; } catch (RedisException e) { e.printStackTrace(); if (!connection.isOpen()) { client = createLettuceRedisClient(hostName, 6380, userName, tokenRefreshCache.getAccessToken()); connection = client.connect(StringCodec.UTF8); sync = connection.sync(); tokenRefreshCache .setLettuceInstanceToAuthenticate(sync) .setUsername(userName); } } catch (Exception e) { e.printStackTrace(); } i++; } }
class AuthenticateWithTokenCache { private static RedisClient createLettuceRedisClient(String hostName, int port, String username, AccessToken accessToken) { RedisURI redisURI = RedisURI.Builder.redis(hostName) .withPort(port) .withSsl(true) .withAuthentication(username, accessToken.getToken()) .withClientName("LettuceClient") .build(); RedisClient client = RedisClient.create(redisURI); client.setOptions(ClientOptions.builder() .socketOptions(SocketOptions.builder() .keepAlive(true) .build()) .protocolVersion(ProtocolVersion.RESP2) .build()); return client; } /** * The token cache to store and proactively refresh the access token. */ public static class TokenRefreshCache { private final TokenCredential tokenCredential; private final TokenRequestContext tokenRequestContext; private final Timer timer; private volatile AccessToken accessToken; private final Duration maxRefreshOffset = Duration.ofMinutes(5); private final Duration baseRefreshOffset = Duration.ofMinutes(2); /** * Creates an instance of TokenRefreshCache * @param tokenCredential the token credential to be used for authentication. * @param tokenRequestContext the token request context to be used for authentication. */ public TokenRefreshCache(TokenCredential tokenCredential, TokenRequestContext tokenRequestContext) { this.tokenCredential = tokenCredential; this.tokenRequestContext = tokenRequestContext; this.timer = new Timer(); } /** * Gets the cached access token. 
* @return the {@link AccessToken} */ public AccessToken getAccessToken() { if (accessToken != null) { return accessToken; } else { TokenRefreshTask tokenRefreshTask = new TokenRefreshTask(); accessToken = tokenCredential.getToken(tokenRequestContext).block(); timer.schedule(tokenRefreshTask, getTokenRefreshDelay()); return accessToken; } } private class TokenRefreshTask extends TimerTask { public void run() { accessToken = tokenCredential.getToken(tokenRequestContext).block(); System.out.println("Refreshed Token with Expiry: " + accessToken.getExpiresAt().toEpochSecond()); timer.schedule(new TokenRefreshTask(), getTokenRefreshDelay()); } } private long getTokenRefreshDelay() { return ((accessToken.getExpiresAt() .minusSeconds(ThreadLocalRandom.current().nextLong(baseRefreshOffset.getSeconds(), maxRefreshOffset.getSeconds())) .toEpochSecond() - OffsetDateTime.now().toEpochSecond()) * 1000); } } }
class AuthenticateWithTokenCache { private static RedisClient createLettuceRedisClient(String hostName, int port, String username, AccessToken accessToken) { RedisURI redisURI = RedisURI.Builder.redis(hostName) .withPort(port) .withSsl(true) .withAuthentication(username, accessToken.getToken()) .withClientName("LettuceClient") .build(); RedisClient client = RedisClient.create(redisURI); client.setOptions(ClientOptions.builder() .socketOptions(SocketOptions.builder() .keepAlive(true) .build()) .protocolVersion(ProtocolVersion.RESP2) .build()); return client; } /** * The token cache to store and proactively refresh the access token. */ public static class TokenRefreshCache { private final TokenCredential tokenCredential; private final TokenRequestContext tokenRequestContext; private final Timer timer; private volatile AccessToken accessToken; private final Duration maxRefreshOffset = Duration.ofMinutes(5); private final Duration baseRefreshOffset = Duration.ofMinutes(2); private RedisCommands<String, String> lettuceInstanceToAuthenticate; private String username; /** * Creates an instance of TokenRefreshCache * @param tokenCredential the token credential to be used for authentication. * @param tokenRequestContext the token request context to be used for authentication. */ public TokenRefreshCache(TokenCredential tokenCredential, TokenRequestContext tokenRequestContext) { this.tokenCredential = tokenCredential; this.tokenRequestContext = tokenRequestContext; this.timer = new Timer(true); } /** * Gets the cached access token. 
* @return the {@link AccessToken} */ public AccessToken getAccessToken() { if (accessToken != null) { return accessToken; } else { TokenRefreshTask tokenRefreshTask = new TokenRefreshTask(); accessToken = tokenCredential.getToken(tokenRequestContext).block(); timer.schedule(tokenRefreshTask, getTokenRefreshDelay()); return accessToken; } } private class TokenRefreshTask extends TimerTask { public void run() { accessToken = tokenCredential.getToken(tokenRequestContext).block(); System.out.println("Refreshed Token with Expiry: " + accessToken.getExpiresAt().toEpochSecond()); if (lettuceInstanceToAuthenticate != null && !CoreUtils.isNullOrEmpty(username)) { lettuceInstanceToAuthenticate.auth(username, accessToken.getToken()); System.out.println("Refreshed Lettuce Connection with fresh access token, token expires at : " + accessToken.getExpiresAt().toEpochSecond()); } timer.schedule(new TokenRefreshTask(), getTokenRefreshDelay()); } } private long getTokenRefreshDelay() { return ((accessToken.getExpiresAt() .minusSeconds(ThreadLocalRandom.current().nextLong(baseRefreshOffset.getSeconds(), maxRefreshOffset.getSeconds())) .toEpochSecond() - OffsetDateTime.now().toEpochSecond()) * 1000); } /** * Sets the Lettuce instance to proactively authenticate before token expiry. * @param lettuceInstanceToAuthenticate the instance to authenticate * @return the updated instance */ public TokenRefreshCache setLettuceInstanceToAuthenticate(RedisCommands<String, String> lettuceInstanceToAuthenticate) { this.lettuceInstanceToAuthenticate = lettuceInstanceToAuthenticate; return this; } /** * Sets the username to authenticate jedis instance with. * @param username the username to authenticate with * @return the updated instance */ public TokenRefreshCache setUsername(String username) { this.username = username; return this; } } }
```suggestion String userName = "<USERNAME>"; ```
public static void main(String[] args) { DefaultAzureCredential defaultAzureCredential = new DefaultAzureCredentialBuilder().build(); TokenRequestContext trc = new TokenRequestContext().addScopes("acca5fbb-b7e4-4009-81f1-37e38fd66d78/.default"); AccessToken accessToken = getAccessToken(defaultAzureCredential, trc); String hostName = "<HOST_NAME>"; String userName = "<USER_NAME>"; RedisClient client = createLettuceRedisClient(hostName, 6380, userName, accessToken); StatefulRedisConnection<String, String> connection = client.connect(StringCodec.UTF8); int maxTries = 3; int i = 0; while (i < maxTries) { RedisStringCommands<String, String> sync = connection.sync(); try { sync.set("Az:testKey", "testVal"); System.out.println(sync.get("Az:testKey")); break; } catch (RedisException e) { e.printStackTrace(); if (!connection.isOpen()) { client = createLettuceRedisClient(hostName, 6380, userName, getAccessToken(defaultAzureCredential, trc)); connection = client.connect(StringCodec.UTF8); sync = connection.sync(); } } catch (Exception e) { e.printStackTrace(); } i++; } }
String userName = "<USER_NAME>";
public static void main(String[] args) { DefaultAzureCredential defaultAzureCredential = new DefaultAzureCredentialBuilder().build(); TokenRequestContext trc = new TokenRequestContext().addScopes("acca5fbb-b7e4-4009-81f1-37e38fd66d78/.default"); AccessToken accessToken = getAccessToken(defaultAzureCredential, trc); String hostName = "<HOST_NAME>"; String userName = "<USERNAME>"; RedisClient client = createLettuceRedisClient(hostName, 6380, userName, accessToken); StatefulRedisConnection<String, String> connection = client.connect(StringCodec.UTF8); int maxTries = 3; int i = 0; while (i < maxTries) { RedisCommands<String, String> sync = connection.sync(); try { sync.set("Az:testKey", "testVal"); System.out.println(sync.get("Az:testKey")); break; } catch (RedisException e) { e.printStackTrace(); if (!connection.isOpen()) { client = createLettuceRedisClient(hostName, 6380, userName, getAccessToken(defaultAzureCredential, trc)); connection = client.connect(StringCodec.UTF8); sync = connection.sync(); } } catch (Exception e) { e.printStackTrace(); } i++; } }
class HandleReauthentication { private static RedisClient createLettuceRedisClient(String hostName, int port, String username, AccessToken accessToken) { RedisURI redisURI = RedisURI.Builder.redis(hostName) .withPort(port) .withSsl(true) .withAuthentication(username, accessToken.getToken()) .withClientName("LettuceClient") .build(); RedisClient client = RedisClient.create(redisURI); client.setOptions(ClientOptions.builder() .socketOptions(SocketOptions.builder() .keepAlive(true) .build()) .protocolVersion(ProtocolVersion.RESP2) .build()); return client; } private static AccessToken getAccessToken(TokenCredential tokenCredential, TokenRequestContext trc) { return tokenCredential.getToken(trc).block(); } }
class HandleReauthentication { private static RedisClient createLettuceRedisClient(String hostName, int port, String username, AccessToken accessToken) { RedisURI redisURI = RedisURI.Builder.redis(hostName) .withPort(port) .withSsl(true) .withAuthentication(username, accessToken.getToken()) .withClientName("LettuceClient") .build(); RedisClient client = RedisClient.create(redisURI); client.setOptions(ClientOptions.builder() .socketOptions(SocketOptions.builder() .keepAlive(true) .build()) .protocolVersion(ProtocolVersion.RESP2) .build()); return client; } private static AccessToken getAccessToken(TokenCredential tokenCredential, TokenRequestContext trc) { return tokenCredential.getToken(trc).block(); } }
there used to be no explicit timeout here and it turns out this test could run for [40+ seconds](https://dev.azure.com/azure-sdk/public/_build/results?buildId=2741645&view=ms.vss-test-web.build-test-results-tab&runId=40672150&resultId=104412) and finish successfully. So increasing timeout to stabilize test.
public void testLongRunningWontOverflow() throws Exception { AtomicLong refreshes = new AtomicLong(0); SimpleTokenCache cache = new SimpleTokenCache(() -> { refreshes.incrementAndGet(); return remoteGetTokenThatExpiresSoonAsync(); }); VirtualTimeScheduler virtualTimeScheduler = VirtualTimeScheduler.create(); CountDownLatch latch = new CountDownLatch(1); Flux.interval(Duration.ofMillis(100), virtualTimeScheduler) .take(100) .flatMap(i -> cache.getToken()) .doOnComplete(latch::countDown) .subscribe(); virtualTimeScheduler.advanceTimeBy(Duration.ofSeconds(40)); assertTrue(latch.await(60, TimeUnit.SECONDS)); assertTrue(refreshes.get() <= 11); }
assertTrue(latch.await(60, TimeUnit.SECONDS));
public void testLongRunningWontOverflow() throws Exception { AtomicLong refreshes = new AtomicLong(0); SimpleTokenCache cache = new SimpleTokenCache(() -> { refreshes.incrementAndGet(); return remoteGetTokenThatExpiresSoonAsync(); }); VirtualTimeScheduler virtualTimeScheduler = VirtualTimeScheduler.create(); CountDownLatch latch = new CountDownLatch(1); Flux.interval(Duration.ofMillis(100), virtualTimeScheduler) .take(100) .flatMap(i -> cache.getToken()) .doOnComplete(latch::countDown) .subscribe(); virtualTimeScheduler.advanceTimeBy(Duration.ofSeconds(40)); assertTrue(latch.await(60, TimeUnit.SECONDS)); assertTrue(refreshes.get() <= 11); }
class TokenCacheTests { @BeforeEach void beforeEach() { StepVerifier.setDefaultTimeout(Duration.ofSeconds(30)); } @Test public void testOnlyOneThreadRefreshesToken() { AtomicLong refreshes = new AtomicLong(0); SimpleTokenCache cache = new SimpleTokenCache(() -> { refreshes.incrementAndGet(); return incrementalRemoteGetTokenAsync(new AtomicInteger(1)); }); StepVerifier.create(Flux.range(1, 10).flatMap(ignored -> Mono.just(OffsetDateTime.now())) .parallel(10) .runOn(Schedulers.boundedElastic()) .flatMap(start -> cache.getToken()) .then()) .verifyComplete(); assertEquals(1, refreshes.get()); } @Test private Mono<AccessToken> remoteGetTokenThatExpiresSoonAsync() { return Mono.delay(Duration.ofMillis(1000)) .map(l -> new Token(Integer.toString(ThreadLocalRandom.current().nextInt(100)), 0)); } private Mono<AccessToken> incrementalRemoteGetTokenAsync(AtomicInteger latency) { return Mono.delay(Duration.ofSeconds(latency.getAndIncrement())) .map(l -> new Token(Integer.toString(ThreadLocalRandom.current().nextInt(100)))); } private static class Token extends AccessToken { Token(String token) { this(token, 5000); } Token(String token, long validityInMillis) { super(token, OffsetDateTime.now().plus(Duration.ofMillis(validityInMillis))); } } }
class TokenCacheTests { @BeforeEach void beforeEach() { StepVerifier.setDefaultTimeout(Duration.ofSeconds(30)); } @Test public void testOnlyOneThreadRefreshesToken() { AtomicLong refreshes = new AtomicLong(0); SimpleTokenCache cache = new SimpleTokenCache(() -> { refreshes.incrementAndGet(); return incrementalRemoteGetTokenAsync(new AtomicInteger(1)); }); StepVerifier.create(Flux.range(1, 10).flatMap(ignored -> Mono.just(OffsetDateTime.now())) .parallel(10) .runOn(Schedulers.boundedElastic()) .flatMap(start -> cache.getToken()) .then()) .verifyComplete(); assertEquals(1, refreshes.get()); } @Test private Mono<AccessToken> remoteGetTokenThatExpiresSoonAsync() { return Mono.delay(Duration.ofMillis(1000)) .map(l -> new Token(Integer.toString(ThreadLocalRandom.current().nextInt(100)), 0)); } private Mono<AccessToken> incrementalRemoteGetTokenAsync(AtomicInteger latency) { return Mono.delay(Duration.ofSeconds(latency.getAndIncrement())) .map(l -> new Token(Integer.toString(ThreadLocalRandom.current().nextInt(100)))); } private static class Token extends AccessToken { Token(String token) { this(token, 5000); } Token(String token, long validityInMillis) { super(token, OffsetDateTime.now().plus(Duration.ofMillis(validityInMillis))); } } }
manual change
public Mono<MetricCollection> executeAsync() { return this .manager() .serviceClient() .getMetricsOperations() .listWithResponseAsync( this.inner.resourceId(), String .format( "%s/%s", DateTimeFormatter.ISO_INSTANT.format(this.queryStartTime.atZoneSameInstant(ZoneOffset.UTC)), DateTimeFormatter.ISO_INSTANT.format(this.queryEndTime.atZoneSameInstant(ZoneOffset.UTC))), this.interval, this.inner.name().value(), this.aggreagation, this.top, this.orderBy, this.odataFilter, this.resultType, this.namespaceFilter, null, null ) .map(Response::getValue) .map(MetricCollectionImpl::new); }
)
public Mono<MetricCollection> executeAsync() { return this .manager() .serviceClient() .getMetricsOperations() .listWithResponseAsync( this.inner.resourceId(), String .format( "%s/%s", DateTimeFormatter.ISO_INSTANT.format(this.queryStartTime.atZoneSameInstant(ZoneOffset.UTC)), DateTimeFormatter.ISO_INSTANT.format(this.queryEndTime.atZoneSameInstant(ZoneOffset.UTC))), this.interval, this.inner.name().value(), this.aggreagation, this.top, this.orderBy, this.odataFilter, this.resultType, this.namespaceFilter, null, null ) .map(Response::getValue) .map(MetricCollectionImpl::new); }
class MetricDefinitionImpl extends WrapperImpl<MetricDefinitionInner> implements MetricDefinition, MetricDefinition.MetricsQueryDefinition { private final MonitorManager myManager; private final MetricDefinitionInner inner; private final LocalizableString name; private List<LocalizableString> dimensions; private OffsetDateTime queryStartTime = null; private OffsetDateTime queryEndTime = null; private String aggreagation; private Duration interval; private String odataFilter; private ResultType resultType; private Integer top; private String orderBy; private String namespaceFilter; MetricDefinitionImpl(final MetricDefinitionInner innerModel, final MonitorManager monitorManager) { super(innerModel); this.myManager = monitorManager; this.inner = innerModel; this.name = (inner.name() == null) ? null : new LocalizableStringImpl(inner.name()); this.dimensions = null; if (this.inner.dimensions() != null && this.inner.dimensions().size() > 0) { this.dimensions = new ArrayList<>(); for (LocalizableStringInner lsi : inner.dimensions()) { this.dimensions.add(new LocalizableStringImpl(lsi)); } } } @Override public MonitorManager manager() { return this.myManager; } public String resourceId() { return this.inner.resourceId(); } public LocalizableString name() { return this.name; } @Override public String namespace() { return this.inner.namespace(); } @Override public boolean isDimensionRequired() { return this.inner.isDimensionRequired(); } @Override public List<LocalizableString> dimensions() { return this.dimensions; } @Override public List<AggregationType> supportedAggregationTypes() { return this.inner.supportedAggregationTypes(); } public Unit unit() { return this.innerModel().unit(); } public AggregationType primaryAggregationType() { return this.inner.primaryAggregationType(); } public List<MetricAvailability> metricAvailabilities() { return this.inner.metricAvailabilities(); } public String id() { return this.inner.id(); } @Override public MetricDefinitionImpl 
defineQuery() { this.aggreagation = null; this.interval = null; this.resultType = null; this.top = null; this.orderBy = null; this.namespaceFilter = null; return this; } @Override public MetricDefinitionImpl startingFrom(OffsetDateTime startTime) { this.queryStartTime = startTime; return this; } @Override public MetricDefinitionImpl endsBefore(OffsetDateTime endTime) { this.queryEndTime = endTime; return this; } @Override public MetricDefinitionImpl withAggregation(String aggregation) { this.aggreagation = aggregation; return this; } @Override public MetricDefinitionImpl withInterval(Duration interval) { this.interval = interval; return this; } @Override public MetricDefinitionImpl withOdataFilter(String odataFilter) { this.odataFilter = odataFilter; return this; } @Override public MetricDefinitionImpl withResultType(ResultType resultType) { this.resultType = resultType; return this; } @Override public MetricDefinitionImpl selectTop(int top) { this.top = top; return this; } @Override public MetricDefinitionImpl orderBy(String orderBy) { this.orderBy = orderBy; return this; } @Override public MetricsQueryDefinitionStages.WithMetricsQueryExecute filterByNamespace(String namespaceName) { this.namespaceFilter = namespaceName; return this; } @Override public MetricCollection execute() { return this.executeAsync().block(); } @Override }
class MetricDefinitionImpl extends WrapperImpl<MetricDefinitionInner> implements MetricDefinition, MetricDefinition.MetricsQueryDefinition { private final MonitorManager myManager; private final MetricDefinitionInner inner; private final LocalizableString name; private List<LocalizableString> dimensions; private OffsetDateTime queryStartTime = null; private OffsetDateTime queryEndTime = null; private String aggreagation; private Duration interval; private String odataFilter; private ResultType resultType; private Integer top; private String orderBy; private String namespaceFilter; MetricDefinitionImpl(final MetricDefinitionInner innerModel, final MonitorManager monitorManager) { super(innerModel); this.myManager = monitorManager; this.inner = innerModel; this.name = (inner.name() == null) ? null : new LocalizableStringImpl(inner.name()); this.dimensions = null; if (this.inner.dimensions() != null && this.inner.dimensions().size() > 0) { this.dimensions = new ArrayList<>(); for (LocalizableStringInner lsi : inner.dimensions()) { this.dimensions.add(new LocalizableStringImpl(lsi)); } } } @Override public MonitorManager manager() { return this.myManager; } public String resourceId() { return this.inner.resourceId(); } public LocalizableString name() { return this.name; } @Override public String namespace() { return this.inner.namespace(); } @Override public boolean isDimensionRequired() { return this.inner.isDimensionRequired(); } @Override public List<LocalizableString> dimensions() { return this.dimensions; } @Override public List<AggregationType> supportedAggregationTypes() { return this.inner.supportedAggregationTypes(); } public Unit unit() { return this.innerModel().unit(); } public AggregationType primaryAggregationType() { return this.inner.primaryAggregationType(); } public List<MetricAvailability> metricAvailabilities() { return this.inner.metricAvailabilities(); } public String id() { return this.inner.id(); } @Override public MetricDefinitionImpl 
defineQuery() { this.aggreagation = null; this.interval = null; this.resultType = null; this.top = null; this.orderBy = null; this.namespaceFilter = null; return this; } @Override public MetricDefinitionImpl startingFrom(OffsetDateTime startTime) { this.queryStartTime = startTime; return this; } @Override public MetricDefinitionImpl endsBefore(OffsetDateTime endTime) { this.queryEndTime = endTime; return this; } @Override public MetricDefinitionImpl withAggregation(String aggregation) { this.aggreagation = aggregation; return this; } @Override public MetricDefinitionImpl withInterval(Duration interval) { this.interval = interval; return this; } @Override public MetricDefinitionImpl withOdataFilter(String odataFilter) { this.odataFilter = odataFilter; return this; } @Override public MetricDefinitionImpl withResultType(ResultType resultType) { this.resultType = resultType; return this; } @Override public MetricDefinitionImpl selectTop(int top) { this.top = top; return this; } @Override public MetricDefinitionImpl orderBy(String orderBy) { this.orderBy = orderBy; return this; } @Override public MetricsQueryDefinitionStages.WithMetricsQueryExecute filterByNamespace(String namespaceName) { this.namespaceFilter = namespaceName; return this; } @Override public MetricCollection execute() { return this.executeAsync().block(); } @Override }
The method `getMetrics()` was removed from `MonitorClient`, so this call was updated to use `getMetricsOperations()` instead.
/**
 * Runs the configured metrics query asynchronously.
 *
 * @return a {@link Mono} emitting the retrieved {@link MetricCollection}.
 */
public Mono<MetricCollection> executeAsync() {
    // The service expects the query window as "start/end" in ISO-8601 instants, normalized to UTC.
    final String timespan = String.format("%s/%s",
        DateTimeFormatter.ISO_INSTANT.format(this.queryStartTime.atZoneSameInstant(ZoneOffset.UTC)),
        DateTimeFormatter.ISO_INSTANT.format(this.queryEndTime.atZoneSameInstant(ZoneOffset.UTC)));

    return this.manager()
        .serviceClient()
        .getMetricsOperations()
        .listWithResponseAsync(
            this.inner.resourceId(),
            timespan,
            this.interval,
            this.inner.name().value(),
            this.aggreagation,
            this.top,
            this.orderBy,
            this.odataFilter,
            this.resultType,
            this.namespaceFilter,
            null,
            null)
        .map(Response::getValue)
        .map(MetricCollectionImpl::new);
}
)
/**
 * Runs the configured metrics query asynchronously.
 *
 * @return a {@link Mono} emitting the retrieved {@link MetricCollection}.
 */
public Mono<MetricCollection> executeAsync() {
    // The service expects the query window as "start/end" in ISO-8601 instants, normalized to UTC.
    final String timespan = String.format("%s/%s",
        DateTimeFormatter.ISO_INSTANT.format(this.queryStartTime.atZoneSameInstant(ZoneOffset.UTC)),
        DateTimeFormatter.ISO_INSTANT.format(this.queryEndTime.atZoneSameInstant(ZoneOffset.UTC)));

    return this.manager()
        .serviceClient()
        .getMetricsOperations()
        .listWithResponseAsync(
            this.inner.resourceId(),
            timespan,
            this.interval,
            this.inner.name().value(),
            this.aggreagation,
            this.top,
            this.orderBy,
            this.odataFilter,
            this.resultType,
            this.namespaceFilter,
            null,
            null)
        .map(Response::getValue)
        .map(MetricCollectionImpl::new);
}
class MetricDefinitionImpl extends WrapperImpl<MetricDefinitionInner> implements MetricDefinition, MetricDefinition.MetricsQueryDefinition { private final MonitorManager myManager; private final MetricDefinitionInner inner; private final LocalizableString name; private List<LocalizableString> dimensions; private OffsetDateTime queryStartTime = null; private OffsetDateTime queryEndTime = null; private String aggreagation; private Duration interval; private String odataFilter; private ResultType resultType; private Integer top; private String orderBy; private String namespaceFilter; MetricDefinitionImpl(final MetricDefinitionInner innerModel, final MonitorManager monitorManager) { super(innerModel); this.myManager = monitorManager; this.inner = innerModel; this.name = (inner.name() == null) ? null : new LocalizableStringImpl(inner.name()); this.dimensions = null; if (this.inner.dimensions() != null && this.inner.dimensions().size() > 0) { this.dimensions = new ArrayList<>(); for (LocalizableStringInner lsi : inner.dimensions()) { this.dimensions.add(new LocalizableStringImpl(lsi)); } } } @Override public MonitorManager manager() { return this.myManager; } public String resourceId() { return this.inner.resourceId(); } public LocalizableString name() { return this.name; } @Override public String namespace() { return this.inner.namespace(); } @Override public boolean isDimensionRequired() { return this.inner.isDimensionRequired(); } @Override public List<LocalizableString> dimensions() { return this.dimensions; } @Override public List<AggregationType> supportedAggregationTypes() { return this.inner.supportedAggregationTypes(); } public Unit unit() { return this.innerModel().unit(); } public AggregationType primaryAggregationType() { return this.inner.primaryAggregationType(); } public List<MetricAvailability> metricAvailabilities() { return this.inner.metricAvailabilities(); } public String id() { return this.inner.id(); } @Override public MetricDefinitionImpl 
defineQuery() { this.aggreagation = null; this.interval = null; this.resultType = null; this.top = null; this.orderBy = null; this.namespaceFilter = null; return this; } @Override public MetricDefinitionImpl startingFrom(OffsetDateTime startTime) { this.queryStartTime = startTime; return this; } @Override public MetricDefinitionImpl endsBefore(OffsetDateTime endTime) { this.queryEndTime = endTime; return this; } @Override public MetricDefinitionImpl withAggregation(String aggregation) { this.aggreagation = aggregation; return this; } @Override public MetricDefinitionImpl withInterval(Duration interval) { this.interval = interval; return this; } @Override public MetricDefinitionImpl withOdataFilter(String odataFilter) { this.odataFilter = odataFilter; return this; } @Override public MetricDefinitionImpl withResultType(ResultType resultType) { this.resultType = resultType; return this; } @Override public MetricDefinitionImpl selectTop(int top) { this.top = top; return this; } @Override public MetricDefinitionImpl orderBy(String orderBy) { this.orderBy = orderBy; return this; } @Override public MetricsQueryDefinitionStages.WithMetricsQueryExecute filterByNamespace(String namespaceName) { this.namespaceFilter = namespaceName; return this; } @Override public MetricCollection execute() { return this.executeAsync().block(); } @Override }
class MetricDefinitionImpl extends WrapperImpl<MetricDefinitionInner> implements MetricDefinition, MetricDefinition.MetricsQueryDefinition { private final MonitorManager myManager; private final MetricDefinitionInner inner; private final LocalizableString name; private List<LocalizableString> dimensions; private OffsetDateTime queryStartTime = null; private OffsetDateTime queryEndTime = null; private String aggreagation; private Duration interval; private String odataFilter; private ResultType resultType; private Integer top; private String orderBy; private String namespaceFilter; MetricDefinitionImpl(final MetricDefinitionInner innerModel, final MonitorManager monitorManager) { super(innerModel); this.myManager = monitorManager; this.inner = innerModel; this.name = (inner.name() == null) ? null : new LocalizableStringImpl(inner.name()); this.dimensions = null; if (this.inner.dimensions() != null && this.inner.dimensions().size() > 0) { this.dimensions = new ArrayList<>(); for (LocalizableStringInner lsi : inner.dimensions()) { this.dimensions.add(new LocalizableStringImpl(lsi)); } } } @Override public MonitorManager manager() { return this.myManager; } public String resourceId() { return this.inner.resourceId(); } public LocalizableString name() { return this.name; } @Override public String namespace() { return this.inner.namespace(); } @Override public boolean isDimensionRequired() { return this.inner.isDimensionRequired(); } @Override public List<LocalizableString> dimensions() { return this.dimensions; } @Override public List<AggregationType> supportedAggregationTypes() { return this.inner.supportedAggregationTypes(); } public Unit unit() { return this.innerModel().unit(); } public AggregationType primaryAggregationType() { return this.inner.primaryAggregationType(); } public List<MetricAvailability> metricAvailabilities() { return this.inner.metricAvailabilities(); } public String id() { return this.inner.id(); } @Override public MetricDefinitionImpl 
defineQuery() { this.aggreagation = null; this.interval = null; this.resultType = null; this.top = null; this.orderBy = null; this.namespaceFilter = null; return this; } @Override public MetricDefinitionImpl startingFrom(OffsetDateTime startTime) { this.queryStartTime = startTime; return this; } @Override public MetricDefinitionImpl endsBefore(OffsetDateTime endTime) { this.queryEndTime = endTime; return this; } @Override public MetricDefinitionImpl withAggregation(String aggregation) { this.aggreagation = aggregation; return this; } @Override public MetricDefinitionImpl withInterval(Duration interval) { this.interval = interval; return this; } @Override public MetricDefinitionImpl withOdataFilter(String odataFilter) { this.odataFilter = odataFilter; return this; } @Override public MetricDefinitionImpl withResultType(ResultType resultType) { this.resultType = resultType; return this; } @Override public MetricDefinitionImpl selectTop(int top) { this.top = top; return this; } @Override public MetricDefinitionImpl orderBy(String orderBy) { this.orderBy = orderBy; return this; } @Override public MetricsQueryDefinitionStages.WithMetricsQueryExecute filterByNamespace(String namespaceName) { this.namespaceFilter = namespaceName; return this; } @Override public MetricCollection execute() { return this.executeAsync().block(); } @Override }
Nit: we should define these default values as separate named constants above, as the other methods in this class do.
// Default speculation threshold used when the COSMOS_SPECULATION_THRESHOLD JVM
// property is not set. Hoisted into a named constant (reviewer nit) to match how
// the other config getters in this class declare their defaults.
private static final int DEFAULT_SPECULATION_THRESHOLD = 500;

/**
 * Returns the speculation threshold, read from the {@code COSMOS_SPECULATION_THRESHOLD}
 * JVM system property, falling back to {@link #DEFAULT_SPECULATION_THRESHOLD}.
 */
public static int speculationThreshold() {
    return getJVMConfigAsInt(SPECULATION_THRESHOLD, DEFAULT_SPECULATION_THRESHOLD);
}
// Read the threshold from the COSMOS_SPECULATION_THRESHOLD JVM property; fall back to 500
// when unset. (Units not visible here — presumably milliseconds; TODO confirm.)
return getJVMConfigAsInt(SPECULATION_THRESHOLD, 500);
/**
 * Returns the speculation threshold, read from the {@code COSMOS_SPECULATION_THRESHOLD}
 * JVM system property; defaults to 500 when the property is not set.
 */
public static int speculationThreshold() {
    final int fallbackThreshold = 500;
    return getJVMConfigAsInt(SPECULATION_THRESHOLD, fallbackThreshold);
}
class Configs { private static final Logger logger = LoggerFactory.getLogger(Configs.class); /** * Integer value specifying the speculation type * <pre> * 0 - No speculation * 1 - Threshold based speculation * </pre> */ public static final String SPECULATION_TYPE = "COSMOS_SPECULATION_TYPE"; public static final String SPECULATION_THRESHOLD = "COSMOS_SPECULATION_THRESHOLD"; public static final String SPECULATION_THRESHOLD_STEP = "COSMOS_SPECULATION_THRESHOLD_STEP"; private final SslContext sslContext; private static final String PROTOCOL_ENVIRONMENT_VARIABLE = "AZURE_COSMOS_DIRECT_MODE_PROTOCOL"; private static final String PROTOCOL_PROPERTY = "azure.cosmos.directModeProtocol"; private static final Protocol DEFAULT_PROTOCOL = Protocol.TCP; private static final String UNAVAILABLE_LOCATIONS_EXPIRATION_TIME_IN_SECONDS = "COSMOS.UNAVAILABLE_LOCATIONS_EXPIRATION_TIME_IN_SECONDS"; private static final String GLOBAL_ENDPOINT_MANAGER_INITIALIZATION_TIME_IN_SECONDS = "COSMOS.GLOBAL_ENDPOINT_MANAGER_MAX_INIT_TIME_IN_SECONDS"; private static final String MAX_HTTP_BODY_LENGTH_IN_BYTES = "COSMOS.MAX_HTTP_BODY_LENGTH_IN_BYTES"; private static final String MAX_HTTP_INITIAL_LINE_LENGTH_IN_BYTES = "COSMOS.MAX_HTTP_INITIAL_LINE_LENGTH_IN_BYTES"; private static final String MAX_HTTP_CHUNK_SIZE_IN_BYTES = "COSMOS.MAX_HTTP_CHUNK_SIZE_IN_BYTES"; private static final String MAX_HTTP_HEADER_SIZE_IN_BYTES = "COSMOS.MAX_HTTP_HEADER_SIZE_IN_BYTES"; private static final String MAX_DIRECT_HTTPS_POOL_SIZE = "COSMOS.MAX_DIRECT_HTTP_CONNECTION_LIMIT"; private static final String HTTP_RESPONSE_TIMEOUT_IN_SECONDS = "COSMOS.HTTP_RESPONSE_TIMEOUT_IN_SECONDS"; private static final String QUERY_PLAN_RESPONSE_TIMEOUT_IN_SECONDS = "COSMOS.QUERY_PLAN_RESPONSE_TIMEOUT_IN_SECONDS"; private static final String ADDRESS_REFRESH_RESPONSE_TIMEOUT_IN_SECONDS = "COSMOS.ADDRESS_REFRESH_RESPONSE_TIMEOUT_IN_SECONDS"; public static final String NON_IDEMPOTENT_WRITE_RETRY_POLICY = "COSMOS.WRITE_RETRY_POLICY"; public 
static final String NON_IDEMPOTENT_WRITE_RETRY_POLICY_VARIABLE = "COSMOS_WRITE_RETRY_POLICY"; private static final String CLIENT_TELEMETRY_PROXY_OPTIONS_CONFIG = "COSMOS.CLIENT_TELEMETRY_PROXY_OPTIONS_CONFIG"; private static final String CLIENT_TELEMETRY_SCHEDULING_IN_SECONDS = "COSMOS.CLIENT_TELEMETRY_SCHEDULING_IN_SECONDS"; private static final String CLIENT_TELEMETRY_ENDPOINT = "COSMOS.CLIENT_TELEMETRY_ENDPOINT"; private static final String ENVIRONMENT_NAME = "COSMOS.ENVIRONMENT_NAME"; private static final String QUERYPLAN_CACHING_ENABLED = "COSMOS.QUERYPLAN_CACHING_ENABLED"; private static final int DEFAULT_CLIENT_TELEMETRY_SCHEDULING_IN_SECONDS = 10 * 60; private static final int DEFAULT_UNAVAILABLE_LOCATIONS_EXPIRATION_TIME_IN_SECONDS = 5 * 60; private static final int DEFAULT_MAX_HTTP_BODY_LENGTH_IN_BYTES = 6 * 1024 * 1024; private static final int DEFAULT_MAX_HTTP_INITIAL_LINE_LENGTH = 4096; private static final int DEFAULT_MAX_HTTP_CHUNK_SIZE_IN_BYTES = 8192; private static final int DEFAULT_MAX_HTTP_REQUEST_HEADER_SIZE = 32 * 1024; private static final int MAX_NUMBER_OF_READ_BARRIER_READ_RETRIES = 6; private static final int MAX_NUMBER_OF_PRIMARY_READ_RETRIES = 6; private static final int MAX_NUMBER_OF_READ_QUORUM_RETRIES = 6; private static final int DELAY_BETWEEN_READ_BARRIER_CALLS_IN_MS = 5; private static final int MAX_BARRIER_RETRIES_FOR_MULTI_REGION = 30; private static final int BARRIER_RETRY_INTERVAL_IN_MS_FOR_MULTI_REGION = 30; private static final int MAX_SHORT_BARRIER_RETRIES_FOR_MULTI_REGION = 4; private static final int SHORT_BARRIER_RETRY_INTERVAL_IN_MS_FOR_MULTI_REGION = 10; private static final int CPU_CNT = Runtime.getRuntime().availableProcessors(); private static final int DEFAULT_DIRECT_HTTPS_POOL_SIZE = CPU_CNT * 500; private static final int DEFAULT_GLOBAL_ENDPOINT_MANAGER_INITIALIZATION_TIME_IN_SECONDS = 2 * 60; private static final Duration MAX_IDLE_CONNECTION_TIMEOUT = Duration.ofSeconds(60); private static final Duration 
CONNECTION_ACQUIRE_TIMEOUT = Duration.ofSeconds(45); private static final int REACTOR_NETTY_MAX_CONNECTION_POOL_SIZE = 1000; private static final String REACTOR_NETTY_CONNECTION_POOL_NAME = "reactor-netty-connection-pool"; private static final int DEFAULT_HTTP_RESPONSE_TIMEOUT_IN_SECONDS = 60; private static final int DEFAULT_QUERY_PLAN_RESPONSE_TIMEOUT_IN_SECONDS = 5; private static final int DEFAULT_ADDRESS_REFRESH_RESPONSE_TIMEOUT_IN_SECONDS = 5; private static final String DEFAULT_SESSION_TOKEN_MISMATCH_WAIT_TIME_IN_MILLISECONDS_NAME = "COSMOS.DEFAULT_SESSION_TOKEN_MISMATCH_WAIT_TIME_IN_MILLISECONDS"; private static final int DEFAULT_SESSION_TOKEN_MISMATCH_WAIT_TIME_IN_MILLISECONDS = 5000; private static final String DEFAULT_SESSION_TOKEN_MISMATCH_INITIAL_BACKOFF_TIME_IN_MILLISECONDS_NAME = "COSMOS.DEFAULT_SESSION_TOKEN_MISMATCH_INITIAL_BACKOFF_TIME_IN_MILLISECONDS"; private static final int DEFAULT_SESSION_TOKEN_MISMATCH_INITIAL_BACKOFF_TIME_IN_MILLISECONDS = 5; private static final String DEFAULT_SESSION_TOKEN_MISMATCH_MAXIMUM_BACKOFF_TIME_IN_MILLISECONDS_NAME = "COSMOS.DEFAULT_SESSION_TOKEN_MISMATCH_MAXIMUM_BACKOFF_TIME_IN_MILLISECONDS"; private static final int DEFAULT_SESSION_TOKEN_MISMATCH_MAXIMUM_BACKOFF_TIME_IN_MILLISECONDS = 50; private static final String SWITCH_OFF_IO_THREAD_FOR_RESPONSE_NAME = "COSMOS.SWITCH_OFF_IO_THREAD_FOR_RESPONSE"; private static final boolean DEFAULT_SWITCH_OFF_IO_THREAD_FOR_RESPONSE = false; private static final String QUERY_EMPTY_PAGE_DIAGNOSTICS_ENABLED = "COSMOS.QUERY_EMPTY_PAGE_DIAGNOSTICS_ENABLED"; private static final boolean DEFAULT_QUERY_EMPTY_PAGE_DIAGNOSTICS_ENABLED = false; private static final String USE_LEGACY_TRACING = "COSMOS.USE_LEGACY_TRACING"; private static final boolean DEFAULT_USE_LEGACY_TRACING = false; private static final String REPLICA_ADDRESS_VALIDATION_ENABLED = "COSMOS.REPLICA_ADDRESS_VALIDATION_ENABLED"; private static final boolean DEFAULT_REPLICA_ADDRESS_VALIDATION_ENABLED = true; private static 
final String TCP_HEALTH_CHECK_TIMEOUT_DETECTION_ENABLED = "COSMOS.TCP_HEALTH_CHECK_TIMEOUT_DETECTION_ENABLED"; private static final boolean DEFAULT_TCP_HEALTH_CHECK_TIMEOUT_DETECTION_ENABLED = true; private static final String MIN_CONNECTION_POOL_SIZE_PER_ENDPOINT = "COSMOS.MIN_CONNECTION_POOL_SIZE_PER_ENDPOINT"; private static final int DEFAULT_MIN_CONNECTION_POOL_SIZE_PER_ENDPOINT = 1; private static final String AGGRESSIVE_WARMUP_CONCURRENCY = "COSMOS.AGGRESSIVE_WARMUP_CONCURRENCY"; private static final int DEFAULT_AGGRESSIVE_WARMUP_CONCURRENCY = Configs.getCPUCnt(); private static final String DEFENSIVE_WARMUP_CONCURRENCY = "COSMOS.DEFENSIVE_WARMUP_CONCURRENCY"; private static final int DEFAULT_DEFENSIVE_WARMUP_CONCURRENCY = 1; public Configs() { this.sslContext = sslContextInit(); } public static int getCPUCnt() { return CPU_CNT; } private SslContext sslContextInit() { try { SslProvider sslProvider = SslContext.defaultClientProvider(); return SslContextBuilder.forClient().sslProvider(sslProvider).build(); } catch (SSLException sslException) { logger.error("Fatal error cannot instantiate ssl context due to {}", sslException.getMessage(), sslException); throw new IllegalStateException(sslException); } } public SslContext getSslContext() { return this.sslContext; } public Protocol getProtocol() { String protocol = System.getProperty(PROTOCOL_PROPERTY, firstNonNull( emptyToNull(System.getenv().get(PROTOCOL_ENVIRONMENT_VARIABLE)), DEFAULT_PROTOCOL.name())); try { return Protocol.valueOf(protocol.toUpperCase(Locale.ROOT)); } catch (Exception e) { logger.error("Parsing protocol {} failed. 
Using the default {}.", protocol, DEFAULT_PROTOCOL, e); return DEFAULT_PROTOCOL; } } public int getMaxNumberOfReadBarrierReadRetries() { return MAX_NUMBER_OF_READ_BARRIER_READ_RETRIES; } public int getMaxNumberOfPrimaryReadRetries() { return MAX_NUMBER_OF_PRIMARY_READ_RETRIES; } public int getMaxNumberOfReadQuorumRetries() { return MAX_NUMBER_OF_READ_QUORUM_RETRIES; } public int getDelayBetweenReadBarrierCallsInMs() { return DELAY_BETWEEN_READ_BARRIER_CALLS_IN_MS; } public int getMaxBarrierRetriesForMultiRegion() { return MAX_BARRIER_RETRIES_FOR_MULTI_REGION; } public int getBarrierRetryIntervalInMsForMultiRegion() { return BARRIER_RETRY_INTERVAL_IN_MS_FOR_MULTI_REGION; } public int getMaxShortBarrierRetriesForMultiRegion() { return MAX_SHORT_BARRIER_RETRIES_FOR_MULTI_REGION; } public int getShortBarrierRetryIntervalInMsForMultiRegion() { return SHORT_BARRIER_RETRY_INTERVAL_IN_MS_FOR_MULTI_REGION; } public int getDirectHttpsMaxConnectionLimit() { return getJVMConfigAsInt(MAX_DIRECT_HTTPS_POOL_SIZE, DEFAULT_DIRECT_HTTPS_POOL_SIZE); } public int getMaxHttpHeaderSize() { return getJVMConfigAsInt(MAX_HTTP_HEADER_SIZE_IN_BYTES, DEFAULT_MAX_HTTP_REQUEST_HEADER_SIZE); } public int getMaxHttpInitialLineLength() { return getJVMConfigAsInt(MAX_HTTP_INITIAL_LINE_LENGTH_IN_BYTES, DEFAULT_MAX_HTTP_INITIAL_LINE_LENGTH); } public int getMaxHttpChunkSize() { return getJVMConfigAsInt(MAX_HTTP_CHUNK_SIZE_IN_BYTES, DEFAULT_MAX_HTTP_CHUNK_SIZE_IN_BYTES); } public int getMaxHttpBodyLength() { return getJVMConfigAsInt(MAX_HTTP_BODY_LENGTH_IN_BYTES, DEFAULT_MAX_HTTP_BODY_LENGTH_IN_BYTES); } public int getUnavailableLocationsExpirationTimeInSeconds() { return getJVMConfigAsInt(UNAVAILABLE_LOCATIONS_EXPIRATION_TIME_IN_SECONDS, DEFAULT_UNAVAILABLE_LOCATIONS_EXPIRATION_TIME_IN_SECONDS); } public static int getClientTelemetrySchedulingInSec() { return getJVMConfigAsInt(CLIENT_TELEMETRY_SCHEDULING_IN_SECONDS, DEFAULT_CLIENT_TELEMETRY_SCHEDULING_IN_SECONDS); } public int 
getGlobalEndpointManagerMaxInitializationTimeInSeconds() { return getJVMConfigAsInt(GLOBAL_ENDPOINT_MANAGER_INITIALIZATION_TIME_IN_SECONDS, DEFAULT_GLOBAL_ENDPOINT_MANAGER_INITIALIZATION_TIME_IN_SECONDS); } public String getReactorNettyConnectionPoolName() { return REACTOR_NETTY_CONNECTION_POOL_NAME; } public Duration getMaxIdleConnectionTimeout() { return MAX_IDLE_CONNECTION_TIMEOUT; } public Duration getConnectionAcquireTimeout() { return CONNECTION_ACQUIRE_TIMEOUT; } public int getReactorNettyMaxConnectionPoolSize() { return REACTOR_NETTY_MAX_CONNECTION_POOL_SIZE; } public static int getHttpResponseTimeoutInSeconds() { return getJVMConfigAsInt(HTTP_RESPONSE_TIMEOUT_IN_SECONDS, DEFAULT_HTTP_RESPONSE_TIMEOUT_IN_SECONDS); } public static int getQueryPlanResponseTimeoutInSeconds() { return getJVMConfigAsInt(QUERY_PLAN_RESPONSE_TIMEOUT_IN_SECONDS, DEFAULT_QUERY_PLAN_RESPONSE_TIMEOUT_IN_SECONDS); } public static String getClientTelemetryEndpoint() { return System.getProperty(CLIENT_TELEMETRY_ENDPOINT); } public static String getClientTelemetryProxyOptionsConfig() { return System.getProperty(CLIENT_TELEMETRY_PROXY_OPTIONS_CONFIG); } public static String getNonIdempotentWriteRetryPolicy() { String valueFromSystemProperty = System.getProperty(NON_IDEMPOTENT_WRITE_RETRY_POLICY); if (valueFromSystemProperty != null && !valueFromSystemProperty.isEmpty()) { return valueFromSystemProperty; } return System.getenv(NON_IDEMPOTENT_WRITE_RETRY_POLICY_VARIABLE); } public static String getEnvironmentName() { return System.getProperty(ENVIRONMENT_NAME); } public static boolean isQueryPlanCachingEnabled() { return getJVMConfigAsBoolean(QUERYPLAN_CACHING_ENABLED, true); } public static int getAddressRefreshResponseTimeoutInSeconds() { return getJVMConfigAsInt(ADDRESS_REFRESH_RESPONSE_TIMEOUT_IN_SECONDS, DEFAULT_ADDRESS_REFRESH_RESPONSE_TIMEOUT_IN_SECONDS); } public static int getSessionTokenMismatchDefaultWaitTimeInMs() { return getJVMConfigAsInt( 
DEFAULT_SESSION_TOKEN_MISMATCH_WAIT_TIME_IN_MILLISECONDS_NAME, DEFAULT_SESSION_TOKEN_MISMATCH_WAIT_TIME_IN_MILLISECONDS); } public static int getSessionTokenMismatchInitialBackoffTimeInMs() { return getJVMConfigAsInt( DEFAULT_SESSION_TOKEN_MISMATCH_INITIAL_BACKOFF_TIME_IN_MILLISECONDS_NAME, DEFAULT_SESSION_TOKEN_MISMATCH_INITIAL_BACKOFF_TIME_IN_MILLISECONDS); } public static int getSessionTokenMismatchMaximumBackoffTimeInMs() { return getJVMConfigAsInt( DEFAULT_SESSION_TOKEN_MISMATCH_MAXIMUM_BACKOFF_TIME_IN_MILLISECONDS_NAME, DEFAULT_SESSION_TOKEN_MISMATCH_MAXIMUM_BACKOFF_TIME_IN_MILLISECONDS); } public static int getSpeculationType() { return getJVMConfigAsInt(SPECULATION_TYPE, 0); } public static int speculationThresholdStep() { return getJVMConfigAsInt(SPECULATION_THRESHOLD_STEP, 100); } public static boolean shouldSwitchOffIOThreadForResponse() { return getJVMConfigAsBoolean( SWITCH_OFF_IO_THREAD_FOR_RESPONSE_NAME, DEFAULT_SWITCH_OFF_IO_THREAD_FOR_RESPONSE); } public static boolean isEmptyPageDiagnosticsEnabled() { return getJVMConfigAsBoolean( QUERY_EMPTY_PAGE_DIAGNOSTICS_ENABLED, DEFAULT_QUERY_EMPTY_PAGE_DIAGNOSTICS_ENABLED); } public static boolean useLegacyTracing() { return getJVMConfigAsBoolean( USE_LEGACY_TRACING, DEFAULT_USE_LEGACY_TRACING); } private static int getJVMConfigAsInt(String propName, int defaultValue) { String propValue = System.getProperty(propName); return getIntValue(propValue, defaultValue); } private static boolean getJVMConfigAsBoolean(String propName, boolean defaultValue) { String propValue = System.getProperty(propName); return getBooleanValue(propValue, defaultValue); } private static int getIntValue(String val, int defaultValue) { if (StringUtils.isEmpty(val)) { return defaultValue; } else { return Integer.valueOf(val); } } private static boolean getBooleanValue(String val, boolean defaultValue) { if (StringUtils.isEmpty(val)) { return defaultValue; } else { return Boolean.valueOf(val); } } public static boolean 
isReplicaAddressValidationEnabled() { return getJVMConfigAsBoolean( REPLICA_ADDRESS_VALIDATION_ENABLED, DEFAULT_REPLICA_ADDRESS_VALIDATION_ENABLED); } public static boolean isTcpHealthCheckTimeoutDetectionEnabled() { return getJVMConfigAsBoolean( TCP_HEALTH_CHECK_TIMEOUT_DETECTION_ENABLED, DEFAULT_TCP_HEALTH_CHECK_TIMEOUT_DETECTION_ENABLED); } public static int getMinConnectionPoolSizePerEndpoint() { return getIntValue(System.getProperty(MIN_CONNECTION_POOL_SIZE_PER_ENDPOINT), DEFAULT_MIN_CONNECTION_POOL_SIZE_PER_ENDPOINT); } public static int getDefensiveWarmupConcurrency() { return getIntValue(System.getProperty(DEFENSIVE_WARMUP_CONCURRENCY), DEFAULT_DEFENSIVE_WARMUP_CONCURRENCY); } public static int getAggressiveWarmupConcurrency() { return getIntValue(System.getProperty(AGGRESSIVE_WARMUP_CONCURRENCY), DEFAULT_AGGRESSIVE_WARMUP_CONCURRENCY); } }
class Configs { private static final Logger logger = LoggerFactory.getLogger(Configs.class); /** * Integer value specifying the speculation type * <pre> * 0 - No speculation * 1 - Threshold based speculation * </pre> */ public static final String SPECULATION_TYPE = "COSMOS_SPECULATION_TYPE"; public static final String SPECULATION_THRESHOLD = "COSMOS_SPECULATION_THRESHOLD"; public static final String SPECULATION_THRESHOLD_STEP = "COSMOS_SPECULATION_THRESHOLD_STEP"; private final SslContext sslContext; private static final String PROTOCOL_ENVIRONMENT_VARIABLE = "AZURE_COSMOS_DIRECT_MODE_PROTOCOL"; private static final String PROTOCOL_PROPERTY = "azure.cosmos.directModeProtocol"; private static final Protocol DEFAULT_PROTOCOL = Protocol.TCP; private static final String UNAVAILABLE_LOCATIONS_EXPIRATION_TIME_IN_SECONDS = "COSMOS.UNAVAILABLE_LOCATIONS_EXPIRATION_TIME_IN_SECONDS"; private static final String GLOBAL_ENDPOINT_MANAGER_INITIALIZATION_TIME_IN_SECONDS = "COSMOS.GLOBAL_ENDPOINT_MANAGER_MAX_INIT_TIME_IN_SECONDS"; private static final String MAX_HTTP_BODY_LENGTH_IN_BYTES = "COSMOS.MAX_HTTP_BODY_LENGTH_IN_BYTES"; private static final String MAX_HTTP_INITIAL_LINE_LENGTH_IN_BYTES = "COSMOS.MAX_HTTP_INITIAL_LINE_LENGTH_IN_BYTES"; private static final String MAX_HTTP_CHUNK_SIZE_IN_BYTES = "COSMOS.MAX_HTTP_CHUNK_SIZE_IN_BYTES"; private static final String MAX_HTTP_HEADER_SIZE_IN_BYTES = "COSMOS.MAX_HTTP_HEADER_SIZE_IN_BYTES"; private static final String MAX_DIRECT_HTTPS_POOL_SIZE = "COSMOS.MAX_DIRECT_HTTP_CONNECTION_LIMIT"; private static final String HTTP_RESPONSE_TIMEOUT_IN_SECONDS = "COSMOS.HTTP_RESPONSE_TIMEOUT_IN_SECONDS"; private static final String QUERY_PLAN_RESPONSE_TIMEOUT_IN_SECONDS = "COSMOS.QUERY_PLAN_RESPONSE_TIMEOUT_IN_SECONDS"; private static final String ADDRESS_REFRESH_RESPONSE_TIMEOUT_IN_SECONDS = "COSMOS.ADDRESS_REFRESH_RESPONSE_TIMEOUT_IN_SECONDS"; public static final String NON_IDEMPOTENT_WRITE_RETRY_POLICY = "COSMOS.WRITE_RETRY_POLICY"; public 
static final String NON_IDEMPOTENT_WRITE_RETRY_POLICY_VARIABLE = "COSMOS_WRITE_RETRY_POLICY"; private static final String CLIENT_TELEMETRY_PROXY_OPTIONS_CONFIG = "COSMOS.CLIENT_TELEMETRY_PROXY_OPTIONS_CONFIG"; private static final String CLIENT_TELEMETRY_SCHEDULING_IN_SECONDS = "COSMOS.CLIENT_TELEMETRY_SCHEDULING_IN_SECONDS"; private static final String CLIENT_TELEMETRY_ENDPOINT = "COSMOS.CLIENT_TELEMETRY_ENDPOINT"; private static final String ENVIRONMENT_NAME = "COSMOS.ENVIRONMENT_NAME"; private static final String QUERYPLAN_CACHING_ENABLED = "COSMOS.QUERYPLAN_CACHING_ENABLED"; private static final int DEFAULT_CLIENT_TELEMETRY_SCHEDULING_IN_SECONDS = 10 * 60; private static final int DEFAULT_UNAVAILABLE_LOCATIONS_EXPIRATION_TIME_IN_SECONDS = 5 * 60; private static final int DEFAULT_MAX_HTTP_BODY_LENGTH_IN_BYTES = 6 * 1024 * 1024; private static final int DEFAULT_MAX_HTTP_INITIAL_LINE_LENGTH = 4096; private static final int DEFAULT_MAX_HTTP_CHUNK_SIZE_IN_BYTES = 8192; private static final int DEFAULT_MAX_HTTP_REQUEST_HEADER_SIZE = 32 * 1024; private static final int MAX_NUMBER_OF_READ_BARRIER_READ_RETRIES = 6; private static final int MAX_NUMBER_OF_PRIMARY_READ_RETRIES = 6; private static final int MAX_NUMBER_OF_READ_QUORUM_RETRIES = 6; private static final int DELAY_BETWEEN_READ_BARRIER_CALLS_IN_MS = 5; private static final int MAX_BARRIER_RETRIES_FOR_MULTI_REGION = 30; private static final int BARRIER_RETRY_INTERVAL_IN_MS_FOR_MULTI_REGION = 30; private static final int MAX_SHORT_BARRIER_RETRIES_FOR_MULTI_REGION = 4; private static final int SHORT_BARRIER_RETRY_INTERVAL_IN_MS_FOR_MULTI_REGION = 10; private static final int CPU_CNT = Runtime.getRuntime().availableProcessors(); private static final int DEFAULT_DIRECT_HTTPS_POOL_SIZE = CPU_CNT * 500; private static final int DEFAULT_GLOBAL_ENDPOINT_MANAGER_INITIALIZATION_TIME_IN_SECONDS = 2 * 60; private static final Duration MAX_IDLE_CONNECTION_TIMEOUT = Duration.ofSeconds(60); private static final Duration 
CONNECTION_ACQUIRE_TIMEOUT = Duration.ofSeconds(45); private static final int REACTOR_NETTY_MAX_CONNECTION_POOL_SIZE = 1000; private static final String REACTOR_NETTY_CONNECTION_POOL_NAME = "reactor-netty-connection-pool"; private static final int DEFAULT_HTTP_RESPONSE_TIMEOUT_IN_SECONDS = 60; private static final int DEFAULT_QUERY_PLAN_RESPONSE_TIMEOUT_IN_SECONDS = 5; private static final int DEFAULT_ADDRESS_REFRESH_RESPONSE_TIMEOUT_IN_SECONDS = 5; private static final String DEFAULT_SESSION_TOKEN_MISMATCH_WAIT_TIME_IN_MILLISECONDS_NAME = "COSMOS.DEFAULT_SESSION_TOKEN_MISMATCH_WAIT_TIME_IN_MILLISECONDS"; private static final int DEFAULT_SESSION_TOKEN_MISMATCH_WAIT_TIME_IN_MILLISECONDS = 5000; private static final String DEFAULT_SESSION_TOKEN_MISMATCH_INITIAL_BACKOFF_TIME_IN_MILLISECONDS_NAME = "COSMOS.DEFAULT_SESSION_TOKEN_MISMATCH_INITIAL_BACKOFF_TIME_IN_MILLISECONDS"; private static final int DEFAULT_SESSION_TOKEN_MISMATCH_INITIAL_BACKOFF_TIME_IN_MILLISECONDS = 5; private static final String DEFAULT_SESSION_TOKEN_MISMATCH_MAXIMUM_BACKOFF_TIME_IN_MILLISECONDS_NAME = "COSMOS.DEFAULT_SESSION_TOKEN_MISMATCH_MAXIMUM_BACKOFF_TIME_IN_MILLISECONDS"; private static final int DEFAULT_SESSION_TOKEN_MISMATCH_MAXIMUM_BACKOFF_TIME_IN_MILLISECONDS = 50; private static final String SWITCH_OFF_IO_THREAD_FOR_RESPONSE_NAME = "COSMOS.SWITCH_OFF_IO_THREAD_FOR_RESPONSE"; private static final boolean DEFAULT_SWITCH_OFF_IO_THREAD_FOR_RESPONSE = false; private static final String QUERY_EMPTY_PAGE_DIAGNOSTICS_ENABLED = "COSMOS.QUERY_EMPTY_PAGE_DIAGNOSTICS_ENABLED"; private static final boolean DEFAULT_QUERY_EMPTY_PAGE_DIAGNOSTICS_ENABLED = false; private static final String USE_LEGACY_TRACING = "COSMOS.USE_LEGACY_TRACING"; private static final boolean DEFAULT_USE_LEGACY_TRACING = false; private static final String REPLICA_ADDRESS_VALIDATION_ENABLED = "COSMOS.REPLICA_ADDRESS_VALIDATION_ENABLED"; private static final boolean DEFAULT_REPLICA_ADDRESS_VALIDATION_ENABLED = true; private static 
final String TCP_HEALTH_CHECK_TIMEOUT_DETECTION_ENABLED = "COSMOS.TCP_HEALTH_CHECK_TIMEOUT_DETECTION_ENABLED"; private static final boolean DEFAULT_TCP_HEALTH_CHECK_TIMEOUT_DETECTION_ENABLED = true; private static final String MIN_CONNECTION_POOL_SIZE_PER_ENDPOINT = "COSMOS.MIN_CONNECTION_POOL_SIZE_PER_ENDPOINT"; private static final int DEFAULT_MIN_CONNECTION_POOL_SIZE_PER_ENDPOINT = 1; private static final String AGGRESSIVE_WARMUP_CONCURRENCY = "COSMOS.AGGRESSIVE_WARMUP_CONCURRENCY"; private static final int DEFAULT_AGGRESSIVE_WARMUP_CONCURRENCY = Configs.getCPUCnt(); private static final String DEFENSIVE_WARMUP_CONCURRENCY = "COSMOS.DEFENSIVE_WARMUP_CONCURRENCY"; private static final int DEFAULT_DEFENSIVE_WARMUP_CONCURRENCY = 1; public Configs() { this.sslContext = sslContextInit(); } public static int getCPUCnt() { return CPU_CNT; } private SslContext sslContextInit() { try { SslProvider sslProvider = SslContext.defaultClientProvider(); return SslContextBuilder.forClient().sslProvider(sslProvider).build(); } catch (SSLException sslException) { logger.error("Fatal error cannot instantiate ssl context due to {}", sslException.getMessage(), sslException); throw new IllegalStateException(sslException); } } public SslContext getSslContext() { return this.sslContext; } public Protocol getProtocol() { String protocol = System.getProperty(PROTOCOL_PROPERTY, firstNonNull( emptyToNull(System.getenv().get(PROTOCOL_ENVIRONMENT_VARIABLE)), DEFAULT_PROTOCOL.name())); try { return Protocol.valueOf(protocol.toUpperCase(Locale.ROOT)); } catch (Exception e) { logger.error("Parsing protocol {} failed. 
Using the default {}.", protocol, DEFAULT_PROTOCOL, e); return DEFAULT_PROTOCOL; } } public int getMaxNumberOfReadBarrierReadRetries() { return MAX_NUMBER_OF_READ_BARRIER_READ_RETRIES; } public int getMaxNumberOfPrimaryReadRetries() { return MAX_NUMBER_OF_PRIMARY_READ_RETRIES; } public int getMaxNumberOfReadQuorumRetries() { return MAX_NUMBER_OF_READ_QUORUM_RETRIES; } public int getDelayBetweenReadBarrierCallsInMs() { return DELAY_BETWEEN_READ_BARRIER_CALLS_IN_MS; } public int getMaxBarrierRetriesForMultiRegion() { return MAX_BARRIER_RETRIES_FOR_MULTI_REGION; } public int getBarrierRetryIntervalInMsForMultiRegion() { return BARRIER_RETRY_INTERVAL_IN_MS_FOR_MULTI_REGION; } public int getMaxShortBarrierRetriesForMultiRegion() { return MAX_SHORT_BARRIER_RETRIES_FOR_MULTI_REGION; } public int getShortBarrierRetryIntervalInMsForMultiRegion() { return SHORT_BARRIER_RETRY_INTERVAL_IN_MS_FOR_MULTI_REGION; } public int getDirectHttpsMaxConnectionLimit() { return getJVMConfigAsInt(MAX_DIRECT_HTTPS_POOL_SIZE, DEFAULT_DIRECT_HTTPS_POOL_SIZE); } public int getMaxHttpHeaderSize() { return getJVMConfigAsInt(MAX_HTTP_HEADER_SIZE_IN_BYTES, DEFAULT_MAX_HTTP_REQUEST_HEADER_SIZE); } public int getMaxHttpInitialLineLength() { return getJVMConfigAsInt(MAX_HTTP_INITIAL_LINE_LENGTH_IN_BYTES, DEFAULT_MAX_HTTP_INITIAL_LINE_LENGTH); } public int getMaxHttpChunkSize() { return getJVMConfigAsInt(MAX_HTTP_CHUNK_SIZE_IN_BYTES, DEFAULT_MAX_HTTP_CHUNK_SIZE_IN_BYTES); } public int getMaxHttpBodyLength() { return getJVMConfigAsInt(MAX_HTTP_BODY_LENGTH_IN_BYTES, DEFAULT_MAX_HTTP_BODY_LENGTH_IN_BYTES); } public int getUnavailableLocationsExpirationTimeInSeconds() { return getJVMConfigAsInt(UNAVAILABLE_LOCATIONS_EXPIRATION_TIME_IN_SECONDS, DEFAULT_UNAVAILABLE_LOCATIONS_EXPIRATION_TIME_IN_SECONDS); } public static int getClientTelemetrySchedulingInSec() { return getJVMConfigAsInt(CLIENT_TELEMETRY_SCHEDULING_IN_SECONDS, DEFAULT_CLIENT_TELEMETRY_SCHEDULING_IN_SECONDS); } public int 
getGlobalEndpointManagerMaxInitializationTimeInSeconds() { return getJVMConfigAsInt(GLOBAL_ENDPOINT_MANAGER_INITIALIZATION_TIME_IN_SECONDS, DEFAULT_GLOBAL_ENDPOINT_MANAGER_INITIALIZATION_TIME_IN_SECONDS); } public String getReactorNettyConnectionPoolName() { return REACTOR_NETTY_CONNECTION_POOL_NAME; } public Duration getMaxIdleConnectionTimeout() { return MAX_IDLE_CONNECTION_TIMEOUT; } public Duration getConnectionAcquireTimeout() { return CONNECTION_ACQUIRE_TIMEOUT; } public int getReactorNettyMaxConnectionPoolSize() { return REACTOR_NETTY_MAX_CONNECTION_POOL_SIZE; } public static int getHttpResponseTimeoutInSeconds() { return getJVMConfigAsInt(HTTP_RESPONSE_TIMEOUT_IN_SECONDS, DEFAULT_HTTP_RESPONSE_TIMEOUT_IN_SECONDS); } public static int getQueryPlanResponseTimeoutInSeconds() { return getJVMConfigAsInt(QUERY_PLAN_RESPONSE_TIMEOUT_IN_SECONDS, DEFAULT_QUERY_PLAN_RESPONSE_TIMEOUT_IN_SECONDS); } public static String getClientTelemetryEndpoint() { return System.getProperty(CLIENT_TELEMETRY_ENDPOINT); } public static String getClientTelemetryProxyOptionsConfig() { return System.getProperty(CLIENT_TELEMETRY_PROXY_OPTIONS_CONFIG); } public static String getNonIdempotentWriteRetryPolicy() { String valueFromSystemProperty = System.getProperty(NON_IDEMPOTENT_WRITE_RETRY_POLICY); if (valueFromSystemProperty != null && !valueFromSystemProperty.isEmpty()) { return valueFromSystemProperty; } return System.getenv(NON_IDEMPOTENT_WRITE_RETRY_POLICY_VARIABLE); } public static String getEnvironmentName() { return System.getProperty(ENVIRONMENT_NAME); } public static boolean isQueryPlanCachingEnabled() { return getJVMConfigAsBoolean(QUERYPLAN_CACHING_ENABLED, true); } public static int getAddressRefreshResponseTimeoutInSeconds() { return getJVMConfigAsInt(ADDRESS_REFRESH_RESPONSE_TIMEOUT_IN_SECONDS, DEFAULT_ADDRESS_REFRESH_RESPONSE_TIMEOUT_IN_SECONDS); } public static int getSessionTokenMismatchDefaultWaitTimeInMs() { return getJVMConfigAsInt( 
DEFAULT_SESSION_TOKEN_MISMATCH_WAIT_TIME_IN_MILLISECONDS_NAME, DEFAULT_SESSION_TOKEN_MISMATCH_WAIT_TIME_IN_MILLISECONDS); } public static int getSessionTokenMismatchInitialBackoffTimeInMs() { return getJVMConfigAsInt( DEFAULT_SESSION_TOKEN_MISMATCH_INITIAL_BACKOFF_TIME_IN_MILLISECONDS_NAME, DEFAULT_SESSION_TOKEN_MISMATCH_INITIAL_BACKOFF_TIME_IN_MILLISECONDS); } public static int getSessionTokenMismatchMaximumBackoffTimeInMs() { return getJVMConfigAsInt( DEFAULT_SESSION_TOKEN_MISMATCH_MAXIMUM_BACKOFF_TIME_IN_MILLISECONDS_NAME, DEFAULT_SESSION_TOKEN_MISMATCH_MAXIMUM_BACKOFF_TIME_IN_MILLISECONDS); } public static int getSpeculationType() { return getJVMConfigAsInt(SPECULATION_TYPE, 0); } public static int speculationThresholdStep() { return getJVMConfigAsInt(SPECULATION_THRESHOLD_STEP, 100); } public static boolean shouldSwitchOffIOThreadForResponse() { return getJVMConfigAsBoolean( SWITCH_OFF_IO_THREAD_FOR_RESPONSE_NAME, DEFAULT_SWITCH_OFF_IO_THREAD_FOR_RESPONSE); } public static boolean isEmptyPageDiagnosticsEnabled() { return getJVMConfigAsBoolean( QUERY_EMPTY_PAGE_DIAGNOSTICS_ENABLED, DEFAULT_QUERY_EMPTY_PAGE_DIAGNOSTICS_ENABLED); } public static boolean useLegacyTracing() { return getJVMConfigAsBoolean( USE_LEGACY_TRACING, DEFAULT_USE_LEGACY_TRACING); } private static int getJVMConfigAsInt(String propName, int defaultValue) { String propValue = System.getProperty(propName); return getIntValue(propValue, defaultValue); } private static boolean getJVMConfigAsBoolean(String propName, boolean defaultValue) { String propValue = System.getProperty(propName); return getBooleanValue(propValue, defaultValue); } private static int getIntValue(String val, int defaultValue) { if (StringUtils.isEmpty(val)) { return defaultValue; } else { return Integer.valueOf(val); } } private static boolean getBooleanValue(String val, boolean defaultValue) { if (StringUtils.isEmpty(val)) { return defaultValue; } else { return Boolean.valueOf(val); } } public static boolean 
isReplicaAddressValidationEnabled() { return getJVMConfigAsBoolean( REPLICA_ADDRESS_VALIDATION_ENABLED, DEFAULT_REPLICA_ADDRESS_VALIDATION_ENABLED); } public static boolean isTcpHealthCheckTimeoutDetectionEnabled() { return getJVMConfigAsBoolean( TCP_HEALTH_CHECK_TIMEOUT_DETECTION_ENABLED, DEFAULT_TCP_HEALTH_CHECK_TIMEOUT_DETECTION_ENABLED); } public static int getMinConnectionPoolSizePerEndpoint() { return getIntValue(System.getProperty(MIN_CONNECTION_POOL_SIZE_PER_ENDPOINT), DEFAULT_MIN_CONNECTION_POOL_SIZE_PER_ENDPOINT); } public static int getDefensiveWarmupConcurrency() { return getIntValue(System.getProperty(DEFENSIVE_WARMUP_CONCURRENCY), DEFAULT_DEFENSIVE_WARMUP_CONCURRENCY); } public static int getAggressiveWarmupConcurrency() { return getIntValue(System.getProperty(AGGRESSIVE_WARMUP_CONCURRENCY), DEFAULT_AGGRESSIVE_WARMUP_CONCURRENCY); } }
Should the default speculation type be 1 (threshold-based speculation) instead of 0 (no speculation)?
/** Returns the configured speculation type; defaults to 0 (no speculation) when unset. */
public static int getSpeculationType() {
    final int noSpeculation = 0;
    return getJVMConfigAsInt(SPECULATION_TYPE, noSpeculation);
}
return getJVMConfigAsInt(SPECULATION_TYPE, 0);
// Reads the COSMOS_SPECULATION_TYPE system property; defaults to 0 (no speculation) when unset.
public static int getSpeculationType() { return getJVMConfigAsInt(SPECULATION_TYPE, 0); }
class Configs { private static final Logger logger = LoggerFactory.getLogger(Configs.class); /** * Integer value specifying the speculation type * <pre> * 0 - No speculation * 1 - Threshold based speculation * </pre> */ public static final String SPECULATION_TYPE = "COSMOS_SPECULATION_TYPE"; public static final String SPECULATION_THRESHOLD = "COSMOS_SPECULATION_THRESHOLD"; public static final String SPECULATION_THRESHOLD_STEP = "COSMOS_SPECULATION_THRESHOLD_STEP"; private final SslContext sslContext; private static final String PROTOCOL_ENVIRONMENT_VARIABLE = "AZURE_COSMOS_DIRECT_MODE_PROTOCOL"; private static final String PROTOCOL_PROPERTY = "azure.cosmos.directModeProtocol"; private static final Protocol DEFAULT_PROTOCOL = Protocol.TCP; private static final String UNAVAILABLE_LOCATIONS_EXPIRATION_TIME_IN_SECONDS = "COSMOS.UNAVAILABLE_LOCATIONS_EXPIRATION_TIME_IN_SECONDS"; private static final String GLOBAL_ENDPOINT_MANAGER_INITIALIZATION_TIME_IN_SECONDS = "COSMOS.GLOBAL_ENDPOINT_MANAGER_MAX_INIT_TIME_IN_SECONDS"; private static final String MAX_HTTP_BODY_LENGTH_IN_BYTES = "COSMOS.MAX_HTTP_BODY_LENGTH_IN_BYTES"; private static final String MAX_HTTP_INITIAL_LINE_LENGTH_IN_BYTES = "COSMOS.MAX_HTTP_INITIAL_LINE_LENGTH_IN_BYTES"; private static final String MAX_HTTP_CHUNK_SIZE_IN_BYTES = "COSMOS.MAX_HTTP_CHUNK_SIZE_IN_BYTES"; private static final String MAX_HTTP_HEADER_SIZE_IN_BYTES = "COSMOS.MAX_HTTP_HEADER_SIZE_IN_BYTES"; private static final String MAX_DIRECT_HTTPS_POOL_SIZE = "COSMOS.MAX_DIRECT_HTTP_CONNECTION_LIMIT"; private static final String HTTP_RESPONSE_TIMEOUT_IN_SECONDS = "COSMOS.HTTP_RESPONSE_TIMEOUT_IN_SECONDS"; private static final String QUERY_PLAN_RESPONSE_TIMEOUT_IN_SECONDS = "COSMOS.QUERY_PLAN_RESPONSE_TIMEOUT_IN_SECONDS"; private static final String ADDRESS_REFRESH_RESPONSE_TIMEOUT_IN_SECONDS = "COSMOS.ADDRESS_REFRESH_RESPONSE_TIMEOUT_IN_SECONDS"; public static final String NON_IDEMPOTENT_WRITE_RETRY_POLICY = "COSMOS.WRITE_RETRY_POLICY"; public 
static final String NON_IDEMPOTENT_WRITE_RETRY_POLICY_VARIABLE = "COSMOS_WRITE_RETRY_POLICY"; private static final String CLIENT_TELEMETRY_PROXY_OPTIONS_CONFIG = "COSMOS.CLIENT_TELEMETRY_PROXY_OPTIONS_CONFIG"; private static final String CLIENT_TELEMETRY_SCHEDULING_IN_SECONDS = "COSMOS.CLIENT_TELEMETRY_SCHEDULING_IN_SECONDS"; private static final String CLIENT_TELEMETRY_ENDPOINT = "COSMOS.CLIENT_TELEMETRY_ENDPOINT"; private static final String ENVIRONMENT_NAME = "COSMOS.ENVIRONMENT_NAME"; private static final String QUERYPLAN_CACHING_ENABLED = "COSMOS.QUERYPLAN_CACHING_ENABLED"; private static final int DEFAULT_CLIENT_TELEMETRY_SCHEDULING_IN_SECONDS = 10 * 60; private static final int DEFAULT_UNAVAILABLE_LOCATIONS_EXPIRATION_TIME_IN_SECONDS = 5 * 60; private static final int DEFAULT_MAX_HTTP_BODY_LENGTH_IN_BYTES = 6 * 1024 * 1024; private static final int DEFAULT_MAX_HTTP_INITIAL_LINE_LENGTH = 4096; private static final int DEFAULT_MAX_HTTP_CHUNK_SIZE_IN_BYTES = 8192; private static final int DEFAULT_MAX_HTTP_REQUEST_HEADER_SIZE = 32 * 1024; private static final int MAX_NUMBER_OF_READ_BARRIER_READ_RETRIES = 6; private static final int MAX_NUMBER_OF_PRIMARY_READ_RETRIES = 6; private static final int MAX_NUMBER_OF_READ_QUORUM_RETRIES = 6; private static final int DELAY_BETWEEN_READ_BARRIER_CALLS_IN_MS = 5; private static final int MAX_BARRIER_RETRIES_FOR_MULTI_REGION = 30; private static final int BARRIER_RETRY_INTERVAL_IN_MS_FOR_MULTI_REGION = 30; private static final int MAX_SHORT_BARRIER_RETRIES_FOR_MULTI_REGION = 4; private static final int SHORT_BARRIER_RETRY_INTERVAL_IN_MS_FOR_MULTI_REGION = 10; private static final int CPU_CNT = Runtime.getRuntime().availableProcessors(); private static final int DEFAULT_DIRECT_HTTPS_POOL_SIZE = CPU_CNT * 500; private static final int DEFAULT_GLOBAL_ENDPOINT_MANAGER_INITIALIZATION_TIME_IN_SECONDS = 2 * 60; private static final Duration MAX_IDLE_CONNECTION_TIMEOUT = Duration.ofSeconds(60); private static final Duration 
CONNECTION_ACQUIRE_TIMEOUT = Duration.ofSeconds(45); private static final int REACTOR_NETTY_MAX_CONNECTION_POOL_SIZE = 1000; private static final String REACTOR_NETTY_CONNECTION_POOL_NAME = "reactor-netty-connection-pool"; private static final int DEFAULT_HTTP_RESPONSE_TIMEOUT_IN_SECONDS = 60; private static final int DEFAULT_QUERY_PLAN_RESPONSE_TIMEOUT_IN_SECONDS = 5; private static final int DEFAULT_ADDRESS_REFRESH_RESPONSE_TIMEOUT_IN_SECONDS = 5; private static final String DEFAULT_SESSION_TOKEN_MISMATCH_WAIT_TIME_IN_MILLISECONDS_NAME = "COSMOS.DEFAULT_SESSION_TOKEN_MISMATCH_WAIT_TIME_IN_MILLISECONDS"; private static final int DEFAULT_SESSION_TOKEN_MISMATCH_WAIT_TIME_IN_MILLISECONDS = 5000; private static final String DEFAULT_SESSION_TOKEN_MISMATCH_INITIAL_BACKOFF_TIME_IN_MILLISECONDS_NAME = "COSMOS.DEFAULT_SESSION_TOKEN_MISMATCH_INITIAL_BACKOFF_TIME_IN_MILLISECONDS"; private static final int DEFAULT_SESSION_TOKEN_MISMATCH_INITIAL_BACKOFF_TIME_IN_MILLISECONDS = 5; private static final String DEFAULT_SESSION_TOKEN_MISMATCH_MAXIMUM_BACKOFF_TIME_IN_MILLISECONDS_NAME = "COSMOS.DEFAULT_SESSION_TOKEN_MISMATCH_MAXIMUM_BACKOFF_TIME_IN_MILLISECONDS"; private static final int DEFAULT_SESSION_TOKEN_MISMATCH_MAXIMUM_BACKOFF_TIME_IN_MILLISECONDS = 50; private static final String SWITCH_OFF_IO_THREAD_FOR_RESPONSE_NAME = "COSMOS.SWITCH_OFF_IO_THREAD_FOR_RESPONSE"; private static final boolean DEFAULT_SWITCH_OFF_IO_THREAD_FOR_RESPONSE = false; private static final String QUERY_EMPTY_PAGE_DIAGNOSTICS_ENABLED = "COSMOS.QUERY_EMPTY_PAGE_DIAGNOSTICS_ENABLED"; private static final boolean DEFAULT_QUERY_EMPTY_PAGE_DIAGNOSTICS_ENABLED = false; private static final String USE_LEGACY_TRACING = "COSMOS.USE_LEGACY_TRACING"; private static final boolean DEFAULT_USE_LEGACY_TRACING = false; private static final String REPLICA_ADDRESS_VALIDATION_ENABLED = "COSMOS.REPLICA_ADDRESS_VALIDATION_ENABLED"; private static final boolean DEFAULT_REPLICA_ADDRESS_VALIDATION_ENABLED = true; private static 
final String TCP_HEALTH_CHECK_TIMEOUT_DETECTION_ENABLED = "COSMOS.TCP_HEALTH_CHECK_TIMEOUT_DETECTION_ENABLED"; private static final boolean DEFAULT_TCP_HEALTH_CHECK_TIMEOUT_DETECTION_ENABLED = true; private static final String MIN_CONNECTION_POOL_SIZE_PER_ENDPOINT = "COSMOS.MIN_CONNECTION_POOL_SIZE_PER_ENDPOINT"; private static final int DEFAULT_MIN_CONNECTION_POOL_SIZE_PER_ENDPOINT = 1; private static final String AGGRESSIVE_WARMUP_CONCURRENCY = "COSMOS.AGGRESSIVE_WARMUP_CONCURRENCY"; private static final int DEFAULT_AGGRESSIVE_WARMUP_CONCURRENCY = Configs.getCPUCnt(); private static final String DEFENSIVE_WARMUP_CONCURRENCY = "COSMOS.DEFENSIVE_WARMUP_CONCURRENCY"; private static final int DEFAULT_DEFENSIVE_WARMUP_CONCURRENCY = 1; public Configs() { this.sslContext = sslContextInit(); } public static int getCPUCnt() { return CPU_CNT; } private SslContext sslContextInit() { try { SslProvider sslProvider = SslContext.defaultClientProvider(); return SslContextBuilder.forClient().sslProvider(sslProvider).build(); } catch (SSLException sslException) { logger.error("Fatal error cannot instantiate ssl context due to {}", sslException.getMessage(), sslException); throw new IllegalStateException(sslException); } } public SslContext getSslContext() { return this.sslContext; } public Protocol getProtocol() { String protocol = System.getProperty(PROTOCOL_PROPERTY, firstNonNull( emptyToNull(System.getenv().get(PROTOCOL_ENVIRONMENT_VARIABLE)), DEFAULT_PROTOCOL.name())); try { return Protocol.valueOf(protocol.toUpperCase(Locale.ROOT)); } catch (Exception e) { logger.error("Parsing protocol {} failed. 
Using the default {}.", protocol, DEFAULT_PROTOCOL, e); return DEFAULT_PROTOCOL; } } public int getMaxNumberOfReadBarrierReadRetries() { return MAX_NUMBER_OF_READ_BARRIER_READ_RETRIES; } public int getMaxNumberOfPrimaryReadRetries() { return MAX_NUMBER_OF_PRIMARY_READ_RETRIES; } public int getMaxNumberOfReadQuorumRetries() { return MAX_NUMBER_OF_READ_QUORUM_RETRIES; } public int getDelayBetweenReadBarrierCallsInMs() { return DELAY_BETWEEN_READ_BARRIER_CALLS_IN_MS; } public int getMaxBarrierRetriesForMultiRegion() { return MAX_BARRIER_RETRIES_FOR_MULTI_REGION; } public int getBarrierRetryIntervalInMsForMultiRegion() { return BARRIER_RETRY_INTERVAL_IN_MS_FOR_MULTI_REGION; } public int getMaxShortBarrierRetriesForMultiRegion() { return MAX_SHORT_BARRIER_RETRIES_FOR_MULTI_REGION; } public int getShortBarrierRetryIntervalInMsForMultiRegion() { return SHORT_BARRIER_RETRY_INTERVAL_IN_MS_FOR_MULTI_REGION; } public int getDirectHttpsMaxConnectionLimit() { return getJVMConfigAsInt(MAX_DIRECT_HTTPS_POOL_SIZE, DEFAULT_DIRECT_HTTPS_POOL_SIZE); } public int getMaxHttpHeaderSize() { return getJVMConfigAsInt(MAX_HTTP_HEADER_SIZE_IN_BYTES, DEFAULT_MAX_HTTP_REQUEST_HEADER_SIZE); } public int getMaxHttpInitialLineLength() { return getJVMConfigAsInt(MAX_HTTP_INITIAL_LINE_LENGTH_IN_BYTES, DEFAULT_MAX_HTTP_INITIAL_LINE_LENGTH); } public int getMaxHttpChunkSize() { return getJVMConfigAsInt(MAX_HTTP_CHUNK_SIZE_IN_BYTES, DEFAULT_MAX_HTTP_CHUNK_SIZE_IN_BYTES); } public int getMaxHttpBodyLength() { return getJVMConfigAsInt(MAX_HTTP_BODY_LENGTH_IN_BYTES, DEFAULT_MAX_HTTP_BODY_LENGTH_IN_BYTES); } public int getUnavailableLocationsExpirationTimeInSeconds() { return getJVMConfigAsInt(UNAVAILABLE_LOCATIONS_EXPIRATION_TIME_IN_SECONDS, DEFAULT_UNAVAILABLE_LOCATIONS_EXPIRATION_TIME_IN_SECONDS); } public static int getClientTelemetrySchedulingInSec() { return getJVMConfigAsInt(CLIENT_TELEMETRY_SCHEDULING_IN_SECONDS, DEFAULT_CLIENT_TELEMETRY_SCHEDULING_IN_SECONDS); } public int 
getGlobalEndpointManagerMaxInitializationTimeInSeconds() { return getJVMConfigAsInt(GLOBAL_ENDPOINT_MANAGER_INITIALIZATION_TIME_IN_SECONDS, DEFAULT_GLOBAL_ENDPOINT_MANAGER_INITIALIZATION_TIME_IN_SECONDS); } public String getReactorNettyConnectionPoolName() { return REACTOR_NETTY_CONNECTION_POOL_NAME; } public Duration getMaxIdleConnectionTimeout() { return MAX_IDLE_CONNECTION_TIMEOUT; } public Duration getConnectionAcquireTimeout() { return CONNECTION_ACQUIRE_TIMEOUT; } public int getReactorNettyMaxConnectionPoolSize() { return REACTOR_NETTY_MAX_CONNECTION_POOL_SIZE; } public static int getHttpResponseTimeoutInSeconds() { return getJVMConfigAsInt(HTTP_RESPONSE_TIMEOUT_IN_SECONDS, DEFAULT_HTTP_RESPONSE_TIMEOUT_IN_SECONDS); } public static int getQueryPlanResponseTimeoutInSeconds() { return getJVMConfigAsInt(QUERY_PLAN_RESPONSE_TIMEOUT_IN_SECONDS, DEFAULT_QUERY_PLAN_RESPONSE_TIMEOUT_IN_SECONDS); } public static String getClientTelemetryEndpoint() { return System.getProperty(CLIENT_TELEMETRY_ENDPOINT); } public static String getClientTelemetryProxyOptionsConfig() { return System.getProperty(CLIENT_TELEMETRY_PROXY_OPTIONS_CONFIG); } public static String getNonIdempotentWriteRetryPolicy() { String valueFromSystemProperty = System.getProperty(NON_IDEMPOTENT_WRITE_RETRY_POLICY); if (valueFromSystemProperty != null && !valueFromSystemProperty.isEmpty()) { return valueFromSystemProperty; } return System.getenv(NON_IDEMPOTENT_WRITE_RETRY_POLICY_VARIABLE); } public static String getEnvironmentName() { return System.getProperty(ENVIRONMENT_NAME); } public static boolean isQueryPlanCachingEnabled() { return getJVMConfigAsBoolean(QUERYPLAN_CACHING_ENABLED, true); } public static int getAddressRefreshResponseTimeoutInSeconds() { return getJVMConfigAsInt(ADDRESS_REFRESH_RESPONSE_TIMEOUT_IN_SECONDS, DEFAULT_ADDRESS_REFRESH_RESPONSE_TIMEOUT_IN_SECONDS); } public static int getSessionTokenMismatchDefaultWaitTimeInMs() { return getJVMConfigAsInt( 
DEFAULT_SESSION_TOKEN_MISMATCH_WAIT_TIME_IN_MILLISECONDS_NAME, DEFAULT_SESSION_TOKEN_MISMATCH_WAIT_TIME_IN_MILLISECONDS); } public static int getSessionTokenMismatchInitialBackoffTimeInMs() { return getJVMConfigAsInt( DEFAULT_SESSION_TOKEN_MISMATCH_INITIAL_BACKOFF_TIME_IN_MILLISECONDS_NAME, DEFAULT_SESSION_TOKEN_MISMATCH_INITIAL_BACKOFF_TIME_IN_MILLISECONDS); } public static int getSessionTokenMismatchMaximumBackoffTimeInMs() { return getJVMConfigAsInt( DEFAULT_SESSION_TOKEN_MISMATCH_MAXIMUM_BACKOFF_TIME_IN_MILLISECONDS_NAME, DEFAULT_SESSION_TOKEN_MISMATCH_MAXIMUM_BACKOFF_TIME_IN_MILLISECONDS); } public static int speculationThreshold() { return getJVMConfigAsInt(SPECULATION_THRESHOLD, 500); } public static int speculationThresholdStep() { return getJVMConfigAsInt(SPECULATION_THRESHOLD_STEP, 100); } public static boolean shouldSwitchOffIOThreadForResponse() { return getJVMConfigAsBoolean( SWITCH_OFF_IO_THREAD_FOR_RESPONSE_NAME, DEFAULT_SWITCH_OFF_IO_THREAD_FOR_RESPONSE); } public static boolean isEmptyPageDiagnosticsEnabled() { return getJVMConfigAsBoolean( QUERY_EMPTY_PAGE_DIAGNOSTICS_ENABLED, DEFAULT_QUERY_EMPTY_PAGE_DIAGNOSTICS_ENABLED); } public static boolean useLegacyTracing() { return getJVMConfigAsBoolean( USE_LEGACY_TRACING, DEFAULT_USE_LEGACY_TRACING); } private static int getJVMConfigAsInt(String propName, int defaultValue) { String propValue = System.getProperty(propName); return getIntValue(propValue, defaultValue); } private static boolean getJVMConfigAsBoolean(String propName, boolean defaultValue) { String propValue = System.getProperty(propName); return getBooleanValue(propValue, defaultValue); } private static int getIntValue(String val, int defaultValue) { if (StringUtils.isEmpty(val)) { return defaultValue; } else { return Integer.valueOf(val); } } private static boolean getBooleanValue(String val, boolean defaultValue) { if (StringUtils.isEmpty(val)) { return defaultValue; } else { return Boolean.valueOf(val); } } public static boolean 
isReplicaAddressValidationEnabled() { return getJVMConfigAsBoolean( REPLICA_ADDRESS_VALIDATION_ENABLED, DEFAULT_REPLICA_ADDRESS_VALIDATION_ENABLED); } public static boolean isTcpHealthCheckTimeoutDetectionEnabled() { return getJVMConfigAsBoolean( TCP_HEALTH_CHECK_TIMEOUT_DETECTION_ENABLED, DEFAULT_TCP_HEALTH_CHECK_TIMEOUT_DETECTION_ENABLED); } public static int getMinConnectionPoolSizePerEndpoint() { return getIntValue(System.getProperty(MIN_CONNECTION_POOL_SIZE_PER_ENDPOINT), DEFAULT_MIN_CONNECTION_POOL_SIZE_PER_ENDPOINT); } public static int getDefensiveWarmupConcurrency() { return getIntValue(System.getProperty(DEFENSIVE_WARMUP_CONCURRENCY), DEFAULT_DEFENSIVE_WARMUP_CONCURRENCY); } public static int getAggressiveWarmupConcurrency() { return getIntValue(System.getProperty(AGGRESSIVE_WARMUP_CONCURRENCY), DEFAULT_AGGRESSIVE_WARMUP_CONCURRENCY); } }
class Configs { private static final Logger logger = LoggerFactory.getLogger(Configs.class); /** * Integer value specifying the speculation type * <pre> * 0 - No speculation * 1 - Threshold based speculation * </pre> */ public static final String SPECULATION_TYPE = "COSMOS_SPECULATION_TYPE"; public static final String SPECULATION_THRESHOLD = "COSMOS_SPECULATION_THRESHOLD"; public static final String SPECULATION_THRESHOLD_STEP = "COSMOS_SPECULATION_THRESHOLD_STEP"; private final SslContext sslContext; private static final String PROTOCOL_ENVIRONMENT_VARIABLE = "AZURE_COSMOS_DIRECT_MODE_PROTOCOL"; private static final String PROTOCOL_PROPERTY = "azure.cosmos.directModeProtocol"; private static final Protocol DEFAULT_PROTOCOL = Protocol.TCP; private static final String UNAVAILABLE_LOCATIONS_EXPIRATION_TIME_IN_SECONDS = "COSMOS.UNAVAILABLE_LOCATIONS_EXPIRATION_TIME_IN_SECONDS"; private static final String GLOBAL_ENDPOINT_MANAGER_INITIALIZATION_TIME_IN_SECONDS = "COSMOS.GLOBAL_ENDPOINT_MANAGER_MAX_INIT_TIME_IN_SECONDS"; private static final String MAX_HTTP_BODY_LENGTH_IN_BYTES = "COSMOS.MAX_HTTP_BODY_LENGTH_IN_BYTES"; private static final String MAX_HTTP_INITIAL_LINE_LENGTH_IN_BYTES = "COSMOS.MAX_HTTP_INITIAL_LINE_LENGTH_IN_BYTES"; private static final String MAX_HTTP_CHUNK_SIZE_IN_BYTES = "COSMOS.MAX_HTTP_CHUNK_SIZE_IN_BYTES"; private static final String MAX_HTTP_HEADER_SIZE_IN_BYTES = "COSMOS.MAX_HTTP_HEADER_SIZE_IN_BYTES"; private static final String MAX_DIRECT_HTTPS_POOL_SIZE = "COSMOS.MAX_DIRECT_HTTP_CONNECTION_LIMIT"; private static final String HTTP_RESPONSE_TIMEOUT_IN_SECONDS = "COSMOS.HTTP_RESPONSE_TIMEOUT_IN_SECONDS"; private static final String QUERY_PLAN_RESPONSE_TIMEOUT_IN_SECONDS = "COSMOS.QUERY_PLAN_RESPONSE_TIMEOUT_IN_SECONDS"; private static final String ADDRESS_REFRESH_RESPONSE_TIMEOUT_IN_SECONDS = "COSMOS.ADDRESS_REFRESH_RESPONSE_TIMEOUT_IN_SECONDS"; public static final String NON_IDEMPOTENT_WRITE_RETRY_POLICY = "COSMOS.WRITE_RETRY_POLICY"; public 
static final String NON_IDEMPOTENT_WRITE_RETRY_POLICY_VARIABLE = "COSMOS_WRITE_RETRY_POLICY"; private static final String CLIENT_TELEMETRY_PROXY_OPTIONS_CONFIG = "COSMOS.CLIENT_TELEMETRY_PROXY_OPTIONS_CONFIG"; private static final String CLIENT_TELEMETRY_SCHEDULING_IN_SECONDS = "COSMOS.CLIENT_TELEMETRY_SCHEDULING_IN_SECONDS"; private static final String CLIENT_TELEMETRY_ENDPOINT = "COSMOS.CLIENT_TELEMETRY_ENDPOINT"; private static final String ENVIRONMENT_NAME = "COSMOS.ENVIRONMENT_NAME"; private static final String QUERYPLAN_CACHING_ENABLED = "COSMOS.QUERYPLAN_CACHING_ENABLED"; private static final int DEFAULT_CLIENT_TELEMETRY_SCHEDULING_IN_SECONDS = 10 * 60; private static final int DEFAULT_UNAVAILABLE_LOCATIONS_EXPIRATION_TIME_IN_SECONDS = 5 * 60; private static final int DEFAULT_MAX_HTTP_BODY_LENGTH_IN_BYTES = 6 * 1024 * 1024; private static final int DEFAULT_MAX_HTTP_INITIAL_LINE_LENGTH = 4096; private static final int DEFAULT_MAX_HTTP_CHUNK_SIZE_IN_BYTES = 8192; private static final int DEFAULT_MAX_HTTP_REQUEST_HEADER_SIZE = 32 * 1024; private static final int MAX_NUMBER_OF_READ_BARRIER_READ_RETRIES = 6; private static final int MAX_NUMBER_OF_PRIMARY_READ_RETRIES = 6; private static final int MAX_NUMBER_OF_READ_QUORUM_RETRIES = 6; private static final int DELAY_BETWEEN_READ_BARRIER_CALLS_IN_MS = 5; private static final int MAX_BARRIER_RETRIES_FOR_MULTI_REGION = 30; private static final int BARRIER_RETRY_INTERVAL_IN_MS_FOR_MULTI_REGION = 30; private static final int MAX_SHORT_BARRIER_RETRIES_FOR_MULTI_REGION = 4; private static final int SHORT_BARRIER_RETRY_INTERVAL_IN_MS_FOR_MULTI_REGION = 10; private static final int CPU_CNT = Runtime.getRuntime().availableProcessors(); private static final int DEFAULT_DIRECT_HTTPS_POOL_SIZE = CPU_CNT * 500; private static final int DEFAULT_GLOBAL_ENDPOINT_MANAGER_INITIALIZATION_TIME_IN_SECONDS = 2 * 60; private static final Duration MAX_IDLE_CONNECTION_TIMEOUT = Duration.ofSeconds(60); private static final Duration 
CONNECTION_ACQUIRE_TIMEOUT = Duration.ofSeconds(45); private static final int REACTOR_NETTY_MAX_CONNECTION_POOL_SIZE = 1000; private static final String REACTOR_NETTY_CONNECTION_POOL_NAME = "reactor-netty-connection-pool"; private static final int DEFAULT_HTTP_RESPONSE_TIMEOUT_IN_SECONDS = 60; private static final int DEFAULT_QUERY_PLAN_RESPONSE_TIMEOUT_IN_SECONDS = 5; private static final int DEFAULT_ADDRESS_REFRESH_RESPONSE_TIMEOUT_IN_SECONDS = 5; private static final String DEFAULT_SESSION_TOKEN_MISMATCH_WAIT_TIME_IN_MILLISECONDS_NAME = "COSMOS.DEFAULT_SESSION_TOKEN_MISMATCH_WAIT_TIME_IN_MILLISECONDS"; private static final int DEFAULT_SESSION_TOKEN_MISMATCH_WAIT_TIME_IN_MILLISECONDS = 5000; private static final String DEFAULT_SESSION_TOKEN_MISMATCH_INITIAL_BACKOFF_TIME_IN_MILLISECONDS_NAME = "COSMOS.DEFAULT_SESSION_TOKEN_MISMATCH_INITIAL_BACKOFF_TIME_IN_MILLISECONDS"; private static final int DEFAULT_SESSION_TOKEN_MISMATCH_INITIAL_BACKOFF_TIME_IN_MILLISECONDS = 5; private static final String DEFAULT_SESSION_TOKEN_MISMATCH_MAXIMUM_BACKOFF_TIME_IN_MILLISECONDS_NAME = "COSMOS.DEFAULT_SESSION_TOKEN_MISMATCH_MAXIMUM_BACKOFF_TIME_IN_MILLISECONDS"; private static final int DEFAULT_SESSION_TOKEN_MISMATCH_MAXIMUM_BACKOFF_TIME_IN_MILLISECONDS = 50; private static final String SWITCH_OFF_IO_THREAD_FOR_RESPONSE_NAME = "COSMOS.SWITCH_OFF_IO_THREAD_FOR_RESPONSE"; private static final boolean DEFAULT_SWITCH_OFF_IO_THREAD_FOR_RESPONSE = false; private static final String QUERY_EMPTY_PAGE_DIAGNOSTICS_ENABLED = "COSMOS.QUERY_EMPTY_PAGE_DIAGNOSTICS_ENABLED"; private static final boolean DEFAULT_QUERY_EMPTY_PAGE_DIAGNOSTICS_ENABLED = false; private static final String USE_LEGACY_TRACING = "COSMOS.USE_LEGACY_TRACING"; private static final boolean DEFAULT_USE_LEGACY_TRACING = false; private static final String REPLICA_ADDRESS_VALIDATION_ENABLED = "COSMOS.REPLICA_ADDRESS_VALIDATION_ENABLED"; private static final boolean DEFAULT_REPLICA_ADDRESS_VALIDATION_ENABLED = true; private static 
final String TCP_HEALTH_CHECK_TIMEOUT_DETECTION_ENABLED = "COSMOS.TCP_HEALTH_CHECK_TIMEOUT_DETECTION_ENABLED"; private static final boolean DEFAULT_TCP_HEALTH_CHECK_TIMEOUT_DETECTION_ENABLED = true; private static final String MIN_CONNECTION_POOL_SIZE_PER_ENDPOINT = "COSMOS.MIN_CONNECTION_POOL_SIZE_PER_ENDPOINT"; private static final int DEFAULT_MIN_CONNECTION_POOL_SIZE_PER_ENDPOINT = 1; private static final String AGGRESSIVE_WARMUP_CONCURRENCY = "COSMOS.AGGRESSIVE_WARMUP_CONCURRENCY"; private static final int DEFAULT_AGGRESSIVE_WARMUP_CONCURRENCY = Configs.getCPUCnt(); private static final String DEFENSIVE_WARMUP_CONCURRENCY = "COSMOS.DEFENSIVE_WARMUP_CONCURRENCY"; private static final int DEFAULT_DEFENSIVE_WARMUP_CONCURRENCY = 1; public Configs() { this.sslContext = sslContextInit(); } public static int getCPUCnt() { return CPU_CNT; } private SslContext sslContextInit() { try { SslProvider sslProvider = SslContext.defaultClientProvider(); return SslContextBuilder.forClient().sslProvider(sslProvider).build(); } catch (SSLException sslException) { logger.error("Fatal error cannot instantiate ssl context due to {}", sslException.getMessage(), sslException); throw new IllegalStateException(sslException); } } public SslContext getSslContext() { return this.sslContext; } public Protocol getProtocol() { String protocol = System.getProperty(PROTOCOL_PROPERTY, firstNonNull( emptyToNull(System.getenv().get(PROTOCOL_ENVIRONMENT_VARIABLE)), DEFAULT_PROTOCOL.name())); try { return Protocol.valueOf(protocol.toUpperCase(Locale.ROOT)); } catch (Exception e) { logger.error("Parsing protocol {} failed. 
Using the default {}.", protocol, DEFAULT_PROTOCOL, e); return DEFAULT_PROTOCOL; } } public int getMaxNumberOfReadBarrierReadRetries() { return MAX_NUMBER_OF_READ_BARRIER_READ_RETRIES; } public int getMaxNumberOfPrimaryReadRetries() { return MAX_NUMBER_OF_PRIMARY_READ_RETRIES; } public int getMaxNumberOfReadQuorumRetries() { return MAX_NUMBER_OF_READ_QUORUM_RETRIES; } public int getDelayBetweenReadBarrierCallsInMs() { return DELAY_BETWEEN_READ_BARRIER_CALLS_IN_MS; } public int getMaxBarrierRetriesForMultiRegion() { return MAX_BARRIER_RETRIES_FOR_MULTI_REGION; } public int getBarrierRetryIntervalInMsForMultiRegion() { return BARRIER_RETRY_INTERVAL_IN_MS_FOR_MULTI_REGION; } public int getMaxShortBarrierRetriesForMultiRegion() { return MAX_SHORT_BARRIER_RETRIES_FOR_MULTI_REGION; } public int getShortBarrierRetryIntervalInMsForMultiRegion() { return SHORT_BARRIER_RETRY_INTERVAL_IN_MS_FOR_MULTI_REGION; } public int getDirectHttpsMaxConnectionLimit() { return getJVMConfigAsInt(MAX_DIRECT_HTTPS_POOL_SIZE, DEFAULT_DIRECT_HTTPS_POOL_SIZE); } public int getMaxHttpHeaderSize() { return getJVMConfigAsInt(MAX_HTTP_HEADER_SIZE_IN_BYTES, DEFAULT_MAX_HTTP_REQUEST_HEADER_SIZE); } public int getMaxHttpInitialLineLength() { return getJVMConfigAsInt(MAX_HTTP_INITIAL_LINE_LENGTH_IN_BYTES, DEFAULT_MAX_HTTP_INITIAL_LINE_LENGTH); } public int getMaxHttpChunkSize() { return getJVMConfigAsInt(MAX_HTTP_CHUNK_SIZE_IN_BYTES, DEFAULT_MAX_HTTP_CHUNK_SIZE_IN_BYTES); } public int getMaxHttpBodyLength() { return getJVMConfigAsInt(MAX_HTTP_BODY_LENGTH_IN_BYTES, DEFAULT_MAX_HTTP_BODY_LENGTH_IN_BYTES); } public int getUnavailableLocationsExpirationTimeInSeconds() { return getJVMConfigAsInt(UNAVAILABLE_LOCATIONS_EXPIRATION_TIME_IN_SECONDS, DEFAULT_UNAVAILABLE_LOCATIONS_EXPIRATION_TIME_IN_SECONDS); } public static int getClientTelemetrySchedulingInSec() { return getJVMConfigAsInt(CLIENT_TELEMETRY_SCHEDULING_IN_SECONDS, DEFAULT_CLIENT_TELEMETRY_SCHEDULING_IN_SECONDS); } public int 
getGlobalEndpointManagerMaxInitializationTimeInSeconds() { return getJVMConfigAsInt(GLOBAL_ENDPOINT_MANAGER_INITIALIZATION_TIME_IN_SECONDS, DEFAULT_GLOBAL_ENDPOINT_MANAGER_INITIALIZATION_TIME_IN_SECONDS); } public String getReactorNettyConnectionPoolName() { return REACTOR_NETTY_CONNECTION_POOL_NAME; } public Duration getMaxIdleConnectionTimeout() { return MAX_IDLE_CONNECTION_TIMEOUT; } public Duration getConnectionAcquireTimeout() { return CONNECTION_ACQUIRE_TIMEOUT; } public int getReactorNettyMaxConnectionPoolSize() { return REACTOR_NETTY_MAX_CONNECTION_POOL_SIZE; } public static int getHttpResponseTimeoutInSeconds() { return getJVMConfigAsInt(HTTP_RESPONSE_TIMEOUT_IN_SECONDS, DEFAULT_HTTP_RESPONSE_TIMEOUT_IN_SECONDS); } public static int getQueryPlanResponseTimeoutInSeconds() { return getJVMConfigAsInt(QUERY_PLAN_RESPONSE_TIMEOUT_IN_SECONDS, DEFAULT_QUERY_PLAN_RESPONSE_TIMEOUT_IN_SECONDS); } public static String getClientTelemetryEndpoint() { return System.getProperty(CLIENT_TELEMETRY_ENDPOINT); } public static String getClientTelemetryProxyOptionsConfig() { return System.getProperty(CLIENT_TELEMETRY_PROXY_OPTIONS_CONFIG); } public static String getNonIdempotentWriteRetryPolicy() { String valueFromSystemProperty = System.getProperty(NON_IDEMPOTENT_WRITE_RETRY_POLICY); if (valueFromSystemProperty != null && !valueFromSystemProperty.isEmpty()) { return valueFromSystemProperty; } return System.getenv(NON_IDEMPOTENT_WRITE_RETRY_POLICY_VARIABLE); } public static String getEnvironmentName() { return System.getProperty(ENVIRONMENT_NAME); } public static boolean isQueryPlanCachingEnabled() { return getJVMConfigAsBoolean(QUERYPLAN_CACHING_ENABLED, true); } public static int getAddressRefreshResponseTimeoutInSeconds() { return getJVMConfigAsInt(ADDRESS_REFRESH_RESPONSE_TIMEOUT_IN_SECONDS, DEFAULT_ADDRESS_REFRESH_RESPONSE_TIMEOUT_IN_SECONDS); } public static int getSessionTokenMismatchDefaultWaitTimeInMs() { return getJVMConfigAsInt( 
DEFAULT_SESSION_TOKEN_MISMATCH_WAIT_TIME_IN_MILLISECONDS_NAME, DEFAULT_SESSION_TOKEN_MISMATCH_WAIT_TIME_IN_MILLISECONDS); } public static int getSessionTokenMismatchInitialBackoffTimeInMs() { return getJVMConfigAsInt( DEFAULT_SESSION_TOKEN_MISMATCH_INITIAL_BACKOFF_TIME_IN_MILLISECONDS_NAME, DEFAULT_SESSION_TOKEN_MISMATCH_INITIAL_BACKOFF_TIME_IN_MILLISECONDS); } public static int getSessionTokenMismatchMaximumBackoffTimeInMs() { return getJVMConfigAsInt( DEFAULT_SESSION_TOKEN_MISMATCH_MAXIMUM_BACKOFF_TIME_IN_MILLISECONDS_NAME, DEFAULT_SESSION_TOKEN_MISMATCH_MAXIMUM_BACKOFF_TIME_IN_MILLISECONDS); } public static int speculationThreshold() { return getJVMConfigAsInt(SPECULATION_THRESHOLD, 500); } public static int speculationThresholdStep() { return getJVMConfigAsInt(SPECULATION_THRESHOLD_STEP, 100); } public static boolean shouldSwitchOffIOThreadForResponse() { return getJVMConfigAsBoolean( SWITCH_OFF_IO_THREAD_FOR_RESPONSE_NAME, DEFAULT_SWITCH_OFF_IO_THREAD_FOR_RESPONSE); } public static boolean isEmptyPageDiagnosticsEnabled() { return getJVMConfigAsBoolean( QUERY_EMPTY_PAGE_DIAGNOSTICS_ENABLED, DEFAULT_QUERY_EMPTY_PAGE_DIAGNOSTICS_ENABLED); } public static boolean useLegacyTracing() { return getJVMConfigAsBoolean( USE_LEGACY_TRACING, DEFAULT_USE_LEGACY_TRACING); } private static int getJVMConfigAsInt(String propName, int defaultValue) { String propValue = System.getProperty(propName); return getIntValue(propValue, defaultValue); } private static boolean getJVMConfigAsBoolean(String propName, boolean defaultValue) { String propValue = System.getProperty(propName); return getBooleanValue(propValue, defaultValue); } private static int getIntValue(String val, int defaultValue) { if (StringUtils.isEmpty(val)) { return defaultValue; } else { return Integer.valueOf(val); } } private static boolean getBooleanValue(String val, boolean defaultValue) { if (StringUtils.isEmpty(val)) { return defaultValue; } else { return Boolean.valueOf(val); } } public static boolean 
isReplicaAddressValidationEnabled() { return getJVMConfigAsBoolean( REPLICA_ADDRESS_VALIDATION_ENABLED, DEFAULT_REPLICA_ADDRESS_VALIDATION_ENABLED); } public static boolean isTcpHealthCheckTimeoutDetectionEnabled() { return getJVMConfigAsBoolean( TCP_HEALTH_CHECK_TIMEOUT_DETECTION_ENABLED, DEFAULT_TCP_HEALTH_CHECK_TIMEOUT_DETECTION_ENABLED); } public static int getMinConnectionPoolSizePerEndpoint() { return getIntValue(System.getProperty(MIN_CONNECTION_POOL_SIZE_PER_ENDPOINT), DEFAULT_MIN_CONNECTION_POOL_SIZE_PER_ENDPOINT); } public static int getDefensiveWarmupConcurrency() { return getIntValue(System.getProperty(DEFENSIVE_WARMUP_CONCURRENCY), DEFAULT_DEFENSIVE_WARMUP_CONCURRENCY); } public static int getAggressiveWarmupConcurrency() { return getIntValue(System.getProperty(AGGRESSIVE_WARMUP_CONCURRENCY), DEFAULT_AGGRESSIVE_WARMUP_CONCURRENCY); } }
Defaults to 0, i.e. no speculation.
/**
 * Returns the configured speculation type read from the {@code COSMOS_SPECULATION_TYPE}
 * JVM configuration: 0 - no speculation (default), 1 - threshold based speculation.
 */
public static int getSpeculationType() { return getJVMConfigAsInt(SPECULATION_TYPE, 0); }
return getJVMConfigAsInt(SPECULATION_TYPE, 0);
/**
 * Returns the configured speculation type read from the {@code COSMOS_SPECULATION_TYPE}
 * JVM configuration: 0 - no speculation (default), 1 - threshold based speculation.
 */
public static int getSpeculationType() { return getJVMConfigAsInt(SPECULATION_TYPE, 0); }
class Configs { private static final Logger logger = LoggerFactory.getLogger(Configs.class); /** * Integer value specifying the speculation type * <pre> * 0 - No speculation * 1 - Threshold based speculation * </pre> */ public static final String SPECULATION_TYPE = "COSMOS_SPECULATION_TYPE"; public static final String SPECULATION_THRESHOLD = "COSMOS_SPECULATION_THRESHOLD"; public static final String SPECULATION_THRESHOLD_STEP = "COSMOS_SPECULATION_THRESHOLD_STEP"; private final SslContext sslContext; private static final String PROTOCOL_ENVIRONMENT_VARIABLE = "AZURE_COSMOS_DIRECT_MODE_PROTOCOL"; private static final String PROTOCOL_PROPERTY = "azure.cosmos.directModeProtocol"; private static final Protocol DEFAULT_PROTOCOL = Protocol.TCP; private static final String UNAVAILABLE_LOCATIONS_EXPIRATION_TIME_IN_SECONDS = "COSMOS.UNAVAILABLE_LOCATIONS_EXPIRATION_TIME_IN_SECONDS"; private static final String GLOBAL_ENDPOINT_MANAGER_INITIALIZATION_TIME_IN_SECONDS = "COSMOS.GLOBAL_ENDPOINT_MANAGER_MAX_INIT_TIME_IN_SECONDS"; private static final String MAX_HTTP_BODY_LENGTH_IN_BYTES = "COSMOS.MAX_HTTP_BODY_LENGTH_IN_BYTES"; private static final String MAX_HTTP_INITIAL_LINE_LENGTH_IN_BYTES = "COSMOS.MAX_HTTP_INITIAL_LINE_LENGTH_IN_BYTES"; private static final String MAX_HTTP_CHUNK_SIZE_IN_BYTES = "COSMOS.MAX_HTTP_CHUNK_SIZE_IN_BYTES"; private static final String MAX_HTTP_HEADER_SIZE_IN_BYTES = "COSMOS.MAX_HTTP_HEADER_SIZE_IN_BYTES"; private static final String MAX_DIRECT_HTTPS_POOL_SIZE = "COSMOS.MAX_DIRECT_HTTP_CONNECTION_LIMIT"; private static final String HTTP_RESPONSE_TIMEOUT_IN_SECONDS = "COSMOS.HTTP_RESPONSE_TIMEOUT_IN_SECONDS"; private static final String QUERY_PLAN_RESPONSE_TIMEOUT_IN_SECONDS = "COSMOS.QUERY_PLAN_RESPONSE_TIMEOUT_IN_SECONDS"; private static final String ADDRESS_REFRESH_RESPONSE_TIMEOUT_IN_SECONDS = "COSMOS.ADDRESS_REFRESH_RESPONSE_TIMEOUT_IN_SECONDS"; public static final String NON_IDEMPOTENT_WRITE_RETRY_POLICY = "COSMOS.WRITE_RETRY_POLICY"; public 
static final String NON_IDEMPOTENT_WRITE_RETRY_POLICY_VARIABLE = "COSMOS_WRITE_RETRY_POLICY"; private static final String CLIENT_TELEMETRY_PROXY_OPTIONS_CONFIG = "COSMOS.CLIENT_TELEMETRY_PROXY_OPTIONS_CONFIG"; private static final String CLIENT_TELEMETRY_SCHEDULING_IN_SECONDS = "COSMOS.CLIENT_TELEMETRY_SCHEDULING_IN_SECONDS"; private static final String CLIENT_TELEMETRY_ENDPOINT = "COSMOS.CLIENT_TELEMETRY_ENDPOINT"; private static final String ENVIRONMENT_NAME = "COSMOS.ENVIRONMENT_NAME"; private static final String QUERYPLAN_CACHING_ENABLED = "COSMOS.QUERYPLAN_CACHING_ENABLED"; private static final int DEFAULT_CLIENT_TELEMETRY_SCHEDULING_IN_SECONDS = 10 * 60; private static final int DEFAULT_UNAVAILABLE_LOCATIONS_EXPIRATION_TIME_IN_SECONDS = 5 * 60; private static final int DEFAULT_MAX_HTTP_BODY_LENGTH_IN_BYTES = 6 * 1024 * 1024; private static final int DEFAULT_MAX_HTTP_INITIAL_LINE_LENGTH = 4096; private static final int DEFAULT_MAX_HTTP_CHUNK_SIZE_IN_BYTES = 8192; private static final int DEFAULT_MAX_HTTP_REQUEST_HEADER_SIZE = 32 * 1024; private static final int MAX_NUMBER_OF_READ_BARRIER_READ_RETRIES = 6; private static final int MAX_NUMBER_OF_PRIMARY_READ_RETRIES = 6; private static final int MAX_NUMBER_OF_READ_QUORUM_RETRIES = 6; private static final int DELAY_BETWEEN_READ_BARRIER_CALLS_IN_MS = 5; private static final int MAX_BARRIER_RETRIES_FOR_MULTI_REGION = 30; private static final int BARRIER_RETRY_INTERVAL_IN_MS_FOR_MULTI_REGION = 30; private static final int MAX_SHORT_BARRIER_RETRIES_FOR_MULTI_REGION = 4; private static final int SHORT_BARRIER_RETRY_INTERVAL_IN_MS_FOR_MULTI_REGION = 10; private static final int CPU_CNT = Runtime.getRuntime().availableProcessors(); private static final int DEFAULT_DIRECT_HTTPS_POOL_SIZE = CPU_CNT * 500; private static final int DEFAULT_GLOBAL_ENDPOINT_MANAGER_INITIALIZATION_TIME_IN_SECONDS = 2 * 60; private static final Duration MAX_IDLE_CONNECTION_TIMEOUT = Duration.ofSeconds(60); private static final Duration 
CONNECTION_ACQUIRE_TIMEOUT = Duration.ofSeconds(45); private static final int REACTOR_NETTY_MAX_CONNECTION_POOL_SIZE = 1000; private static final String REACTOR_NETTY_CONNECTION_POOL_NAME = "reactor-netty-connection-pool"; private static final int DEFAULT_HTTP_RESPONSE_TIMEOUT_IN_SECONDS = 60; private static final int DEFAULT_QUERY_PLAN_RESPONSE_TIMEOUT_IN_SECONDS = 5; private static final int DEFAULT_ADDRESS_REFRESH_RESPONSE_TIMEOUT_IN_SECONDS = 5; private static final String DEFAULT_SESSION_TOKEN_MISMATCH_WAIT_TIME_IN_MILLISECONDS_NAME = "COSMOS.DEFAULT_SESSION_TOKEN_MISMATCH_WAIT_TIME_IN_MILLISECONDS"; private static final int DEFAULT_SESSION_TOKEN_MISMATCH_WAIT_TIME_IN_MILLISECONDS = 5000; private static final String DEFAULT_SESSION_TOKEN_MISMATCH_INITIAL_BACKOFF_TIME_IN_MILLISECONDS_NAME = "COSMOS.DEFAULT_SESSION_TOKEN_MISMATCH_INITIAL_BACKOFF_TIME_IN_MILLISECONDS"; private static final int DEFAULT_SESSION_TOKEN_MISMATCH_INITIAL_BACKOFF_TIME_IN_MILLISECONDS = 5; private static final String DEFAULT_SESSION_TOKEN_MISMATCH_MAXIMUM_BACKOFF_TIME_IN_MILLISECONDS_NAME = "COSMOS.DEFAULT_SESSION_TOKEN_MISMATCH_MAXIMUM_BACKOFF_TIME_IN_MILLISECONDS"; private static final int DEFAULT_SESSION_TOKEN_MISMATCH_MAXIMUM_BACKOFF_TIME_IN_MILLISECONDS = 50; private static final String SWITCH_OFF_IO_THREAD_FOR_RESPONSE_NAME = "COSMOS.SWITCH_OFF_IO_THREAD_FOR_RESPONSE"; private static final boolean DEFAULT_SWITCH_OFF_IO_THREAD_FOR_RESPONSE = false; private static final String QUERY_EMPTY_PAGE_DIAGNOSTICS_ENABLED = "COSMOS.QUERY_EMPTY_PAGE_DIAGNOSTICS_ENABLED"; private static final boolean DEFAULT_QUERY_EMPTY_PAGE_DIAGNOSTICS_ENABLED = false; private static final String USE_LEGACY_TRACING = "COSMOS.USE_LEGACY_TRACING"; private static final boolean DEFAULT_USE_LEGACY_TRACING = false; private static final String REPLICA_ADDRESS_VALIDATION_ENABLED = "COSMOS.REPLICA_ADDRESS_VALIDATION_ENABLED"; private static final boolean DEFAULT_REPLICA_ADDRESS_VALIDATION_ENABLED = true; private static 
final String TCP_HEALTH_CHECK_TIMEOUT_DETECTION_ENABLED = "COSMOS.TCP_HEALTH_CHECK_TIMEOUT_DETECTION_ENABLED"; private static final boolean DEFAULT_TCP_HEALTH_CHECK_TIMEOUT_DETECTION_ENABLED = true; private static final String MIN_CONNECTION_POOL_SIZE_PER_ENDPOINT = "COSMOS.MIN_CONNECTION_POOL_SIZE_PER_ENDPOINT"; private static final int DEFAULT_MIN_CONNECTION_POOL_SIZE_PER_ENDPOINT = 1; private static final String AGGRESSIVE_WARMUP_CONCURRENCY = "COSMOS.AGGRESSIVE_WARMUP_CONCURRENCY"; private static final int DEFAULT_AGGRESSIVE_WARMUP_CONCURRENCY = Configs.getCPUCnt(); private static final String DEFENSIVE_WARMUP_CONCURRENCY = "COSMOS.DEFENSIVE_WARMUP_CONCURRENCY"; private static final int DEFAULT_DEFENSIVE_WARMUP_CONCURRENCY = 1; public Configs() { this.sslContext = sslContextInit(); } public static int getCPUCnt() { return CPU_CNT; } private SslContext sslContextInit() { try { SslProvider sslProvider = SslContext.defaultClientProvider(); return SslContextBuilder.forClient().sslProvider(sslProvider).build(); } catch (SSLException sslException) { logger.error("Fatal error cannot instantiate ssl context due to {}", sslException.getMessage(), sslException); throw new IllegalStateException(sslException); } } public SslContext getSslContext() { return this.sslContext; } public Protocol getProtocol() { String protocol = System.getProperty(PROTOCOL_PROPERTY, firstNonNull( emptyToNull(System.getenv().get(PROTOCOL_ENVIRONMENT_VARIABLE)), DEFAULT_PROTOCOL.name())); try { return Protocol.valueOf(protocol.toUpperCase(Locale.ROOT)); } catch (Exception e) { logger.error("Parsing protocol {} failed. 
Using the default {}.", protocol, DEFAULT_PROTOCOL, e); return DEFAULT_PROTOCOL; } } public int getMaxNumberOfReadBarrierReadRetries() { return MAX_NUMBER_OF_READ_BARRIER_READ_RETRIES; } public int getMaxNumberOfPrimaryReadRetries() { return MAX_NUMBER_OF_PRIMARY_READ_RETRIES; } public int getMaxNumberOfReadQuorumRetries() { return MAX_NUMBER_OF_READ_QUORUM_RETRIES; } public int getDelayBetweenReadBarrierCallsInMs() { return DELAY_BETWEEN_READ_BARRIER_CALLS_IN_MS; } public int getMaxBarrierRetriesForMultiRegion() { return MAX_BARRIER_RETRIES_FOR_MULTI_REGION; } public int getBarrierRetryIntervalInMsForMultiRegion() { return BARRIER_RETRY_INTERVAL_IN_MS_FOR_MULTI_REGION; } public int getMaxShortBarrierRetriesForMultiRegion() { return MAX_SHORT_BARRIER_RETRIES_FOR_MULTI_REGION; } public int getShortBarrierRetryIntervalInMsForMultiRegion() { return SHORT_BARRIER_RETRY_INTERVAL_IN_MS_FOR_MULTI_REGION; } public int getDirectHttpsMaxConnectionLimit() { return getJVMConfigAsInt(MAX_DIRECT_HTTPS_POOL_SIZE, DEFAULT_DIRECT_HTTPS_POOL_SIZE); } public int getMaxHttpHeaderSize() { return getJVMConfigAsInt(MAX_HTTP_HEADER_SIZE_IN_BYTES, DEFAULT_MAX_HTTP_REQUEST_HEADER_SIZE); } public int getMaxHttpInitialLineLength() { return getJVMConfigAsInt(MAX_HTTP_INITIAL_LINE_LENGTH_IN_BYTES, DEFAULT_MAX_HTTP_INITIAL_LINE_LENGTH); } public int getMaxHttpChunkSize() { return getJVMConfigAsInt(MAX_HTTP_CHUNK_SIZE_IN_BYTES, DEFAULT_MAX_HTTP_CHUNK_SIZE_IN_BYTES); } public int getMaxHttpBodyLength() { return getJVMConfigAsInt(MAX_HTTP_BODY_LENGTH_IN_BYTES, DEFAULT_MAX_HTTP_BODY_LENGTH_IN_BYTES); } public int getUnavailableLocationsExpirationTimeInSeconds() { return getJVMConfigAsInt(UNAVAILABLE_LOCATIONS_EXPIRATION_TIME_IN_SECONDS, DEFAULT_UNAVAILABLE_LOCATIONS_EXPIRATION_TIME_IN_SECONDS); } public static int getClientTelemetrySchedulingInSec() { return getJVMConfigAsInt(CLIENT_TELEMETRY_SCHEDULING_IN_SECONDS, DEFAULT_CLIENT_TELEMETRY_SCHEDULING_IN_SECONDS); } public int 
getGlobalEndpointManagerMaxInitializationTimeInSeconds() { return getJVMConfigAsInt(GLOBAL_ENDPOINT_MANAGER_INITIALIZATION_TIME_IN_SECONDS, DEFAULT_GLOBAL_ENDPOINT_MANAGER_INITIALIZATION_TIME_IN_SECONDS); } public String getReactorNettyConnectionPoolName() { return REACTOR_NETTY_CONNECTION_POOL_NAME; } public Duration getMaxIdleConnectionTimeout() { return MAX_IDLE_CONNECTION_TIMEOUT; } public Duration getConnectionAcquireTimeout() { return CONNECTION_ACQUIRE_TIMEOUT; } public int getReactorNettyMaxConnectionPoolSize() { return REACTOR_NETTY_MAX_CONNECTION_POOL_SIZE; } public static int getHttpResponseTimeoutInSeconds() { return getJVMConfigAsInt(HTTP_RESPONSE_TIMEOUT_IN_SECONDS, DEFAULT_HTTP_RESPONSE_TIMEOUT_IN_SECONDS); } public static int getQueryPlanResponseTimeoutInSeconds() { return getJVMConfigAsInt(QUERY_PLAN_RESPONSE_TIMEOUT_IN_SECONDS, DEFAULT_QUERY_PLAN_RESPONSE_TIMEOUT_IN_SECONDS); } public static String getClientTelemetryEndpoint() { return System.getProperty(CLIENT_TELEMETRY_ENDPOINT); } public static String getClientTelemetryProxyOptionsConfig() { return System.getProperty(CLIENT_TELEMETRY_PROXY_OPTIONS_CONFIG); } public static String getNonIdempotentWriteRetryPolicy() { String valueFromSystemProperty = System.getProperty(NON_IDEMPOTENT_WRITE_RETRY_POLICY); if (valueFromSystemProperty != null && !valueFromSystemProperty.isEmpty()) { return valueFromSystemProperty; } return System.getenv(NON_IDEMPOTENT_WRITE_RETRY_POLICY_VARIABLE); } public static String getEnvironmentName() { return System.getProperty(ENVIRONMENT_NAME); } public static boolean isQueryPlanCachingEnabled() { return getJVMConfigAsBoolean(QUERYPLAN_CACHING_ENABLED, true); } public static int getAddressRefreshResponseTimeoutInSeconds() { return getJVMConfigAsInt(ADDRESS_REFRESH_RESPONSE_TIMEOUT_IN_SECONDS, DEFAULT_ADDRESS_REFRESH_RESPONSE_TIMEOUT_IN_SECONDS); } public static int getSessionTokenMismatchDefaultWaitTimeInMs() { return getJVMConfigAsInt( 
DEFAULT_SESSION_TOKEN_MISMATCH_WAIT_TIME_IN_MILLISECONDS_NAME, DEFAULT_SESSION_TOKEN_MISMATCH_WAIT_TIME_IN_MILLISECONDS); } public static int getSessionTokenMismatchInitialBackoffTimeInMs() { return getJVMConfigAsInt( DEFAULT_SESSION_TOKEN_MISMATCH_INITIAL_BACKOFF_TIME_IN_MILLISECONDS_NAME, DEFAULT_SESSION_TOKEN_MISMATCH_INITIAL_BACKOFF_TIME_IN_MILLISECONDS); } public static int getSessionTokenMismatchMaximumBackoffTimeInMs() { return getJVMConfigAsInt( DEFAULT_SESSION_TOKEN_MISMATCH_MAXIMUM_BACKOFF_TIME_IN_MILLISECONDS_NAME, DEFAULT_SESSION_TOKEN_MISMATCH_MAXIMUM_BACKOFF_TIME_IN_MILLISECONDS); } public static int speculationThreshold() { return getJVMConfigAsInt(SPECULATION_THRESHOLD, 500); } public static int speculationThresholdStep() { return getJVMConfigAsInt(SPECULATION_THRESHOLD_STEP, 100); } public static boolean shouldSwitchOffIOThreadForResponse() { return getJVMConfigAsBoolean( SWITCH_OFF_IO_THREAD_FOR_RESPONSE_NAME, DEFAULT_SWITCH_OFF_IO_THREAD_FOR_RESPONSE); } public static boolean isEmptyPageDiagnosticsEnabled() { return getJVMConfigAsBoolean( QUERY_EMPTY_PAGE_DIAGNOSTICS_ENABLED, DEFAULT_QUERY_EMPTY_PAGE_DIAGNOSTICS_ENABLED); } public static boolean useLegacyTracing() { return getJVMConfigAsBoolean( USE_LEGACY_TRACING, DEFAULT_USE_LEGACY_TRACING); } private static int getJVMConfigAsInt(String propName, int defaultValue) { String propValue = System.getProperty(propName); return getIntValue(propValue, defaultValue); } private static boolean getJVMConfigAsBoolean(String propName, boolean defaultValue) { String propValue = System.getProperty(propName); return getBooleanValue(propValue, defaultValue); } private static int getIntValue(String val, int defaultValue) { if (StringUtils.isEmpty(val)) { return defaultValue; } else { return Integer.valueOf(val); } } private static boolean getBooleanValue(String val, boolean defaultValue) { if (StringUtils.isEmpty(val)) { return defaultValue; } else { return Boolean.valueOf(val); } } public static boolean 
isReplicaAddressValidationEnabled() { return getJVMConfigAsBoolean( REPLICA_ADDRESS_VALIDATION_ENABLED, DEFAULT_REPLICA_ADDRESS_VALIDATION_ENABLED); } public static boolean isTcpHealthCheckTimeoutDetectionEnabled() { return getJVMConfigAsBoolean( TCP_HEALTH_CHECK_TIMEOUT_DETECTION_ENABLED, DEFAULT_TCP_HEALTH_CHECK_TIMEOUT_DETECTION_ENABLED); } public static int getMinConnectionPoolSizePerEndpoint() { return getIntValue(System.getProperty(MIN_CONNECTION_POOL_SIZE_PER_ENDPOINT), DEFAULT_MIN_CONNECTION_POOL_SIZE_PER_ENDPOINT); } public static int getDefensiveWarmupConcurrency() { return getIntValue(System.getProperty(DEFENSIVE_WARMUP_CONCURRENCY), DEFAULT_DEFENSIVE_WARMUP_CONCURRENCY); } public static int getAggressiveWarmupConcurrency() { return getIntValue(System.getProperty(AGGRESSIVE_WARMUP_CONCURRENCY), DEFAULT_AGGRESSIVE_WARMUP_CONCURRENCY); } }
class Configs { private static final Logger logger = LoggerFactory.getLogger(Configs.class); /** * Integer value specifying the speculation type * <pre> * 0 - No speculation * 1 - Threshold based speculation * </pre> */ public static final String SPECULATION_TYPE = "COSMOS_SPECULATION_TYPE"; public static final String SPECULATION_THRESHOLD = "COSMOS_SPECULATION_THRESHOLD"; public static final String SPECULATION_THRESHOLD_STEP = "COSMOS_SPECULATION_THRESHOLD_STEP"; private final SslContext sslContext; private static final String PROTOCOL_ENVIRONMENT_VARIABLE = "AZURE_COSMOS_DIRECT_MODE_PROTOCOL"; private static final String PROTOCOL_PROPERTY = "azure.cosmos.directModeProtocol"; private static final Protocol DEFAULT_PROTOCOL = Protocol.TCP; private static final String UNAVAILABLE_LOCATIONS_EXPIRATION_TIME_IN_SECONDS = "COSMOS.UNAVAILABLE_LOCATIONS_EXPIRATION_TIME_IN_SECONDS"; private static final String GLOBAL_ENDPOINT_MANAGER_INITIALIZATION_TIME_IN_SECONDS = "COSMOS.GLOBAL_ENDPOINT_MANAGER_MAX_INIT_TIME_IN_SECONDS"; private static final String MAX_HTTP_BODY_LENGTH_IN_BYTES = "COSMOS.MAX_HTTP_BODY_LENGTH_IN_BYTES"; private static final String MAX_HTTP_INITIAL_LINE_LENGTH_IN_BYTES = "COSMOS.MAX_HTTP_INITIAL_LINE_LENGTH_IN_BYTES"; private static final String MAX_HTTP_CHUNK_SIZE_IN_BYTES = "COSMOS.MAX_HTTP_CHUNK_SIZE_IN_BYTES"; private static final String MAX_HTTP_HEADER_SIZE_IN_BYTES = "COSMOS.MAX_HTTP_HEADER_SIZE_IN_BYTES"; private static final String MAX_DIRECT_HTTPS_POOL_SIZE = "COSMOS.MAX_DIRECT_HTTP_CONNECTION_LIMIT"; private static final String HTTP_RESPONSE_TIMEOUT_IN_SECONDS = "COSMOS.HTTP_RESPONSE_TIMEOUT_IN_SECONDS"; private static final String QUERY_PLAN_RESPONSE_TIMEOUT_IN_SECONDS = "COSMOS.QUERY_PLAN_RESPONSE_TIMEOUT_IN_SECONDS"; private static final String ADDRESS_REFRESH_RESPONSE_TIMEOUT_IN_SECONDS = "COSMOS.ADDRESS_REFRESH_RESPONSE_TIMEOUT_IN_SECONDS"; public static final String NON_IDEMPOTENT_WRITE_RETRY_POLICY = "COSMOS.WRITE_RETRY_POLICY"; public 
static final String NON_IDEMPOTENT_WRITE_RETRY_POLICY_VARIABLE = "COSMOS_WRITE_RETRY_POLICY"; private static final String CLIENT_TELEMETRY_PROXY_OPTIONS_CONFIG = "COSMOS.CLIENT_TELEMETRY_PROXY_OPTIONS_CONFIG"; private static final String CLIENT_TELEMETRY_SCHEDULING_IN_SECONDS = "COSMOS.CLIENT_TELEMETRY_SCHEDULING_IN_SECONDS"; private static final String CLIENT_TELEMETRY_ENDPOINT = "COSMOS.CLIENT_TELEMETRY_ENDPOINT"; private static final String ENVIRONMENT_NAME = "COSMOS.ENVIRONMENT_NAME"; private static final String QUERYPLAN_CACHING_ENABLED = "COSMOS.QUERYPLAN_CACHING_ENABLED"; private static final int DEFAULT_CLIENT_TELEMETRY_SCHEDULING_IN_SECONDS = 10 * 60; private static final int DEFAULT_UNAVAILABLE_LOCATIONS_EXPIRATION_TIME_IN_SECONDS = 5 * 60; private static final int DEFAULT_MAX_HTTP_BODY_LENGTH_IN_BYTES = 6 * 1024 * 1024; private static final int DEFAULT_MAX_HTTP_INITIAL_LINE_LENGTH = 4096; private static final int DEFAULT_MAX_HTTP_CHUNK_SIZE_IN_BYTES = 8192; private static final int DEFAULT_MAX_HTTP_REQUEST_HEADER_SIZE = 32 * 1024; private static final int MAX_NUMBER_OF_READ_BARRIER_READ_RETRIES = 6; private static final int MAX_NUMBER_OF_PRIMARY_READ_RETRIES = 6; private static final int MAX_NUMBER_OF_READ_QUORUM_RETRIES = 6; private static final int DELAY_BETWEEN_READ_BARRIER_CALLS_IN_MS = 5; private static final int MAX_BARRIER_RETRIES_FOR_MULTI_REGION = 30; private static final int BARRIER_RETRY_INTERVAL_IN_MS_FOR_MULTI_REGION = 30; private static final int MAX_SHORT_BARRIER_RETRIES_FOR_MULTI_REGION = 4; private static final int SHORT_BARRIER_RETRY_INTERVAL_IN_MS_FOR_MULTI_REGION = 10; private static final int CPU_CNT = Runtime.getRuntime().availableProcessors(); private static final int DEFAULT_DIRECT_HTTPS_POOL_SIZE = CPU_CNT * 500; private static final int DEFAULT_GLOBAL_ENDPOINT_MANAGER_INITIALIZATION_TIME_IN_SECONDS = 2 * 60; private static final Duration MAX_IDLE_CONNECTION_TIMEOUT = Duration.ofSeconds(60); private static final Duration 
CONNECTION_ACQUIRE_TIMEOUT = Duration.ofSeconds(45); private static final int REACTOR_NETTY_MAX_CONNECTION_POOL_SIZE = 1000; private static final String REACTOR_NETTY_CONNECTION_POOL_NAME = "reactor-netty-connection-pool"; private static final int DEFAULT_HTTP_RESPONSE_TIMEOUT_IN_SECONDS = 60; private static final int DEFAULT_QUERY_PLAN_RESPONSE_TIMEOUT_IN_SECONDS = 5; private static final int DEFAULT_ADDRESS_REFRESH_RESPONSE_TIMEOUT_IN_SECONDS = 5; private static final String DEFAULT_SESSION_TOKEN_MISMATCH_WAIT_TIME_IN_MILLISECONDS_NAME = "COSMOS.DEFAULT_SESSION_TOKEN_MISMATCH_WAIT_TIME_IN_MILLISECONDS"; private static final int DEFAULT_SESSION_TOKEN_MISMATCH_WAIT_TIME_IN_MILLISECONDS = 5000; private static final String DEFAULT_SESSION_TOKEN_MISMATCH_INITIAL_BACKOFF_TIME_IN_MILLISECONDS_NAME = "COSMOS.DEFAULT_SESSION_TOKEN_MISMATCH_INITIAL_BACKOFF_TIME_IN_MILLISECONDS"; private static final int DEFAULT_SESSION_TOKEN_MISMATCH_INITIAL_BACKOFF_TIME_IN_MILLISECONDS = 5; private static final String DEFAULT_SESSION_TOKEN_MISMATCH_MAXIMUM_BACKOFF_TIME_IN_MILLISECONDS_NAME = "COSMOS.DEFAULT_SESSION_TOKEN_MISMATCH_MAXIMUM_BACKOFF_TIME_IN_MILLISECONDS"; private static final int DEFAULT_SESSION_TOKEN_MISMATCH_MAXIMUM_BACKOFF_TIME_IN_MILLISECONDS = 50; private static final String SWITCH_OFF_IO_THREAD_FOR_RESPONSE_NAME = "COSMOS.SWITCH_OFF_IO_THREAD_FOR_RESPONSE"; private static final boolean DEFAULT_SWITCH_OFF_IO_THREAD_FOR_RESPONSE = false; private static final String QUERY_EMPTY_PAGE_DIAGNOSTICS_ENABLED = "COSMOS.QUERY_EMPTY_PAGE_DIAGNOSTICS_ENABLED"; private static final boolean DEFAULT_QUERY_EMPTY_PAGE_DIAGNOSTICS_ENABLED = false; private static final String USE_LEGACY_TRACING = "COSMOS.USE_LEGACY_TRACING"; private static final boolean DEFAULT_USE_LEGACY_TRACING = false; private static final String REPLICA_ADDRESS_VALIDATION_ENABLED = "COSMOS.REPLICA_ADDRESS_VALIDATION_ENABLED"; private static final boolean DEFAULT_REPLICA_ADDRESS_VALIDATION_ENABLED = true; private static 
final String TCP_HEALTH_CHECK_TIMEOUT_DETECTION_ENABLED = "COSMOS.TCP_HEALTH_CHECK_TIMEOUT_DETECTION_ENABLED"; private static final boolean DEFAULT_TCP_HEALTH_CHECK_TIMEOUT_DETECTION_ENABLED = true; private static final String MIN_CONNECTION_POOL_SIZE_PER_ENDPOINT = "COSMOS.MIN_CONNECTION_POOL_SIZE_PER_ENDPOINT"; private static final int DEFAULT_MIN_CONNECTION_POOL_SIZE_PER_ENDPOINT = 1; private static final String AGGRESSIVE_WARMUP_CONCURRENCY = "COSMOS.AGGRESSIVE_WARMUP_CONCURRENCY"; private static final int DEFAULT_AGGRESSIVE_WARMUP_CONCURRENCY = Configs.getCPUCnt(); private static final String DEFENSIVE_WARMUP_CONCURRENCY = "COSMOS.DEFENSIVE_WARMUP_CONCURRENCY"; private static final int DEFAULT_DEFENSIVE_WARMUP_CONCURRENCY = 1; public Configs() { this.sslContext = sslContextInit(); } public static int getCPUCnt() { return CPU_CNT; } private SslContext sslContextInit() { try { SslProvider sslProvider = SslContext.defaultClientProvider(); return SslContextBuilder.forClient().sslProvider(sslProvider).build(); } catch (SSLException sslException) { logger.error("Fatal error cannot instantiate ssl context due to {}", sslException.getMessage(), sslException); throw new IllegalStateException(sslException); } } public SslContext getSslContext() { return this.sslContext; } public Protocol getProtocol() { String protocol = System.getProperty(PROTOCOL_PROPERTY, firstNonNull( emptyToNull(System.getenv().get(PROTOCOL_ENVIRONMENT_VARIABLE)), DEFAULT_PROTOCOL.name())); try { return Protocol.valueOf(protocol.toUpperCase(Locale.ROOT)); } catch (Exception e) { logger.error("Parsing protocol {} failed. 
Using the default {}.", protocol, DEFAULT_PROTOCOL, e); return DEFAULT_PROTOCOL; } } public int getMaxNumberOfReadBarrierReadRetries() { return MAX_NUMBER_OF_READ_BARRIER_READ_RETRIES; } public int getMaxNumberOfPrimaryReadRetries() { return MAX_NUMBER_OF_PRIMARY_READ_RETRIES; } public int getMaxNumberOfReadQuorumRetries() { return MAX_NUMBER_OF_READ_QUORUM_RETRIES; } public int getDelayBetweenReadBarrierCallsInMs() { return DELAY_BETWEEN_READ_BARRIER_CALLS_IN_MS; } public int getMaxBarrierRetriesForMultiRegion() { return MAX_BARRIER_RETRIES_FOR_MULTI_REGION; } public int getBarrierRetryIntervalInMsForMultiRegion() { return BARRIER_RETRY_INTERVAL_IN_MS_FOR_MULTI_REGION; } public int getMaxShortBarrierRetriesForMultiRegion() { return MAX_SHORT_BARRIER_RETRIES_FOR_MULTI_REGION; } public int getShortBarrierRetryIntervalInMsForMultiRegion() { return SHORT_BARRIER_RETRY_INTERVAL_IN_MS_FOR_MULTI_REGION; } public int getDirectHttpsMaxConnectionLimit() { return getJVMConfigAsInt(MAX_DIRECT_HTTPS_POOL_SIZE, DEFAULT_DIRECT_HTTPS_POOL_SIZE); } public int getMaxHttpHeaderSize() { return getJVMConfigAsInt(MAX_HTTP_HEADER_SIZE_IN_BYTES, DEFAULT_MAX_HTTP_REQUEST_HEADER_SIZE); } public int getMaxHttpInitialLineLength() { return getJVMConfigAsInt(MAX_HTTP_INITIAL_LINE_LENGTH_IN_BYTES, DEFAULT_MAX_HTTP_INITIAL_LINE_LENGTH); } public int getMaxHttpChunkSize() { return getJVMConfigAsInt(MAX_HTTP_CHUNK_SIZE_IN_BYTES, DEFAULT_MAX_HTTP_CHUNK_SIZE_IN_BYTES); } public int getMaxHttpBodyLength() { return getJVMConfigAsInt(MAX_HTTP_BODY_LENGTH_IN_BYTES, DEFAULT_MAX_HTTP_BODY_LENGTH_IN_BYTES); } public int getUnavailableLocationsExpirationTimeInSeconds() { return getJVMConfigAsInt(UNAVAILABLE_LOCATIONS_EXPIRATION_TIME_IN_SECONDS, DEFAULT_UNAVAILABLE_LOCATIONS_EXPIRATION_TIME_IN_SECONDS); } public static int getClientTelemetrySchedulingInSec() { return getJVMConfigAsInt(CLIENT_TELEMETRY_SCHEDULING_IN_SECONDS, DEFAULT_CLIENT_TELEMETRY_SCHEDULING_IN_SECONDS); } public int 
getGlobalEndpointManagerMaxInitializationTimeInSeconds() { return getJVMConfigAsInt(GLOBAL_ENDPOINT_MANAGER_INITIALIZATION_TIME_IN_SECONDS, DEFAULT_GLOBAL_ENDPOINT_MANAGER_INITIALIZATION_TIME_IN_SECONDS); } public String getReactorNettyConnectionPoolName() { return REACTOR_NETTY_CONNECTION_POOL_NAME; } public Duration getMaxIdleConnectionTimeout() { return MAX_IDLE_CONNECTION_TIMEOUT; } public Duration getConnectionAcquireTimeout() { return CONNECTION_ACQUIRE_TIMEOUT; } public int getReactorNettyMaxConnectionPoolSize() { return REACTOR_NETTY_MAX_CONNECTION_POOL_SIZE; } public static int getHttpResponseTimeoutInSeconds() { return getJVMConfigAsInt(HTTP_RESPONSE_TIMEOUT_IN_SECONDS, DEFAULT_HTTP_RESPONSE_TIMEOUT_IN_SECONDS); } public static int getQueryPlanResponseTimeoutInSeconds() { return getJVMConfigAsInt(QUERY_PLAN_RESPONSE_TIMEOUT_IN_SECONDS, DEFAULT_QUERY_PLAN_RESPONSE_TIMEOUT_IN_SECONDS); } public static String getClientTelemetryEndpoint() { return System.getProperty(CLIENT_TELEMETRY_ENDPOINT); } public static String getClientTelemetryProxyOptionsConfig() { return System.getProperty(CLIENT_TELEMETRY_PROXY_OPTIONS_CONFIG); } public static String getNonIdempotentWriteRetryPolicy() { String valueFromSystemProperty = System.getProperty(NON_IDEMPOTENT_WRITE_RETRY_POLICY); if (valueFromSystemProperty != null && !valueFromSystemProperty.isEmpty()) { return valueFromSystemProperty; } return System.getenv(NON_IDEMPOTENT_WRITE_RETRY_POLICY_VARIABLE); } public static String getEnvironmentName() { return System.getProperty(ENVIRONMENT_NAME); } public static boolean isQueryPlanCachingEnabled() { return getJVMConfigAsBoolean(QUERYPLAN_CACHING_ENABLED, true); } public static int getAddressRefreshResponseTimeoutInSeconds() { return getJVMConfigAsInt(ADDRESS_REFRESH_RESPONSE_TIMEOUT_IN_SECONDS, DEFAULT_ADDRESS_REFRESH_RESPONSE_TIMEOUT_IN_SECONDS); } public static int getSessionTokenMismatchDefaultWaitTimeInMs() { return getJVMConfigAsInt( 
DEFAULT_SESSION_TOKEN_MISMATCH_WAIT_TIME_IN_MILLISECONDS_NAME, DEFAULT_SESSION_TOKEN_MISMATCH_WAIT_TIME_IN_MILLISECONDS); } public static int getSessionTokenMismatchInitialBackoffTimeInMs() { return getJVMConfigAsInt( DEFAULT_SESSION_TOKEN_MISMATCH_INITIAL_BACKOFF_TIME_IN_MILLISECONDS_NAME, DEFAULT_SESSION_TOKEN_MISMATCH_INITIAL_BACKOFF_TIME_IN_MILLISECONDS); } public static int getSessionTokenMismatchMaximumBackoffTimeInMs() { return getJVMConfigAsInt( DEFAULT_SESSION_TOKEN_MISMATCH_MAXIMUM_BACKOFF_TIME_IN_MILLISECONDS_NAME, DEFAULT_SESSION_TOKEN_MISMATCH_MAXIMUM_BACKOFF_TIME_IN_MILLISECONDS); } public static int speculationThreshold() { return getJVMConfigAsInt(SPECULATION_THRESHOLD, 500); } public static int speculationThresholdStep() { return getJVMConfigAsInt(SPECULATION_THRESHOLD_STEP, 100); } public static boolean shouldSwitchOffIOThreadForResponse() { return getJVMConfigAsBoolean( SWITCH_OFF_IO_THREAD_FOR_RESPONSE_NAME, DEFAULT_SWITCH_OFF_IO_THREAD_FOR_RESPONSE); } public static boolean isEmptyPageDiagnosticsEnabled() { return getJVMConfigAsBoolean( QUERY_EMPTY_PAGE_DIAGNOSTICS_ENABLED, DEFAULT_QUERY_EMPTY_PAGE_DIAGNOSTICS_ENABLED); } public static boolean useLegacyTracing() { return getJVMConfigAsBoolean( USE_LEGACY_TRACING, DEFAULT_USE_LEGACY_TRACING); } private static int getJVMConfigAsInt(String propName, int defaultValue) { String propValue = System.getProperty(propName); return getIntValue(propValue, defaultValue); } private static boolean getJVMConfigAsBoolean(String propName, boolean defaultValue) { String propValue = System.getProperty(propName); return getBooleanValue(propValue, defaultValue); } private static int getIntValue(String val, int defaultValue) { if (StringUtils.isEmpty(val)) { return defaultValue; } else { return Integer.valueOf(val); } } private static boolean getBooleanValue(String val, boolean defaultValue) { if (StringUtils.isEmpty(val)) { return defaultValue; } else { return Boolean.valueOf(val); } } public static boolean 
isReplicaAddressValidationEnabled() { return getJVMConfigAsBoolean( REPLICA_ADDRESS_VALIDATION_ENABLED, DEFAULT_REPLICA_ADDRESS_VALIDATION_ENABLED); } public static boolean isTcpHealthCheckTimeoutDetectionEnabled() { return getJVMConfigAsBoolean( TCP_HEALTH_CHECK_TIMEOUT_DETECTION_ENABLED, DEFAULT_TCP_HEALTH_CHECK_TIMEOUT_DETECTION_ENABLED); } public static int getMinConnectionPoolSizePerEndpoint() { return getIntValue(System.getProperty(MIN_CONNECTION_POOL_SIZE_PER_ENDPOINT), DEFAULT_MIN_CONNECTION_POOL_SIZE_PER_ENDPOINT); } public static int getDefensiveWarmupConcurrency() { return getIntValue(System.getProperty(DEFENSIVE_WARMUP_CONCURRENCY), DEFAULT_DEFENSIVE_WARMUP_CONCURRENCY); } public static int getAggressiveWarmupConcurrency() { return getIntValue(System.getProperty(AGGRESSIVE_WARMUP_CONCURRENCY), DEFAULT_AGGRESSIVE_WARMUP_CONCURRENCY); } }
TODO: Add code samples for the champion scenarios (the most common end-to-end usage patterns) here.
public Response<TableClient> createTableWithResponse(String tableName, Duration timeout, Context context) { OptionalLong timeoutInMillis = TableUtils.setTimeout(timeout); Callable<Response<TableClient>> callable = () -> createTableWithResponse(tableName, context); try { return timeoutInMillis.isPresent() ? THREAD_POOL.submit(callable).get(timeoutInMillis.getAsLong(), TimeUnit.MILLISECONDS) : callable.call(); } catch (Exception ex) { throw logger.logExceptionAsError((RuntimeException) TableUtils.mapThrowableToTableServiceException(ex)); } }
}
public Response<TableClient> createTableWithResponse(String tableName, Duration timeout, Context context) { Supplier<Response<TableClient>> callable = () -> createTableWithResponse(tableName, context); return callWithOptionalTimeout(callable, THREAD_POOL, timeout, logger); }
class TableServiceClient {
    // Shared, JVM-wide executor used to enforce optional client-side timeouts; TableUtils registers
    // a shutdown hook that tears it down on JVM exit.
    private static final ExecutorService THREAD_POOL = TableUtils.getThreadPoolWithShutdownHook();
    private final ClientLogger logger = new ClientLogger(TableServiceClient.class);
    private final AzureTableImpl implementation;
    private final String accountName;
    private final HttpPipeline pipeline;

    TableServiceClient(HttpPipeline pipeline, String url, TableServiceVersion serviceVersion,
        SerializerAdapter serializerAdapter) {
        try {
            final URI uri = URI.create(url);
            // The account name is the first DNS label of the endpoint host,
            // e.g. "myaccount" in "myaccount.table.core.windows.net".
            this.accountName = uri.getHost().split("\\.", 2)[0];

            logger.verbose("Table Service URI: {}", uri);
        } catch (NullPointerException | IllegalArgumentException ex) {
            throw logger.logExceptionAsError(ex);
        }

        this.implementation = new AzureTableImplBuilder()
            .serializerAdapter(serializerAdapter)
            .url(url)
            .pipeline(pipeline)
            .version(serviceVersion.getVersion())
            .buildClient();
        this.pipeline = implementation.getHttpPipeline();
    }

    /**
     * Gets the name of the account containing the table.
     *
     * @return The name of the account containing the table.
     */
    public String getAccountName() {
        return accountName;
    }

    /**
     * Gets the endpoint for the Tables service.
     *
     * @return The endpoint for the Tables service.
     */
    public String getServiceEndpoint() {
        return implementation.getUrl();
    }

    /**
     * Gets the REST API version used by this client.
     *
     * @return The REST API version used by this client.
     */
    public TableServiceVersion getServiceVersion() {
        return TableServiceVersion.fromString(implementation.getVersion());
    }

    /**
     * Gets the {@link HttpPipeline} powering this client.
     *
     * @return This client's {@link HttpPipeline}.
     */
    HttpPipeline getHttpPipeline() {
        return this.pipeline;
    }

    /**
     * Generates an account SAS for the Azure Storage account using the specified
     * {@link TableAccountSasSignatureValues}.
     *
     * <p><strong>Note:</strong> The client must be authenticated via {@link AzureNamedKeyCredential}.</p>
     * <p>See {@link TableAccountSasSignatureValues} for more information on how to construct an account SAS.</p>
     *
     * @param tableAccountSasSignatureValues {@link TableAccountSasSignatureValues}.
     *
     * @return A {@code String} representing the SAS query parameters.
     *
     * @throws IllegalStateException If this {@link TableClient} is not authenticated with an
     * {@link AzureNamedKeyCredential}.
     */
    public String generateAccountSas(TableAccountSasSignatureValues tableAccountSasSignatureValues) {
        AzureNamedKeyCredential azureNamedKeyCredential = TableSasUtils.extractNamedKeyCredential(getHttpPipeline());

        if (azureNamedKeyCredential == null) {
            throw logger.logExceptionAsError(new IllegalStateException("Cannot generate a SAS token with a client that"
                + " is not authenticated with an AzureNamedKeyCredential."));
        }

        return new TableAccountSasGenerator(tableAccountSasSignatureValues, azureNamedKeyCredential).getSas();
    }

    /**
     * Gets a {@link TableClient} instance for the table in the account with the provided {@code tableName}. The
     * resulting {@link TableClient} will use the same {@link HttpPipeline pipeline} and
     * {@link TableServiceVersion service version} as this {@link TableServiceClient}.
     *
     * @param tableName The name of the table.
     *
     * @return A {@link TableClient} instance for the table in the account with the provided {@code tableName}.
     *
     * @throws IllegalArgumentException If {@code tableName} is {@code null} or empty.
     */
    public TableClient getTableClient(String tableName) {
        return new TableClientBuilder()
            .pipeline(this.implementation.getHttpPipeline())
            .serviceVersion(this.getServiceVersion())
            .endpoint(this.getServiceEndpoint())
            .tableName(tableName)
            .buildClient();
    }

    /**
     * Creates a table within the Tables service.
     *
     * @param tableName The name of the table to create.
     *
     * @return A {@link TableClient} for the created table.
     *
     * @throws IllegalArgumentException If {@code tableName} is {@code null} or empty.
     * @throws TableServiceException If a table with the same name already exists within the service.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public TableClient createTable(String tableName) {
        return createTableWithResponse(tableName, null, null).getValue();
    }

    /**
     * Creates a table within the Tables service with a single synchronous service call.
     *
     * @param tableName The name of the table to create.
     * @param context Additional {@link Context} that is passed through the {@link HttpPipeline HTTP pipeline} during
     * the service call.
     *
     * @return The {@link Response HTTP response} containing a {@link TableClient} for the created table.
     *
     * @throws IllegalArgumentException If {@code tableName} is {@code null} or empty.
     * @throws TableServiceException If a table with the same name already exists within the service.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    Response<TableClient> createTableWithResponse(String tableName, Context context) {
        context = TableUtils.setContext(context, true);

        final TableProperties properties = new TableProperties().setTableName(tableName);

        return new SimpleResponse<>(implementation.getTables()
            .createWithResponse(properties, null, ResponseFormat.RETURN_NO_CONTENT, null, context),
            getTableClient(tableName));
    }

    /**
     * Creates a table within the Tables service if the table does not already exist.
     *
     * @param tableName The name of the table to create.
     *
     * @return A {@link TableClient} for the created table.
     *
     * @throws IllegalArgumentException If {@code tableName} is {@code null} or empty.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public TableClient createTableIfNotExists(String tableName) {
        return createTableIfNotExistsWithResponse(tableName, null, null).getValue();
    }

    /**
     * Creates a table within the Tables service if the table does not already exist.
     *
     * @param tableName The name of the table to create.
     * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
     * @param context Additional {@link Context} that is passed through the {@link HttpPipeline HTTP pipeline} during
     * the service call.
     *
     * @return The {@link Response HTTP response} containing a {@link TableClient} for the created table.
     *
     * @throws IllegalArgumentException If {@code tableName} is {@code null} or empty.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<TableClient> createTableIfNotExistsWithResponse(String tableName, Duration timeout,
        Context context) {
        OptionalLong timeoutInMillis = TableUtils.setTimeout(timeout);
        Callable<Response<TableClient>> callable = () -> createTableIfNotExistsWithResponse(tableName, context);

        try {
            // With a timeout, the work runs on the shared executor so Future.get(timeout) can
            // abandon it; otherwise it runs synchronously on this thread.
            return timeoutInMillis.isPresent()
                ? THREAD_POOL.submit(callable).get(timeoutInMillis.getAsLong(), TimeUnit.MILLISECONDS)
                : callable.call();
        } catch (Exception e) {
            throw logger.logExceptionAsError((RuntimeException) TableUtils.mapThrowableToTableServiceException(e));
        }
    }

    Response<TableClient> createTableIfNotExistsWithResponse(String tableName, Context context) throws Exception {
        try {
            // Call the synchronous single-attempt overload directly so the caller-supplied context is
            // propagated and the request is not re-routed through the public timeout wrapper a second
            // time. (Previously this called createTableWithResponse(tableName, null, null), silently
            // dropping the context.)
            return createTableWithResponse(tableName, context);
        } catch (Exception e) {
            // 409 (Conflict) means the table already exists: treat as success and surface the raw response.
            if (e instanceof TableServiceException
                && ((TableServiceException) e).getResponse() != null
                && ((TableServiceException) e).getResponse().getStatusCode() == 409) {
                HttpResponse response = ((TableServiceException) e).getResponse();

                return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(),
                    null);
            }

            throw logger.logExceptionAsError(new RuntimeException(e));
        }
    }

    /**
     * Deletes a table within the Tables service.
     *
     * @param tableName The name of the table to delete.
     *
     * @throws IllegalArgumentException If {@code tableName} is {@code null} or empty.
     * @throws TableServiceException If the request is rejected by the service.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void deleteTable(String tableName) {
        deleteTableWithResponse(tableName, null, null);
    }

    /**
     * Deletes a table within the Tables service. Deleting a table that does not exist (HTTP 404) is
     * treated as success and the raw response is returned.
     *
     * @param tableName The name of the table to delete.
     * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
     * @param context Additional {@link Context} that is passed through the {@link HttpPipeline HTTP pipeline} during
     * the service call.
     *
     * @return The {@link Response HTTP response}.
     *
     * @throws IllegalArgumentException If {@code tableName} is {@code null} or empty.
     * @throws TableServiceException If the request is rejected by the service.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> deleteTableWithResponse(String tableName, Duration timeout, Context context) {
        OptionalLong timeoutInMillis = TableUtils.setTimeout(timeout);
        Callable<Response<Void>> callable = () -> deleteTableWithResponse(tableName, context);

        try {
            return timeoutInMillis.isPresent()
                ? THREAD_POOL.submit(callable).get(timeoutInMillis.getAsLong(), TimeUnit.MILLISECONDS)
                : callable.call();
        } catch (Exception e) {
            Exception exception = (Exception) TableUtils.mapThrowableToTableServiceException(e);

            // Null-check the response before reading the status code: the mapped exception may carry
            // no response, which previously caused a NullPointerException here (the create-if-not-exists
            // path already guarded against this).
            if (exception instanceof TableServiceException
                && ((TableServiceException) exception).getResponse() != null
                && ((TableServiceException) exception).getResponse().getStatusCode() == 404) {
                HttpResponse httpResponse = ((TableServiceException) exception).getResponse();

                return new SimpleResponse<>(httpResponse.getRequest(), httpResponse.getStatusCode(),
                    httpResponse.getHeaders(), null);
            }

            throw logger.logExceptionAsError(new RuntimeException(exception));
        }
    }

    Response<Void> deleteTableWithResponse(String tableName, Context context) {
        context = TableUtils.setContext(context, true);

        return new SimpleResponse<>(
            implementation.getTables().deleteWithResponse(tableName, null, context), null);
    }

    /**
     * Lists all tables within the account.
     *
     * @return A {@link PagedIterable} containing all tables within the account.
     *
     * @throws TableServiceException If the request is rejected by the service.
     */
    @ServiceMethod(returns = ReturnType.COLLECTION)
    public PagedIterable<TableItem> listTables() {
        return listTables(new ListTablesOptions(), null, null);
    }

    /**
     * Lists tables within the account. If the {@code filter} parameter in the options is set, only tables matching
     * the filter will be returned. If the {@code top} parameter is set, the maximum number of returned tables per
     * page will be limited to that value.
     *
     * @param options The {@code filter} and {@code top} OData query options to apply to this operation.
     * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
     * @param context Additional {@link Context} that is passed through the {@link HttpPipeline HTTP pipeline} during
     * the service call.
     *
     * @return A {@link PagedIterable} containing matching tables within the account.
     *
     * @throws IllegalArgumentException If one or more of the OData query options in {@code options} is malformed.
     * @throws TableServiceException If the request is rejected by the service.
     */
    @ServiceMethod(returns = ReturnType.COLLECTION)
    public PagedIterable<TableItem> listTables(ListTablesOptions options, Duration timeout, Context context) {
        OptionalLong timeoutInMillis = TableUtils.setTimeout(timeout);
        Callable<PagedIterable<TableItem>> callable = () -> listTables(options, context);

        try {
            return timeoutInMillis.isPresent()
                ? THREAD_POOL.submit(callable).get(timeoutInMillis.getAsLong(), TimeUnit.MILLISECONDS)
                : callable.call();
        } catch (Exception e) {
            throw logger.logExceptionAsError((RuntimeException) TableUtils.mapThrowableToTableServiceException(e));
        }
    }

    // Builds the lazily-evaluated paged view over the query endpoints below.
    private PagedIterable<TableItem> listTables(ListTablesOptions options, Context context) {
        return new PagedIterable<TableItem>(
            () -> listTablesFirstPage(context, options),
            token -> listTablesNextPage(token, context, options)
        );
    }

    private PagedResponse<TableItem> listTablesFirstPage(Context context, ListTablesOptions options) {
        return listTables(null, context, options);
    }

    private PagedResponse<TableItem> listTablesNextPage(String token, Context context, ListTablesOptions options) {
        return listTables(token, context, options);
    }

    // Fetches one page of tables; nextTableName is the continuation token (null for the first page).
    private PagedResponse<TableItem> listTables(String nextTableName, Context context, ListTablesOptions options) {
        context = TableUtils.setContext(context, true);

        QueryOptions queryOptions = new QueryOptions()
            .setFilter(options.getFilter())
            .setTop(options.getTop())
            .setFormat(OdataMetadataFormat.APPLICATION_JSON_ODATA_FULLMETADATA);

        ResponseBase<TablesQueryHeaders, TableQueryResponse> response =
            implementation.getTables().queryWithResponse(null, nextTableName, queryOptions, context);

        TableQueryResponse tableQueryResponse = response.getValue();

        if (tableQueryResponse == null) {
            // NOTE(review): returning null signals "no page" to PagedIterable; confirm an empty page
            // would not be the more correct result here.
            return null;
        }

        List<TableResponseProperties> tableResponsePropertiesList = tableQueryResponse.getValue();

        if (tableResponsePropertiesList == null) {
            return null;
        }

        final List<TableItem> tables = tableResponsePropertiesList.stream()
            .map(TableItemAccessHelper::createItem).collect(Collectors.toList());

        return new TablePaged(response, tables, response.getDeserializedHeaders().getXMsContinuationNextTableName());
    }

    /**
     * Gets the properties of the account's Table service, including properties for Analytics and CORS (Cross-Origin
     * Resource Sharing) rules.
     *
     * <p>This operation is only supported on Azure Storage endpoints.</p>
     *
     * @return The {@link TableServiceProperties properties} of the account's Table service.
     *
     * @throws TableServiceException If the request is rejected by the service.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public TableServiceProperties getProperties() {
        return getPropertiesWithResponse(null, null).getValue();
    }

    /**
     * Gets the properties of the account's Table service, including properties for Analytics and CORS (Cross-Origin
     * Resource Sharing) rules.
     *
     * <p>This operation is only supported on Azure Storage endpoints.</p>
     *
     * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
     * @param context Additional {@link Context} that is passed through the {@link HttpPipeline HTTP pipeline} during
     * the service call.
     *
     * @return The {@link Response HTTP response} and the {@link TableServiceProperties properties} of the account's
     * Table service.
     *
     * @throws TableServiceException If the request is rejected by the service.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<TableServiceProperties> getPropertiesWithResponse(Duration timeout, Context context) {
        OptionalLong timeoutInMillis = TableUtils.setTimeout(timeout);
        Callable<Response<TableServiceProperties>> callable = () -> getPropertiesWithResponse(context);

        try {
            return timeoutInMillis.isPresent()
                ? THREAD_POOL.submit(callable).get(timeoutInMillis.getAsLong(), TimeUnit.MILLISECONDS)
                : callable.call();
        } catch (Exception ex) {
            throw logger.logExceptionAsError((RuntimeException) TableUtils.mapThrowableToTableServiceException(ex));
        }
    }

    Response<TableServiceProperties> getPropertiesWithResponse(Context context) {
        context = TableUtils.setContext(context, true);

        Response<com.azure.data.tables.implementation.models.TableServiceProperties> response =
            this.implementation.getServices().getPropertiesWithResponse(null, null, context);

        return new SimpleResponse<>(response, TableUtils.toTableServiceProperties(response.getValue()));
    }

    /**
     * Sets the properties of the account's Table service, including properties for Analytics and CORS (Cross-Origin
     * Resource Sharing) rules.
     *
     * <p>This operation is only supported on Azure Storage endpoints.</p>
     *
     * @param tableServiceProperties The {@link TableServiceProperties} to set.
     *
     * @throws TableServiceException If the request is rejected by the service.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void setProperties(TableServiceProperties tableServiceProperties) {
        setPropertiesWithResponse(tableServiceProperties, null, null);
    }

    /**
     * Sets the properties of an account's Table service, including properties for Analytics and CORS (Cross-Origin
     * Resource Sharing) rules.
     *
     * <p>This operation is only supported on Azure Storage endpoints.</p>
     *
     * @param tableServiceProperties The {@link TableServiceProperties} to set.
     * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
     * @param context Additional {@link Context} that is passed through the {@link HttpPipeline HTTP pipeline} during
     * the service call.
     *
     * @return The {@link Response HTTP response}.
     *
     * @throws TableServiceException If the request is rejected by the service.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> setPropertiesWithResponse(TableServiceProperties tableServiceProperties, Duration timeout,
        Context context) {
        OptionalLong timeoutInMillis = TableUtils.setTimeout(timeout);
        Callable<Response<Void>> callable = () -> setPropertiesWithResponse(tableServiceProperties, context);

        try {
            return timeoutInMillis.isPresent()
                ? THREAD_POOL.submit(callable).get(timeoutInMillis.getAsLong(), TimeUnit.MILLISECONDS)
                : callable.call();
        } catch (Exception e) {
            throw logger.logExceptionAsError((RuntimeException) TableUtils.mapThrowableToTableServiceException(e));
        }
    }

    Response<Void> setPropertiesWithResponse(TableServiceProperties tableServiceProperties, Context context) {
        context = TableUtils.setContext(context, true);

        return new SimpleResponse<>(this.implementation.getServices()
            .setPropertiesWithResponse(TableUtils.toImplTableServiceProperties(tableServiceProperties), null, null,
                context), null);
    }

    /**
     * Retrieves statistics related to replication for the account's Table service. It is only available on the
     * secondary location endpoint when read-access geo-redundant replication is enabled for the account.
     *
     * <p>This operation is only supported on Azure Storage endpoints.</p>
     *
     * @return {@link TableServiceStatistics Statistics} for the account's Table service.
     *
     * @throws TableServiceException If the request is rejected by the service.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public TableServiceStatistics getStatistics() {
        return getStatisticsWithResponse(null, null).getValue();
    }

    /**
     * Retrieves statistics related to replication for the account's Table service. It is only available on the
     * secondary location endpoint when read-access geo-redundant replication is enabled for the account.
     *
     * <p>This operation is only supported on Azure Storage endpoints.</p>
     *
     * <p><strong>Code Samples</strong></p>
     * <p>Gets the replication statistics of the account's Table service.
Prints out the details of the * {@link Response HTTP response}.</p> * <!-- src_embed com.azure.data.tables.tableServiceClient.getStatisticsWithResponse * <pre> * Response&lt;TableServiceStatistics&gt; response = tableServiceClient.getStatisticsWithResponse& * new Context& * * System.out.printf& * response.getStatusCode& * </pre> * <!-- end com.azure.data.tables.tableServiceClient.getStatisticsWithResponse * * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional {@link Context} that is passed through the {@link HttpPipeline HTTP pipeline} during * the service call. * * @return An {@link Response HTTP response} containing {@link TableServiceStatistics statistics} for the * account's Table service. * * @throws TableServiceException If the request is rejected by the service. */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<TableServiceStatistics> getStatisticsWithResponse(Duration timeout, Context context) { OptionalLong timeoutInMillis = TableUtils.setTimeout(timeout); Callable<Response<TableServiceStatistics>> callable = () -> getStatisticsWithResponse(context); try { return timeoutInMillis.isPresent() ? THREAD_POOL.submit(callable).get(timeoutInMillis.getAsLong(), TimeUnit.MILLISECONDS) : callable.call(); } catch (Exception e) { throw logger.logExceptionAsError((RuntimeException) TableUtils.mapThrowableToTableServiceException(e)); } } Response<TableServiceStatistics> getStatisticsWithResponse(Context context) { context = TableUtils.setContext(context, true); Response<TableServiceStats> response = this.implementation.getServices().getStatisticsWithResponse( null, null, context); return new SimpleResponse<>(response, TableUtils.toTableServiceStatistics(response.getValue())); } }
class TableServiceClient { private static final ExecutorService THREAD_POOL = TableUtils.getThreadPoolWithShutdownHook(); private final ClientLogger logger = new ClientLogger(TableServiceClient.class); private final AzureTableImpl implementation; private final String accountName; private final HttpPipeline pipeline; TableServiceClient(HttpPipeline pipeline, String url, TableServiceVersion serviceVersion, SerializerAdapter serializerAdapter) { try { final URI uri = URI.create(url); this.accountName = uri.getHost().split("\\.", 2)[0]; logger.verbose("Table Service URI: {}", uri); } catch (NullPointerException | IllegalArgumentException ex) { throw logger.logExceptionAsError(ex); } this.implementation = new AzureTableImplBuilder() .serializerAdapter(serializerAdapter) .url(url) .pipeline(pipeline) .version(serviceVersion.getVersion()) .buildClient(); this.pipeline = implementation.getHttpPipeline(); } /** * Gets the name of the account containing the table. * * @return The name of the account containing the table. */ public String getAccountName() { return accountName; } /** * Gets the endpoint for the Tables service. * * @return The endpoint for the Tables service. */ public String getServiceEndpoint() { return implementation.getUrl(); } /** * Gets the REST API version used by this client. * * @return The REST API version used by this client. */ public TableServiceVersion getServiceVersion() { return TableServiceVersion.fromString(implementation.getVersion()); } /** * Gets the {@link HttpPipeline} powering this client. * * @return This client's {@link HttpPipeline}. */ HttpPipeline getHttpPipeline() { return this.pipeline; } /** * Generates an account SAS for the Azure Storage account using the specified * {@link TableAccountSasSignatureValues}. 
* * <p><strong>Note:</strong> The client must be authenticated via {@link AzureNamedKeyCredential}.</p> * <p>See {@link TableAccountSasSignatureValues} for more information on how to construct an account SAS.</p> * * @param tableAccountSasSignatureValues {@link TableAccountSasSignatureValues}. * * @return A {@code String} representing the SAS query parameters. * * @throws IllegalStateException If this {@link TableClient} is not authenticated with an * {@link AzureNamedKeyCredential}. */ public String generateAccountSas(TableAccountSasSignatureValues tableAccountSasSignatureValues) { AzureNamedKeyCredential azureNamedKeyCredential = TableSasUtils.extractNamedKeyCredential(getHttpPipeline()); if (azureNamedKeyCredential == null) { throw logger.logExceptionAsError(new IllegalStateException("Cannot generate a SAS token with a client that" + " is not authenticated with an AzureNamedKeyCredential.")); } return new TableAccountSasGenerator(tableAccountSasSignatureValues, azureNamedKeyCredential).getSas(); } /** * Gets a {@link TableClient} instance for the table in the account with the provided {@code tableName}. The * resulting {@link TableClient} will use the same {@link HttpPipeline pipeline} and * {@link TableServiceVersion service version} as this {@link TableServiceClient}. * * @param tableName The name of the table. * * @return A {@link TableClient} instance for the table in the account with the provided {@code tableName}. * * @throws IllegalArgumentException If {@code tableName} is {@code null} or empty. */ public TableClient getTableClient(String tableName) { return new TableClientBuilder() .pipeline(this.implementation.getHttpPipeline()) .serviceVersion(this.getServiceVersion()) .endpoint(this.getServiceEndpoint()) .tableName(tableName) .buildClient(); } /** * Creates a table within the Tables service. * * <p><strong>Code Samples</strong></p> * <p>Creates a table. 
Prints out the details of the created table.</p> * <!-- src_embed com.azure.data.tables.tableServiceClient.createTable * <pre> * TableClient tableClient = tableServiceClient.createTable& * * System.out.printf& * </pre> * <!-- end com.azure.data.tables.tableServiceClient.createTable * * @param tableName The name of the table to create. * * @return A {@link TableClient} for the created table. * * @throws IllegalArgumentException If {@code tableName} is {@code null} or empty. * @throws TableServiceException If a table with the same name already exists within the service. */ @ServiceMethod(returns = ReturnType.SINGLE) public TableClient createTable(String tableName) { return createTableWithResponse(tableName, null, null).getValue(); } /** * Creates a table within the Tables service. * * <p><strong>Code Samples</strong></p> * <p>Creates a table. Prints out the details of the {@link Response HTTP response} and the created table.</p> * <!-- src_embed com.azure.data.tables.tableServiceClient.createTableWithResponse * <pre> * Response&lt;TableClient&gt; response = tableServiceClient.createTableWithResponse& * new Context& * * System.out.printf& * response.getStatusCode& * </pre> * <!-- end com.azure.data.tables.tableServiceClient.createTableWithResponse * * @param tableName The name of the table to create. * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional {@link Context} that is passed through the {@link HttpPipeline HTTP pipeline} during * the service call. * * @return The {@link Response HTTP response} containing a {@link TableClient} for the created table. * * @throws IllegalArgumentException If {@code tableName} is {@code null} or empty. * @throws TableServiceException If a table with the same name already exists within the service. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) Response<TableClient> createTableWithResponse(String tableName, Context context) { context = TableUtils.setContext(context, true); final TableProperties properties = new TableProperties().setTableName(tableName); return new SimpleResponse<>(implementation.getTables() .createWithResponse(properties, null, ResponseFormat.RETURN_NO_CONTENT, null, context), getTableClient(tableName)); } /** * Creates a table within the Tables service if the table does not already exist. * * <p><strong>Code Samples</strong></p> * <p>Creates a table if it does not already exist. Prints out the details of the created table.</p> * <!-- src_embed com.azure.data.tables.tableServiceClient.createTableIfNotExists * <pre> * TableClient tableClient = tableServiceClient.createTableIfNotExists& * * System.out.printf& * </pre> * <!-- end com.azure.data.tables.tableServiceClient.createTableIfNotExists * * @param tableName The name of the table to create. * * @return A {@link TableClient} for the created table. * * @throws IllegalArgumentException If {@code tableName} is {@code null} or empty. */ @ServiceMethod(returns = ReturnType.SINGLE) public TableClient createTableIfNotExists(String tableName) { return createTableIfNotExistsWithResponse(tableName, null, null).getValue(); } /** * Creates a table within the Tables service if the table does not already exist. * * <p><strong>Code Samples</strong></p> * <p>Creates a table if it does not already exist. Prints out the details of the {@link Response HTTP response} * and the created table.</p> * <!-- src_embed com.azure.data.tables.tableServiceClient.createTableIfNotExistsWithResponse * <pre> * Response&lt;TableClient&gt; response = * tableServiceClient.createTableIfNotExistsWithResponse& * new Context& * * System.out.printf& * response.getStatusCode& * </pre> * <!-- end com.azure.data.tables.tableServiceClient.createTableIfNotExistsWithResponse * * @param tableName The name of the table to create. 
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional {@link Context} that is passed through the {@link HttpPipeline HTTP pipeline} during * the service call. * * @return The {@link Response HTTP response} containing a {@link TableClient} for the created table. * * @throws IllegalArgumentException If {@code tableName} is {@code null} or empty. */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<TableClient> createTableIfNotExistsWithResponse(String tableName, Duration timeout, Context context) { Supplier<Response<TableClient>> callable = () -> createTableIfNotExistsWithResponse(tableName, context); return callWithOptionalTimeout(callable, THREAD_POOL, timeout, logger); } Response<TableClient> createTableIfNotExistsWithResponse(String tableName, Context context) { try { return createTableWithResponse(tableName, null, context); } catch (Exception e) { if (e instanceof TableServiceException && ((TableServiceException) e).getResponse() != null && ((TableServiceException) e).getResponse().getStatusCode() == 409) { HttpResponse response = ((TableServiceException) e).getResponse(); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } throw logger.logExceptionAsError(new RuntimeException(e)); } } /** * Deletes a table within the Tables service. * * <p><strong>Code Samples</strong></p> * <p>Deletes a table.</p> * <!-- src_embed com.azure.data.tables.tableServiceClient.deleteTable * <pre> * tableServiceClient.deleteTable& * * System.out.printf& * </pre> * <!-- end com.azure.data.tables.tableServiceClient.deleteTable * * @param tableName The name of the table to delete. * * @throws IllegalArgumentException If {@code tableName} is {@code null} or empty. * @throws TableServiceException If the request is rejected by the service. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public void deleteTable(String tableName) { deleteTableWithResponse(tableName, null, null); } /** * Deletes a table within the Tables service. * * <p><strong>Code Samples</strong></p> * <p>Deletes a table. Prints out the details of the {@link Response HTTP response}.</p> * <!-- src_embed com.azure.data.tables.tableServiceClient.deleteTableWithResponse * <pre> * Response&lt;Void&gt; response = tableServiceClient.deleteTableWithResponse& * new Context& * * System.out.printf& * response.getStatusCode& * </pre> * <!-- end com.azure.data.tables.tableServiceClient.deleteTableWithResponse * * @param tableName The name of the table to delete. * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional {@link Context} that is passed through the {@link HttpPipeline HTTP pipeline} during * the service call. * * @return The {@link Response HTTP response}. * * @throws IllegalArgumentException If {@code tableName} is {@code null} or empty. * @throws TableServiceException If the request is rejected by the service. */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<Void> deleteTableWithResponse(String tableName, Duration timeout, Context context) { Supplier<Response<Void>> callable = () -> deleteTableWithResponse(tableName, context); try { return hasTimeout(timeout) ? 
getResultWithTimeout(THREAD_POOL.submit(callable::get), timeout) : callable.get(); } catch (InterruptedException | ExecutionException | TimeoutException e) { throw logger.logExceptionAsError(new RuntimeException(e)); } catch (RuntimeException e) { RuntimeException exception = (RuntimeException) TableUtils.mapThrowableToTableServiceException(e); if (exception instanceof TableServiceException && ((TableServiceException) exception).getResponse().getStatusCode() == 404) { HttpResponse httpResponse = ((TableServiceException) exception).getResponse(); return new SimpleResponse<>(httpResponse.getRequest(), httpResponse.getStatusCode(), httpResponse.getHeaders(), null); } throw logger.logExceptionAsError(exception); } } Response<Void> deleteTableWithResponse(String tableName, Context context) { context = TableUtils.setContext(context, true); return new SimpleResponse<>( implementation.getTables().deleteWithResponse(tableName, null, context), null); } /** * Lists all tables within the account. * * <p><strong>Code Samples</strong></p> * <p>Lists all tables. Prints out the details of the retrieved tables.</p> * <!-- src_embed com.azure.data.tables.tableServiceClient.listTables --> * <pre> * PagedIterable&lt;TableItem&gt; tableItems = tableServiceClient.listTables& * * tableItems.forEach& * System.out.printf& * </pre> * <!-- end com.azure.data.tables.tableServiceClient.listTables --> * * @return A {@link PagedIterable} containing all tables within the account. * * @throws TableServiceException If the request is rejected by the service. */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedIterable<TableItem> listTables() { return listTables(new ListTablesOptions(), null, null); } /** * If the {@code filter} parameter in the options is set, only tables matching the filter will be returned. If the * {@code top} parameter is set, the maximum number of returned tables per page will be limited to that value. 
* * <p><strong>Code Samples</strong></p> * <p>Lists all tables that match the filter. Prints out the details of the retrieved tables.</p> * <!-- src_embed com.azure.data.tables.tableServiceClient.listTables * <pre> * ListTablesOptions options = new ListTablesOptions& * * PagedIterable&lt;TableItem&gt; retrievedTableItems = tableServiceClient.listTables& * new Context& * * retrievedTableItems.forEach& * System.out.printf& * </pre> * <!-- end com.azure.data.tables.tableServiceClient.listTables * * @param options The {@code filter} and {@code top} OData query options to apply to this operation. * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional {@link Context} that is passed through the {@link HttpPipeline HTTP pipeline} during * the service call. * * @return A {@link PagedIterable} containing matching tables within the account. * * @throws IllegalArgumentException If one or more of the OData query options in {@code options} is malformed. * @throws TableServiceException If the request is rejected by the service. 
*/ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedIterable<TableItem> listTables(ListTablesOptions options, Duration timeout, Context context) { Supplier<PagedIterable<TableItem>> callable = () -> listTables(options, context); return callWithOptionalTimeout(callable, THREAD_POOL, timeout, logger); } private PagedIterable<TableItem> listTables(ListTablesOptions options, Context context) { return new PagedIterable<TableItem>( () -> listTablesFirstPage(context, options), token -> listTablesNextPage(token, context, options) ); } private PagedResponse<TableItem> listTablesFirstPage(Context context, ListTablesOptions options) { return listTables(null, context, options); } private PagedResponse<TableItem> listTablesNextPage(String token, Context context, ListTablesOptions options) { return listTables(token, context, options); } private PagedResponse<TableItem> listTables(String nextTableName, Context context, ListTablesOptions options) { context = TableUtils.setContext(context, true); QueryOptions queryOptions = new QueryOptions() .setFilter(options.getFilter()) .setTop(options.getTop()) .setFormat(OdataMetadataFormat.APPLICATION_JSON_ODATA_FULLMETADATA); ResponseBase<TablesQueryHeaders, TableQueryResponse> response = implementation.getTables().queryWithResponse(null, nextTableName, queryOptions, context); TableQueryResponse tableQueryResponse = response.getValue(); if (tableQueryResponse == null) { return null; } List<TableResponseProperties> tableResponsePropertiesList = tableQueryResponse.getValue(); if (tableResponsePropertiesList == null) { return null; } final List<TableItem> tables = tableResponsePropertiesList.stream() .map(TableItemAccessHelper::createItem).collect(Collectors.toList()); return new TablePaged(response, tables, response.getDeserializedHeaders().getXMsContinuationNextTableName()); } /** * Gets the properties of the account's Table service, including properties for Analytics and CORS (Cross-Origin * Resource Sharing) rules. 
* * <p>This operation is only supported on Azure Storage endpoints.</p> * * <p><strong>Code Samples</strong></p> * <p>Gets the properties of the account's Table service.</p> * <!-- src_embed com.azure.data.tables.tableServiceClient.getProperties --> * <pre> * TableServiceProperties properties = tableServiceClient.getProperties& * * System.out.print& * </pre> * <!-- end com.azure.data.tables.tableServiceClient.getProperties --> * * @return The {@link TableServiceProperties properties} of the account's Table service. * * @throws TableServiceException If the request is rejected by the service. */ @ServiceMethod(returns = ReturnType.SINGLE) public TableServiceProperties getProperties() { return getPropertiesWithResponse(null, null).getValue(); } /** * Gets the properties of the account's Table service, including properties for Analytics and CORS (Cross-Origin * Resource Sharing) rules. * * <p>This operation is only supported on Azure Storage endpoints.</p> * * <p><strong>Code Samples</strong></p> * <p>Gets the properties of the account's Table service. Prints out the details of the * {@link Response HTTP response}.</p> * <!-- src_embed com.azure.data.tables.tableServiceClient.getPropertiesWithResponse * <pre> * Response&lt;TableServiceProperties&gt; response = * tableServiceClient.getPropertiesWithResponse& * * System.out.printf& * </pre> * <!-- end com.azure.data.tables.tableServiceClient.getPropertiesWithResponse * * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional {@link Context} that is passed through the {@link HttpPipeline HTTP pipeline} during * the service call. * * @return The {@link Response HTTP response} and the {@link TableServiceProperties properties} of the account's * Table service. * * @throws TableServiceException If the request is rejected by the service. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Response<TableServiceProperties> getPropertiesWithResponse(Duration timeout, Context context) { Supplier<Response<TableServiceProperties>> callable = () -> getPropertiesWithResponse(context); return callWithOptionalTimeout(callable, THREAD_POOL, timeout, logger); } Response<TableServiceProperties> getPropertiesWithResponse(Context context) { context = TableUtils.setContext(context, true); Response<com.azure.data.tables.implementation.models.TableServiceProperties> response = this.implementation.getServices().getPropertiesWithResponse(null, null, context); return new SimpleResponse<>(response, TableUtils.toTableServiceProperties(response.getValue())); } /** * Sets the properties of the account's Table service, including properties for Analytics and CORS (Cross-Origin * Resource Sharing) rules. * * <p>This operation is only supported on Azure Storage endpoints.</p> * * <p><strong>Code Samples</strong></p> * <p>Sets the properties of the account's Table service.</p> * <!-- src_embed com.azure.data.tables.tableServiceClient.setProperties * <pre> * TableServiceProperties properties = new TableServiceProperties& * .setHourMetrics& * .setVersion& * .setEnabled& * .setIncludeApis& * .setRetentionPolicy& * .setEnabled& * .setDaysToRetain& * .setLogging& * .setAnalyticsVersion& * .setReadLogged& * .setRetentionPolicy& * .setEnabled& * .setDaysToRetain& * * tableServiceClient.setProperties& * * System.out.printf& * </pre> * <!-- end com.azure.data.tables.tableServiceClient.setProperties * * @param tableServiceProperties The {@link TableServiceProperties} to set. * * @throws TableServiceException If the request is rejected by the service. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public void setProperties(TableServiceProperties tableServiceProperties) { setPropertiesWithResponse(tableServiceProperties, null, null); } /** * Sets the properties of an account's Table service, including properties for Analytics and CORS (Cross-Origin * Resource Sharing) rules. * * <p>This operation is only supported on Azure Storage endpoints.</p> * * <p><strong>Code Samples</strong></p> * <p>Sets the properties of the account's Table service. Prints out the details of the * {@link Response HTTP response}.</p> * <!-- src_embed com.azure.data.tables.tableServiceClient.setPropertiesWithResponse * <pre> * TableServiceProperties myProperties = new TableServiceProperties& * .setHourMetrics& * .setVersion& * .setEnabled& * .setIncludeApis& * .setRetentionPolicy& * .setEnabled& * .setDaysToRetain& * .setLogging& * .setAnalyticsVersion& * .setReadLogged& * .setRetentionPolicy& * .setEnabled& * .setDaysToRetain& * * Response&lt;Void&gt; response = tableServiceClient.setPropertiesWithResponse& * new Context& * * System.out.printf& * </pre> * <!-- end com.azure.data.tables.tableServiceClient.setPropertiesWithResponse * * @param tableServiceProperties The {@link TableServiceProperties} to set. * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional {@link Context} that is passed through the {@link HttpPipeline HTTP pipeline} during * the service call. * * @return The {@link Response HTTP response}. * * @throws TableServiceException If the request is rejected by the service. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Response<Void> setPropertiesWithResponse(TableServiceProperties tableServiceProperties, Duration timeout, Context context) { Supplier<Response<Void>> callable = () -> setPropertiesWithResponse(tableServiceProperties, context); return callWithOptionalTimeout(callable, THREAD_POOL, timeout, logger); } Response<Void> setPropertiesWithResponse(TableServiceProperties tableServiceProperties, Context context) { context = TableUtils.setContext(context, true); return new SimpleResponse<>(this.implementation.getServices() .setPropertiesWithResponse(TableUtils.toImplTableServiceProperties(tableServiceProperties), null, null, context), null); } /** * Retrieves statistics related to replication for the account's Table service. It is only available on the * secondary location endpoint when read-access geo-redundant replication is enabled for the account. * * <p>This operation is only supported on Azure Storage endpoints.</p> * * <p><strong>Code Samples</strong></p> * <p>Gets the replication statistics of the account's Table service.</p> * <!-- src_embed com.azure.data.tables.tableServiceClient.getStatistics --> * <pre> * TableServiceStatistics statistics = tableServiceClient.getStatistics& * * System.out.print& * </pre> * <!-- end com.azure.data.tables.tableServiceClient.getStatistics --> * * @return {@link TableServiceStatistics Statistics} for the account's Table service. * * @throws TableServiceException If the request is rejected by the service. */ @ServiceMethod(returns = ReturnType.SINGLE) public TableServiceStatistics getStatistics() { return getStatisticsWithResponse(null, null).getValue(); } /** * Retrieves statistics related to replication for the account's Table service. It is only available on the * secondary location endpoint when read-access geo-redundant replication is enabled for the account. 
* * <p>This operation is only supported on Azure Storage endpoints.</p> * * <p><strong>Code Samples</strong></p> * <p>Gets the replication statistics of the account's Table service. Prints out the details of the * {@link Response HTTP response}.</p> * <!-- src_embed com.azure.data.tables.tableServiceClient.getStatisticsWithResponse * <pre> * Response&lt;TableServiceStatistics&gt; response = tableServiceClient.getStatisticsWithResponse& * new Context& * * System.out.printf& * response.getStatusCode& * </pre> * <!-- end com.azure.data.tables.tableServiceClient.getStatisticsWithResponse * * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional {@link Context} that is passed through the {@link HttpPipeline HTTP pipeline} during * the service call. * * @return An {@link Response HTTP response} containing {@link TableServiceStatistics statistics} for the * account's Table service. * * @throws TableServiceException If the request is rejected by the service. */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<TableServiceStatistics> getStatisticsWithResponse(Duration timeout, Context context) { Supplier<Response<TableServiceStatistics>> callable = () -> getStatisticsWithResponse(context); return callWithOptionalTimeout(callable, THREAD_POOL, timeout, logger); } Response<TableServiceStatistics> getStatisticsWithResponse(Context context) { context = TableUtils.setContext(context, true); Response<TableServiceStats> response = this.implementation.getServices().getStatisticsWithResponse( null, null, context); return new SimpleResponse<>(response, TableUtils.toTableServiceStatistics(response.getValue())); } }
nit: Can we make this `requestResponse.getHeaders().remove(...)`? Semantically the same but clearer than "what does null mean".
public static HttpResponse resetTestProxyData(HttpResponse response) { HttpRequest responseRequest = response.getRequest(); HttpHeaders requestHeaders = responseRequest.getHeaders(); try { URL originalUrl = UrlBuilder.parse(requestHeaders.getValue(X_RECORDING_UPSTREAM_BASE_URI)) .toUrl(); UrlBuilder currentUrl = UrlBuilder.parse(responseRequest.getUrl()); currentUrl.setScheme(originalUrl.getProtocol()); currentUrl.setHost(originalUrl.getHost()); int port = originalUrl.getPort(); if (port == -1) { currentUrl.setPort(""); } else { currentUrl.setPort(port); } responseRequest.setUrl(currentUrl.toUrl()); responseRequest.setHeader(X_RECORDING_UPSTREAM_BASE_URI, null); responseRequest.setHeader(X_RECORDING_MODE, null); responseRequest.setHeader(X_RECORDING_SKIP, null); responseRequest.setHeader(X_RECORDING_ID, null); return response; } catch (MalformedURLException e) { throw new RuntimeException(e); } }
responseRequest.setHeader(X_RECORDING_ID, null);
public static HttpResponse resetTestProxyData(HttpResponse response) { HttpRequest responseRequest = response.getRequest(); HttpHeaders requestHeaders = responseRequest.getHeaders(); try { URL originalUrl = UrlBuilder.parse(requestHeaders.getValue(X_RECORDING_UPSTREAM_BASE_URI)) .toUrl(); UrlBuilder currentUrl = UrlBuilder.parse(responseRequest.getUrl()); currentUrl.setScheme(originalUrl.getProtocol()); currentUrl.setHost(originalUrl.getHost()); int port = originalUrl.getPort(); if (port == -1) { currentUrl.setPort(""); } else { currentUrl.setPort(port); } responseRequest.setUrl(currentUrl.toUrl()); requestHeaders.remove(X_RECORDING_UPSTREAM_BASE_URI); requestHeaders.remove(X_RECORDING_MODE); requestHeaders.remove(X_RECORDING_SKIP); requestHeaders.remove(X_RECORDING_ID); return response; } catch (MalformedURLException e) { throw new RuntimeException(e); } }
class TestProxyUtils { private static final ClientLogger LOGGER = new ClientLogger(TestProxyUtils.class); private static final HttpHeaderName X_RECORDING_SKIP = HttpHeaderName.fromString("x-recording-skip"); private static final List<String> JSON_PROPERTIES_TO_REDACT = new ArrayList<String>( Arrays.asList("authHeader", "accountKey", "accessToken", "accountName", "applicationId", "apiKey", "connectionString", "url", "host", "password", "userName")); private static final Map<String, String> HEADER_KEY_REGEX_TO_REDACT = new HashMap<String, String>() {{ put("Operation-Location", URL_REGEX); put("operation-location", URL_REGEX); put("Location", URL_REGEX); }}; private static final List<String> BODY_REGEX_TO_REDACT = new ArrayList<>(Arrays.asList("(?:<Value>)(?<secret>.*)(?:</Value>)", "(?:Password=)(?<secret>.*)(?:;)", "(?:User ID=)(?<secret>.*)(?:;)", "(?:<PrimaryKey>)(?<secret>.*)(?:</PrimaryKey>)", "(?:<SecondaryKey>)(?<secret>.*)(?:</SecondaryKey>)")); private static final String URL_REGEX = "(?<=http: private static final List<String> HEADER_KEYS_TO_REDACT = new ArrayList<>(Arrays.asList("Ocp-Apim-Subscription-Key", "api-key", "x-api-key")); private static final String REDACTED_VALUE = "REDACTED"; private static final String DELEGATION_KEY_CLIENTID_REGEX = "(?:<SignedOid>)(?<secret>.*)(?:</SignedOid>)"; private static final String DELEGATION_KEY_TENANTID_REGEX = "(?:<SignedTid>)(?<secret>.*)(?:</SignedTid>)"; private static final HttpHeaderName X_RECORDING_UPSTREAM_BASE_URI = HttpHeaderName.fromString("x-recording-upstream-base-uri"); private static final HttpHeaderName X_RECORDING_MODE = HttpHeaderName.fromString("x-recording-mode"); private static final HttpHeaderName X_REQUEST_MISMATCH_ERROR = HttpHeaderName.fromString("x-request-mismatch-error"); private static final HttpHeaderName X_REQUEST_KNOWN_EXCEPTION_ERROR = HttpHeaderName.fromString("x-request-known-exception-error"); private static final HttpHeaderName X_REQUEST_EXCEPTION_EXCEPTION_ERROR = 
HttpHeaderName.fromString("x-request-exception-exception-error"); private static final HttpHeaderName X_ABSTRACTION_IDENTIFIER = HttpHeaderName.fromString("x-abstraction-identifier"); private static volatile URL proxyUrl; /** * Adds headers required for communication with the test proxy. * * @param request The request to add headers to. * @param proxyUrl The {@link URL} the proxy lives at. * @param xRecordingId The x-recording-id value for the current session. * @param mode The current test proxy mode. * @param skipRecordingRequestBody Flag indicating to skip recording request bodies when tests run in Record mode. * @throws RuntimeException Construction of one of the URLs failed. */ public static void changeHeaders(HttpRequest request, URL proxyUrl, String xRecordingId, String mode, boolean skipRecordingRequestBody) { HttpHeader upstreamUri = request.getHeaders().get(X_RECORDING_UPSTREAM_BASE_URI); UrlBuilder proxyUrlBuilder = UrlBuilder.parse(request.getUrl()); proxyUrlBuilder.setScheme(proxyUrl.getProtocol()); proxyUrlBuilder.setHost(proxyUrl.getHost()); if (proxyUrl.getPort() != -1) { proxyUrlBuilder.setPort(proxyUrl.getPort()); } UrlBuilder originalUrlBuilder = UrlBuilder.parse(request.getUrl()); originalUrlBuilder.setPath(""); originalUrlBuilder.setQuery(""); try { URL originalUrl = originalUrlBuilder.toUrl(); HttpHeaders headers = request.getHeaders(); if (upstreamUri == null) { headers.set(X_RECORDING_UPSTREAM_BASE_URI, originalUrl.toString()); headers.set(X_RECORDING_MODE, mode); headers.set(X_RECORDING_ID, xRecordingId); if (mode.equals(RECORD_MODE) && skipRecordingRequestBody) { headers.set(X_RECORDING_SKIP, "request-body"); } } request.setUrl(proxyUrlBuilder.toUrl()); } catch (MalformedURLException e) { throw new RuntimeException(e); } } /** * Sets the response URL back to the original URL before returning it through the pipeline. * @param response The {@link HttpResponse} to modify. * @return The modified response. 
* @throws RuntimeException Construction of one of the URLs failed. */ /** * Gets the process name of the test proxy binary. * @return The platform specific process name. * @throws UnsupportedOperationException The current OS is not recognized. */ public static String getProxyProcessName() { String osName = System.getProperty("os.name").toLowerCase(Locale.ROOT); if (osName.contains("windows")) { return "Azure.Sdk.Tools.TestProxy.exe"; } else if (osName.contains("linux")) { return "Azure.Sdk.Tools.TestProxy"; } else if (osName.contains("mac os x")) { return "Azure.Sdk.Tools.TestProxy"; } else { throw new UnsupportedOperationException(); } } /** * Checks the return from a request through the test proxy for special error headers. * @param httpResponse The {@link HttpResponse} from the test proxy. */ public static void checkForTestProxyErrors(HttpResponse httpResponse) { String error = httpResponse.getHeaderValue(X_REQUEST_MISMATCH_ERROR); if (error == null) { error = httpResponse.getHeaderValue(X_REQUEST_KNOWN_EXCEPTION_ERROR); } if (error == null) { error = httpResponse.getHeaderValue(X_REQUEST_EXCEPTION_EXCEPTION_ERROR); } if (error != null) { throw LOGGER.logExceptionAsError(new RuntimeException("Test proxy exception: " + new String(Base64.getDecoder().decode(error), StandardCharsets.UTF_8))); } } /** * Finds the test proxy version in the source tree. * @return The version string to use. * @throws RuntimeException The eng folder could not be located in the repo. * @throws UncheckedIOException The version file could not be read properly. */ public static String getTestProxyVersion() { Path rootPath = TestUtils.getRepoRoot(); Path versionFile = Paths.get("eng", "common", "testproxy", "target_version.txt"); rootPath = rootPath.resolve(versionFile); try { return Files.readAllLines(rootPath).get(0).replace(System.getProperty("line.separator"), ""); } catch (IOException e) { throw new UncheckedIOException(e); } } /** * Gets the current URL for the test proxy. 
* @return The {@link URL} location of the test proxy. * @throws RuntimeException The URL could not be constructed. */ public static URL getProxyUrl() { if (proxyUrl != null) { return proxyUrl; } UrlBuilder builder = new UrlBuilder(); builder.setHost("localhost"); builder.setScheme("http"); builder.setPort(5000); try { proxyUrl = builder.toUrl(); return proxyUrl; } catch (MalformedURLException e) { throw new RuntimeException(e); } } /** * Registers the default set of sanitizers for sanitizing request and responses * @return the list of default sanitizers to be added. */ public static List<TestProxySanitizer> loadSanitizers() { List<TestProxySanitizer> sanitizers = new ArrayList<>(addDefaultRegexSanitizers()); sanitizers.add(addDefaultUrlSanitizer()); sanitizers.addAll(addDefaultBodySanitizers()); sanitizers.addAll(addDefaultHeaderKeySanitizers()); return sanitizers; } private static String createCustomMatcherRequestBody(CustomMatcher customMatcher) { return String.format("{\"ignoredHeaders\":\"%s\",\"excludedHeaders\":\"%s\",\"compareBodies\":%s,\"ignoredQueryParameters\":\"%s\", \"ignoreQueryOrdering\":%s}", getCommaSeperatedString(customMatcher.getHeadersKeyOnlyMatch()), getCommaSeperatedString(customMatcher.getExcludedHeaders()), customMatcher.isComparingBodies(), getCommaSeperatedString(customMatcher.getIgnoredQueryParameters()), customMatcher.isQueryOrderingIgnored()); } private static String getCommaSeperatedString(List<String> stringList) { if (stringList == null) { return null; } return stringList.stream() .filter(s -> s != null && !s.isEmpty()) .collect(Collectors.joining(",")); } private static String createBodyJsonKeyRequestBody(String jsonKey, String regex, String redactedValue) { if (regex == null) { return String.format("{\"value\":\"%s\",\"jsonPath\":\"%s\"}", redactedValue, jsonKey); } else { return String.format("{\"value\":\"%s\",\"jsonPath\":\"%s\",\"regex\":\"%s\"}", redactedValue, jsonKey, regex); } } private static String 
createRegexRequestBody(String key, String regex, String value, String groupForReplace) { if (key == null) { if (groupForReplace == null) { return String.format("{\"value\":\"%s\",\"regex\":\"%s\"}", value, regex); } else { return String.format("{\"value\":\"%s\",\"regex\":\"%s\",\"groupForReplace\":\"%s\"}", value, regex, groupForReplace); } } else if (regex == null) { return String.format("{\"key\":\"%s\",\"value\":\"%s\"}", key, value); } if (groupForReplace == null) { return String.format("{\"key\":\"%s\",\"value\":\"%s\",\"regex\":\"%s\"}", key, value, regex); } else { return String.format("{\"key\":\"%s\",\"value\":\"%s\",\"regex\":\"%s\",\"groupForReplace\":\"%s\"}", key, value, regex, groupForReplace); } } /** * Creates a list of sanitizer requests to be sent to the test proxy server. * * @param sanitizers the list of sanitizers to be added. * @param proxyUrl The proxyUrl to use when constructing requests. * @return the list of sanitizer {@link HttpRequest requests} to be sent. * @throws RuntimeException if {@link TestProxySanitizerType} is not supported. 
*/ public static List<HttpRequest> getSanitizerRequests(List<TestProxySanitizer> sanitizers, URL proxyUrl) { return sanitizers.stream().map(testProxySanitizer -> { String requestBody; String sanitizerType; switch (testProxySanitizer.getType()) { case URL: sanitizerType = TestProxySanitizerType.URL.getName(); requestBody = createRegexRequestBody(null, testProxySanitizer.getRegex(), testProxySanitizer.getRedactedValue(), testProxySanitizer.getGroupForReplace()); return createHttpRequest(requestBody, sanitizerType, proxyUrl); case BODY_REGEX: sanitizerType = TestProxySanitizerType.BODY_REGEX.getName(); requestBody = createRegexRequestBody(null, testProxySanitizer.getRegex(), testProxySanitizer.getRedactedValue(), testProxySanitizer.getGroupForReplace()); return createHttpRequest(requestBody, sanitizerType, proxyUrl); case BODY_KEY: sanitizerType = TestProxySanitizerType.BODY_KEY.getName(); requestBody = createBodyJsonKeyRequestBody(testProxySanitizer.getKey(), testProxySanitizer.getRegex(), testProxySanitizer.getRedactedValue()); return createHttpRequest(requestBody, sanitizerType, proxyUrl); case HEADER: sanitizerType = HEADER.getName(); if (testProxySanitizer.getKey() == null && testProxySanitizer.getRegex() == null) { throw new RuntimeException( String.format("Missing regexKey and/or headerKey for sanitizer type {%s}", sanitizerType)); } requestBody = createRegexRequestBody(testProxySanitizer.getKey(), testProxySanitizer.getRegex(), testProxySanitizer.getRedactedValue(), testProxySanitizer.getGroupForReplace()); return createHttpRequest(requestBody, sanitizerType, proxyUrl); default: throw new RuntimeException( String.format("Sanitizer type {%s} not supported", testProxySanitizer.getType())); } }).collect(Collectors.toList()); } private static HttpRequest createHttpRequest(String requestBody, String sanitizerType, URL proxyUrl) { HttpRequest request = new HttpRequest(HttpMethod.POST, String.format("%s/Admin/AddSanitizer", proxyUrl.toString())) 
.setBody(requestBody); request.setHeader(X_ABSTRACTION_IDENTIFIER, sanitizerType); return request; } /** * Creates a {@link List} of {@link HttpRequest} to be sent to the test proxy to register matchers. * @param matchers The {@link TestProxyRequestMatcher}s to encode into requests. * @param proxyUrl The proxyUrl to use when constructing requests. * @return The {@link HttpRequest}s to send to the proxy. * @throws RuntimeException The {@link TestProxyRequestMatcher.TestProxyRequestMatcherType} is unsupported. */ public static List<HttpRequest> getMatcherRequests(List<TestProxyRequestMatcher> matchers, URL proxyUrl) { return matchers.stream().map(testProxyMatcher -> { HttpRequest request; String matcherType; switch (testProxyMatcher.getType()) { case HEADERLESS: matcherType = TestProxyRequestMatcher.TestProxyRequestMatcherType.HEADERLESS.getName(); request = new HttpRequest(HttpMethod.POST, String.format("%s/Admin/setmatcher", proxyUrl.toString())); break; case BODILESS: request = new HttpRequest(HttpMethod.POST, String.format("%s/Admin/setmatcher", proxyUrl.toString())); matcherType = TestProxyRequestMatcher.TestProxyRequestMatcherType.BODILESS.getName(); break; case CUSTOM: CustomMatcher customMatcher = (CustomMatcher) testProxyMatcher; String requestBody = createCustomMatcherRequestBody(customMatcher); matcherType = TestProxyRequestMatcher.TestProxyRequestMatcherType.CUSTOM.getName(); request = new HttpRequest(HttpMethod.POST, String.format("%s/Admin/setmatcher", proxyUrl.toString())).setBody(requestBody); break; default: throw new RuntimeException(String.format("Matcher type {%s} not supported", testProxyMatcher.getType())); } request.setHeader(X_ABSTRACTION_IDENTIFIER, matcherType); return request; }).collect(Collectors.toList()); } private static TestProxySanitizer addDefaultUrlSanitizer() { return new TestProxySanitizer(URL_REGEX, REDACTED_VALUE, TestProxySanitizerType.URL); } private static List<TestProxySanitizer> addDefaultBodySanitizers() { return 
JSON_PROPERTIES_TO_REDACT.stream() .map(jsonProperty -> new TestProxySanitizer(String.format("$..%s", jsonProperty), null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY)) .collect(Collectors.toList()); } private static List<TestProxySanitizer> addDefaultRegexSanitizers() { List<TestProxySanitizer> regexSanitizers = getUserDelegationSanitizers(); regexSanitizers.addAll(BODY_REGEX_TO_REDACT.stream() .map(bodyRegex -> new TestProxySanitizer(bodyRegex, REDACTED_VALUE, TestProxySanitizerType.BODY_REGEX).setGroupForReplace("secret")) .collect(Collectors.toList())); List<TestProxySanitizer> keyRegexSanitizers = new ArrayList<>(); HEADER_KEY_REGEX_TO_REDACT.forEach((key, regex) -> keyRegexSanitizers.add(new TestProxySanitizer(key, regex, REDACTED_VALUE, HEADER))); regexSanitizers.addAll(keyRegexSanitizers); return regexSanitizers; } private static List<TestProxySanitizer> addDefaultHeaderKeySanitizers() { return HEADER_KEYS_TO_REDACT.stream() .map(headerKey -> new TestProxySanitizer(headerKey, null, REDACTED_VALUE, HEADER)) .collect(Collectors.toList()); } private static List<TestProxySanitizer> getUserDelegationSanitizers() { List<TestProxySanitizer> userDelegationSanitizers = new ArrayList<>(); userDelegationSanitizers.add(new TestProxySanitizer(DELEGATION_KEY_CLIENTID_REGEX, REDACTED_VALUE, TestProxySanitizerType.BODY_REGEX).setGroupForReplace("secret")); userDelegationSanitizers.add(new TestProxySanitizer(DELEGATION_KEY_TENANTID_REGEX, REDACTED_VALUE, TestProxySanitizerType.BODY_REGEX).setGroupForReplace("secret")); return userDelegationSanitizers; } }
class TestProxyUtils { private static final ClientLogger LOGGER = new ClientLogger(TestProxyUtils.class); private static final HttpHeaderName X_RECORDING_SKIP = HttpHeaderName.fromString("x-recording-skip"); private static final List<String> JSON_PROPERTIES_TO_REDACT = new ArrayList<String>( Arrays.asList("authHeader", "accountKey", "accessToken", "accountName", "applicationId", "apiKey", "connectionString", "url", "host", "password", "userName")); private static final Map<String, String> HEADER_KEY_REGEX_TO_REDACT = new HashMap<String, String>() {{ put("Operation-Location", URL_REGEX); put("operation-location", URL_REGEX); }}; private static final List<String> BODY_REGEX_TO_REDACT = new ArrayList<>(Arrays.asList("(?:<Value>)(?<secret>.*)(?:</Value>)", "(?:Password=)(?<secret>.*)(?:;)", "(?:User ID=)(?<secret>.*)(?:;)", "(?:<PrimaryKey>)(?<secret>.*)(?:</PrimaryKey>)", "(?:<SecondaryKey>)(?<secret>.*)(?:</SecondaryKey>)")); private static final String URL_REGEX = "(?<=http: private static final List<String> HEADER_KEYS_TO_REDACT = new ArrayList<>(Arrays.asList("Ocp-Apim-Subscription-Key", "api-key", "x-api-key")); private static final String REDACTED_VALUE = "REDACTED"; private static final String DELEGATION_KEY_CLIENTID_REGEX = "(?:<SignedOid>)(?<secret>.*)(?:</SignedOid>)"; private static final String DELEGATION_KEY_TENANTID_REGEX = "(?:<SignedTid>)(?<secret>.*)(?:</SignedTid>)"; private static final HttpHeaderName X_RECORDING_UPSTREAM_BASE_URI = HttpHeaderName.fromString("x-recording-upstream-base-uri"); private static final HttpHeaderName X_RECORDING_MODE = HttpHeaderName.fromString("x-recording-mode"); private static final HttpHeaderName X_REQUEST_MISMATCH_ERROR = HttpHeaderName.fromString("x-request-mismatch-error"); private static final HttpHeaderName X_REQUEST_KNOWN_EXCEPTION_ERROR = HttpHeaderName.fromString("x-request-known-exception-error"); private static final HttpHeaderName X_REQUEST_EXCEPTION_EXCEPTION_ERROR = 
HttpHeaderName.fromString("x-request-exception-exception-error"); private static final HttpHeaderName X_ABSTRACTION_IDENTIFIER = HttpHeaderName.fromString("x-abstraction-identifier"); private static volatile URL proxyUrl; /** * Adds headers required for communication with the test proxy. * * @param request The request to add headers to. * @param proxyUrl The {@link URL} the proxy lives at. * @param xRecordingId The x-recording-id value for the current session. * @param mode The current test proxy mode. * @param skipRecordingRequestBody Flag indicating to skip recording request bodies when tests run in Record mode. * @throws RuntimeException Construction of one of the URLs failed. */ public static void changeHeaders(HttpRequest request, URL proxyUrl, String xRecordingId, String mode, boolean skipRecordingRequestBody) { HttpHeader upstreamUri = request.getHeaders().get(X_RECORDING_UPSTREAM_BASE_URI); UrlBuilder proxyUrlBuilder = UrlBuilder.parse(request.getUrl()); proxyUrlBuilder.setScheme(proxyUrl.getProtocol()); proxyUrlBuilder.setHost(proxyUrl.getHost()); if (proxyUrl.getPort() != -1) { proxyUrlBuilder.setPort(proxyUrl.getPort()); } UrlBuilder originalUrlBuilder = UrlBuilder.parse(request.getUrl()); originalUrlBuilder.setPath(""); originalUrlBuilder.setQuery(""); try { URL originalUrl = originalUrlBuilder.toUrl(); HttpHeaders headers = request.getHeaders(); if (upstreamUri == null) { headers.set(X_RECORDING_UPSTREAM_BASE_URI, originalUrl.toString()); headers.set(X_RECORDING_MODE, mode); headers.set(X_RECORDING_ID, xRecordingId); if (mode.equals(RECORD_MODE) && skipRecordingRequestBody) { headers.set(X_RECORDING_SKIP, "request-body"); } } request.setUrl(proxyUrlBuilder.toUrl()); } catch (MalformedURLException e) { throw new RuntimeException(e); } } /** * Sets the response URL back to the original URL before returning it through the pipeline. * @param response The {@link HttpResponse} to modify. * @return The modified response. 
* @throws RuntimeException Construction of one of the URLs failed. */ /** * Gets the process name of the test proxy binary. * @return The platform specific process name. * @throws UnsupportedOperationException The current OS is not recognized. */ public static String getProxyProcessName() { String osName = System.getProperty("os.name").toLowerCase(Locale.ROOT); if (osName.contains("windows")) { return "Azure.Sdk.Tools.TestProxy.exe"; } else if (osName.contains("linux")) { return "Azure.Sdk.Tools.TestProxy"; } else if (osName.contains("mac os x")) { return "Azure.Sdk.Tools.TestProxy"; } else { throw new UnsupportedOperationException(); } } /** * Checks the return from a request through the test proxy for special error headers. * @param httpResponse The {@link HttpResponse} from the test proxy. */ public static void checkForTestProxyErrors(HttpResponse httpResponse) { String error = httpResponse.getHeaderValue(X_REQUEST_MISMATCH_ERROR); if (error == null) { error = httpResponse.getHeaderValue(X_REQUEST_KNOWN_EXCEPTION_ERROR); } if (error == null) { error = httpResponse.getHeaderValue(X_REQUEST_EXCEPTION_EXCEPTION_ERROR); } if (error != null) { throw LOGGER.logExceptionAsError(new RuntimeException("Test proxy exception: " + new String(Base64.getDecoder().decode(error), StandardCharsets.UTF_8))); } } /** * Finds the test proxy version in the source tree. * @return The version string to use. * @throws RuntimeException The eng folder could not be located in the repo. * @throws UncheckedIOException The version file could not be read properly. */ public static String getTestProxyVersion() { Path rootPath = TestUtils.getRepoRoot(); Path versionFile = Paths.get("eng", "common", "testproxy", "target_version.txt"); rootPath = rootPath.resolve(versionFile); try { return Files.readAllLines(rootPath).get(0).replace(System.getProperty("line.separator"), ""); } catch (IOException e) { throw new UncheckedIOException(e); } } /** * Gets the current URL for the test proxy. 
* @return The {@link URL} location of the test proxy. * @throws RuntimeException The URL could not be constructed. */ public static URL getProxyUrl() { if (proxyUrl != null) { return proxyUrl; } UrlBuilder builder = new UrlBuilder(); builder.setHost("localhost"); builder.setScheme("http"); builder.setPort(5000); try { proxyUrl = builder.toUrl(); return proxyUrl; } catch (MalformedURLException e) { throw new RuntimeException(e); } } /** * Registers the default set of sanitizers for sanitizing request and responses * @return the list of default sanitizers to be added. */ public static List<TestProxySanitizer> loadSanitizers() { List<TestProxySanitizer> sanitizers = new ArrayList<>(addDefaultRegexSanitizers()); sanitizers.add(addDefaultUrlSanitizer()); sanitizers.addAll(addDefaultBodySanitizers()); sanitizers.addAll(addDefaultHeaderKeySanitizers()); return sanitizers; } private static String createCustomMatcherRequestBody(CustomMatcher customMatcher) { return String.format("{\"ignoredHeaders\":\"%s\",\"excludedHeaders\":\"%s\",\"compareBodies\":%s,\"ignoredQueryParameters\":\"%s\", \"ignoreQueryOrdering\":%s}", getCommaSeperatedString(customMatcher.getHeadersKeyOnlyMatch()), getCommaSeperatedString(customMatcher.getExcludedHeaders()), customMatcher.isComparingBodies(), getCommaSeperatedString(customMatcher.getIgnoredQueryParameters()), customMatcher.isQueryOrderingIgnored()); } private static String getCommaSeperatedString(List<String> stringList) { if (stringList == null) { return null; } return stringList.stream() .filter(s -> s != null && !s.isEmpty()) .collect(Collectors.joining(",")); } private static String createBodyJsonKeyRequestBody(String jsonKey, String regex, String redactedValue) { if (regex == null) { return String.format("{\"value\":\"%s\",\"jsonPath\":\"%s\"}", redactedValue, jsonKey); } else { return String.format("{\"value\":\"%s\",\"jsonPath\":\"%s\",\"regex\":\"%s\"}", redactedValue, jsonKey, regex); } } private static String 
createRegexRequestBody(String key, String regex, String value, String groupForReplace) { if (key == null) { if (groupForReplace == null) { return String.format("{\"value\":\"%s\",\"regex\":\"%s\"}", value, regex); } else { return String.format("{\"value\":\"%s\",\"regex\":\"%s\",\"groupForReplace\":\"%s\"}", value, regex, groupForReplace); } } else if (regex == null) { return String.format("{\"key\":\"%s\",\"value\":\"%s\"}", key, value); } if (groupForReplace == null) { return String.format("{\"key\":\"%s\",\"value\":\"%s\",\"regex\":\"%s\"}", key, value, regex); } else { return String.format("{\"key\":\"%s\",\"value\":\"%s\",\"regex\":\"%s\",\"groupForReplace\":\"%s\"}", key, value, regex, groupForReplace); } } /** * Creates a list of sanitizer requests to be sent to the test proxy server. * * @param sanitizers the list of sanitizers to be added. * @param proxyUrl The proxyUrl to use when constructing requests. * @return the list of sanitizer {@link HttpRequest requests} to be sent. * @throws RuntimeException if {@link TestProxySanitizerType} is not supported. 
*/ public static List<HttpRequest> getSanitizerRequests(List<TestProxySanitizer> sanitizers, URL proxyUrl) { return sanitizers.stream().map(testProxySanitizer -> { String requestBody; String sanitizerType; switch (testProxySanitizer.getType()) { case URL: sanitizerType = TestProxySanitizerType.URL.getName(); requestBody = createRegexRequestBody(null, testProxySanitizer.getRegex(), testProxySanitizer.getRedactedValue(), testProxySanitizer.getGroupForReplace()); return createHttpRequest(requestBody, sanitizerType, proxyUrl); case BODY_REGEX: sanitizerType = TestProxySanitizerType.BODY_REGEX.getName(); requestBody = createRegexRequestBody(null, testProxySanitizer.getRegex(), testProxySanitizer.getRedactedValue(), testProxySanitizer.getGroupForReplace()); return createHttpRequest(requestBody, sanitizerType, proxyUrl); case BODY_KEY: sanitizerType = TestProxySanitizerType.BODY_KEY.getName(); requestBody = createBodyJsonKeyRequestBody(testProxySanitizer.getKey(), testProxySanitizer.getRegex(), testProxySanitizer.getRedactedValue()); return createHttpRequest(requestBody, sanitizerType, proxyUrl); case HEADER: sanitizerType = HEADER.getName(); if (testProxySanitizer.getKey() == null && testProxySanitizer.getRegex() == null) { throw new RuntimeException( String.format("Missing regexKey and/or headerKey for sanitizer type {%s}", sanitizerType)); } requestBody = createRegexRequestBody(testProxySanitizer.getKey(), testProxySanitizer.getRegex(), testProxySanitizer.getRedactedValue(), testProxySanitizer.getGroupForReplace()); return createHttpRequest(requestBody, sanitizerType, proxyUrl); default: throw new RuntimeException( String.format("Sanitizer type {%s} not supported", testProxySanitizer.getType())); } }).collect(Collectors.toList()); } private static HttpRequest createHttpRequest(String requestBody, String sanitizerType, URL proxyUrl) { HttpRequest request = new HttpRequest(HttpMethod.POST, String.format("%s/Admin/AddSanitizer", proxyUrl.toString())) 
.setBody(requestBody); request.setHeader(X_ABSTRACTION_IDENTIFIER, sanitizerType); return request; } /** * Creates a {@link List} of {@link HttpRequest} to be sent to the test proxy to register matchers. * @param matchers The {@link TestProxyRequestMatcher}s to encode into requests. * @param proxyUrl The proxyUrl to use when constructing requests. * @return The {@link HttpRequest}s to send to the proxy. * @throws RuntimeException The {@link TestProxyRequestMatcher.TestProxyRequestMatcherType} is unsupported. */ public static List<HttpRequest> getMatcherRequests(List<TestProxyRequestMatcher> matchers, URL proxyUrl) { return matchers.stream().map(testProxyMatcher -> { HttpRequest request; String matcherType; switch (testProxyMatcher.getType()) { case HEADERLESS: matcherType = TestProxyRequestMatcher.TestProxyRequestMatcherType.HEADERLESS.getName(); request = new HttpRequest(HttpMethod.POST, String.format("%s/Admin/setmatcher", proxyUrl.toString())); break; case BODILESS: request = new HttpRequest(HttpMethod.POST, String.format("%s/Admin/setmatcher", proxyUrl.toString())); matcherType = TestProxyRequestMatcher.TestProxyRequestMatcherType.BODILESS.getName(); break; case CUSTOM: CustomMatcher customMatcher = (CustomMatcher) testProxyMatcher; String requestBody = createCustomMatcherRequestBody(customMatcher); matcherType = TestProxyRequestMatcher.TestProxyRequestMatcherType.CUSTOM.getName(); request = new HttpRequest(HttpMethod.POST, String.format("%s/Admin/setmatcher", proxyUrl.toString())).setBody(requestBody); break; default: throw new RuntimeException(String.format("Matcher type {%s} not supported", testProxyMatcher.getType())); } request.setHeader(X_ABSTRACTION_IDENTIFIER, matcherType); return request; }).collect(Collectors.toList()); } private static TestProxySanitizer addDefaultUrlSanitizer() { return new TestProxySanitizer(URL_REGEX, REDACTED_VALUE, TestProxySanitizerType.URL); } private static List<TestProxySanitizer> addDefaultBodySanitizers() { return 
JSON_PROPERTIES_TO_REDACT.stream() .map(jsonProperty -> new TestProxySanitizer(String.format("$..%s", jsonProperty), null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY)) .collect(Collectors.toList()); } private static List<TestProxySanitizer> addDefaultRegexSanitizers() { List<TestProxySanitizer> regexSanitizers = getUserDelegationSanitizers(); regexSanitizers.addAll(BODY_REGEX_TO_REDACT.stream() .map(bodyRegex -> new TestProxySanitizer(bodyRegex, REDACTED_VALUE, TestProxySanitizerType.BODY_REGEX).setGroupForReplace("secret")) .collect(Collectors.toList())); List<TestProxySanitizer> keyRegexSanitizers = new ArrayList<>(); HEADER_KEY_REGEX_TO_REDACT.forEach((key, regex) -> keyRegexSanitizers.add(new TestProxySanitizer(key, regex, REDACTED_VALUE, HEADER))); regexSanitizers.addAll(keyRegexSanitizers); return regexSanitizers; } private static List<TestProxySanitizer> addDefaultHeaderKeySanitizers() { return HEADER_KEYS_TO_REDACT.stream() .map(headerKey -> new TestProxySanitizer(headerKey, null, REDACTED_VALUE, HEADER)) .collect(Collectors.toList()); } private static List<TestProxySanitizer> getUserDelegationSanitizers() { List<TestProxySanitizer> userDelegationSanitizers = new ArrayList<>(); userDelegationSanitizers.add(new TestProxySanitizer(DELEGATION_KEY_CLIENTID_REGEX, REDACTED_VALUE, TestProxySanitizerType.BODY_REGEX).setGroupForReplace("secret")); userDelegationSanitizers.add(new TestProxySanitizer(DELEGATION_KEY_TENANTID_REGEX, REDACTED_VALUE, TestProxySanitizerType.BODY_REGEX).setGroupForReplace("secret")); return userDelegationSanitizers; } }
Does this need to check the return value here? As written, this policy inspects the outgoing request before `TestProxyRecordingPolicy` modifies it, so the assertion runs too early — consider asserting on the request attached to the returned response instead.
public void testResetTestProxyData() throws MalformedURLException { HttpURLConnectionHttpClient client = new HttpURLConnectionHttpClient(); HttpPipelinePolicy auditorPolicy = (context, next) -> { HttpHeaders headers = context.getHttpRequest().getHeaders(); String headerValue = headers.getValue(HttpHeaderName.fromString("x-recording-upstream-base-uri")); Assertions.assertNull(headerValue); return next.process(); }; final HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(client) .policies(auditorPolicy, interceptorManager.getRecordPolicy()) .build(); pipeline.sendSync(new HttpRequest(HttpMethod.GET, new URL("http: }
return next.process();
public void testResetTestProxyData() throws MalformedURLException { HttpURLConnectionHttpClient client = new HttpURLConnectionHttpClient(); final HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(client) .policies(interceptorManager.getRecordPolicy()) .build(); try (HttpResponse response = pipeline.sendSync( new HttpRequest(HttpMethod.GET, new URL("http: assertEquals(200, response.getStatusCode()); HttpHeaders headers = response.getRequest().getHeaders(); assertNull(headers.get(HttpHeaderName.fromString("x-recording-upstream-base-uri"))); assertNull(headers.get(HttpHeaderName.fromString("x-recording-mode"))); assertNull(headers.get(HttpHeaderName.fromString("x-recording-id"))); assertNull(headers.get(HttpHeaderName.fromString("x-recording-skip"))); } }
class TestProxyTests extends TestProxyTestBase { public static final String TEST_DATA = "{\"test\":\"proxy\"}"; static TestProxyTestServer server; private static final ObjectMapper RECORD_MAPPER = new ObjectMapper().enable(SerializationFeature.INDENT_OUTPUT); private static final List<TestProxySanitizer> CUSTOM_SANITIZER = new ArrayList<>(); public static final String REDACTED = "REDACTED"; private static final HttpHeaderName OCP_APIM_SUBSCRIPTION_KEY = HttpHeaderName.fromString("Ocp-Apim-Subscription-Key"); static { CUSTOM_SANITIZER.add(new TestProxySanitizer("$..modelId", null, REDACTED, TestProxySanitizerType.BODY_KEY)); CUSTOM_SANITIZER.add(new TestProxySanitizer("TableName\\\"*:*\\\"(?<tablename>.*)\\\"", REDACTED, TestProxySanitizerType.BODY_REGEX).setGroupForReplace("tablename")); } @BeforeAll public static void setupClass() { server = new TestProxyTestServer(); } @AfterAll public static void teardownClass() { server.close(); } @Test @Tag("Record") public void testBasicRecord() { HttpURLConnectionHttpClient client = new HttpURLConnectionHttpClient(); HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(client) .policies(interceptorManager.getRecordPolicy()).build(); URL url = null; try { url = new UrlBuilder().setHost("localhost").setPort(3000).setScheme("http").toUrl(); } catch (MalformedURLException e) { throw new RuntimeException(e); } testResourceNamer.randomName("test", 10); testResourceNamer.now(); HttpRequest request = new HttpRequest(HttpMethod.GET, url); try (HttpResponse response = pipeline.sendSync(request, Context.NONE)) { assertEquals(200, response.getStatusCode()); } } @Test @Tag("Playback") public void testOrdering() { String name = testResourceNamer.randomName("test", 10); assertEquals("test32950", name); } @Test @Tag("Record") @DoNotRecord public void testDoNotRecord() { testResourceNamer.now(); } @Test @Tag("Playback") @DoNotRecord public void testDoNotPlayback() { testResourceNamer.now(); } @Test @Tag("Playback") public void 
testMismatch() { HttpClient client = interceptorManager.getPlaybackClient(); URL url; try { url = new UrlBuilder().setHost("localhost").setPort(3000).setScheme("http").setPath("first/path").toUrl(); } catch (MalformedURLException e) { throw new RuntimeException(e); } HttpRequest request = new HttpRequest(HttpMethod.GET, url); RuntimeException thrown = assertThrows(RuntimeException.class, () -> client.sendSync(request, Context.NONE)); assertTrue(thrown.getMessage().contains("Uri doesn't match")); } @Test @Tag("Record") @Test @Tag("Record") @RecordWithoutRequestBody public void testRecordWithPath() { HttpURLConnectionHttpClient client = new HttpURLConnectionHttpClient(); HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(client) .policies(interceptorManager.getRecordPolicy()).build(); URL url; try { url = new UrlBuilder().setHost("localhost").setPort(3000).setPath("first/path").setScheme("http").toUrl(); } catch (MalformedURLException e) { throw new RuntimeException(e); } testResourceNamer.randomName("test", 10); testResourceNamer.now(); HttpRequest request = new HttpRequest(HttpMethod.POST, url).setBody(TEST_DATA) .setHeader(HttpHeaderName.CONTENT_TYPE, "application/json") .setHeader(HttpHeaderName.CONTENT_LENGTH, String.valueOf(TEST_DATA.length())); try (HttpResponse response = pipeline.sendSync(request, Context.NONE)) { assertEquals(200, response.getStatusCode()); } } @Test @Tag("Record") public void testRecordWithHeaders() { HttpURLConnectionHttpClient client = new HttpURLConnectionHttpClient(); HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(client) .policies(interceptorManager.getRecordPolicy()).build(); URL url; try { url = new UrlBuilder().setHost("localhost").setPort(3000).setPath("echoheaders").setScheme("http").toUrl(); } catch (MalformedURLException e) { throw new RuntimeException(e); } testResourceNamer.randomName("test", 10); testResourceNamer.now(); HttpRequest request = new HttpRequest(HttpMethod.GET, url) 
.setHeader(HttpHeaderName.fromString("header1"), "value1") .setHeader(HttpHeaderName.fromString("header2"), "value2"); try (HttpResponse response = pipeline.sendSync(request, Context.NONE)) { assertEquals(200, response.getStatusCode()); } } @Test @Tag("Playback") public void testPlayback() { HttpClient client = interceptorManager.getPlaybackClient(); URL url; try { url = new UrlBuilder().setHost("localhost").setPort(3000).setPath("first/path").setScheme("http").toUrl(); } catch (MalformedURLException e) { throw new RuntimeException(e); } HttpRequest request = new HttpRequest(HttpMethod.GET, url) .setHeader(HttpHeaderName.ACCEPT, "*/*"); try (HttpResponse response = client.sendSync(request, Context.NONE)) { assertEquals("first path", response.getBodyAsBinaryData().toString()); assertEquals(200, response.getStatusCode()); } } @Test @Tag("Live") public void testCannotGetPlaybackClient() { RuntimeException thrown = assertThrows(IllegalStateException.class, () -> interceptorManager.getPlaybackClient()); assertEquals("A playback client can only be requested in PLAYBACK mode.", thrown.getMessage()); } @Test @Tag("Live") public void testCannotGetRecordPolicy() { RuntimeException thrown = assertThrows(IllegalStateException.class, () -> interceptorManager.getRecordPolicy()); assertEquals("A recording policy can only be requested in RECORD mode.", thrown.getMessage()); } @Test @Tag("Record") public void testRecordWithRedaction() { HttpURLConnectionHttpClient client = new HttpURLConnectionHttpClient(); interceptorManager.addSanitizers(CUSTOM_SANITIZER); HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(client) .policies(interceptorManager.getRecordPolicy()).build(); URL url; try { url = new UrlBuilder() .setHost("localhost") .setPath("/fr/path/1") .setPort(3000) .setScheme("http") .toUrl(); } catch (MalformedURLException e) { throw new RuntimeException(e); } HttpRequest request = new HttpRequest(HttpMethod.GET, url) .setHeader(OCP_APIM_SUBSCRIPTION_KEY, 
"SECRET_API_KEY") .setHeader(HttpHeaderName.CONTENT_TYPE, "application/json") .setHeader(HttpHeaderName.ACCEPT, "*/*"); try (HttpResponse response = pipeline.sendSync(request, Context.NONE)) { assertEquals(response.getStatusCode(), 200); assertEquals(200, response.getStatusCode()); RecordedTestProxyData recordedTestProxyData = readDataFromFile(); RecordedTestProxyData.TestProxyDataRecord record = recordedTestProxyData.getTestProxyDataRecords().get(0); assertEquals("http: assertEquals(REDACTED, record.getHeaders().get("Ocp-Apim-Subscription-Key")); assertTrue(record.getResponseHeaders().get("Operation-Location") .startsWith("https: assertEquals(REDACTED, record.getResponse().get("modelId")); } } @Test @Tag("Playback") public void testPlaybackWithRedaction() { interceptorManager.addSanitizers(CUSTOM_SANITIZER); interceptorManager.addMatchers(new ArrayList<>(Arrays.asList(new CustomMatcher() .setExcludedHeaders(Arrays.asList("Ocp-Apim-Subscription-Key"))))); HttpClient client = interceptorManager.getPlaybackClient(); URL url; try { url = new UrlBuilder() .setHost("localhost") .setPort(3000) .setPath("/fr/models") .setScheme("http") .toUrl(); } catch (MalformedURLException e) { throw new RuntimeException(e); } HttpRequest request = new HttpRequest(HttpMethod.GET, url) .setHeader(OCP_APIM_SUBSCRIPTION_KEY, "SECRET_API_KEY") .setHeader(HttpHeaderName.CONTENT_TYPE, "application/json") .setHeader(HttpHeaderName.ACCEPT, "*/*"); try (HttpResponse response = client.sendSync(request, Context.NONE)) { assertEquals(200, response.getStatusCode()); } } @Test @Tag("Record") public void testBodyRegexRedactRecord() { HttpURLConnectionHttpClient client = new HttpURLConnectionHttpClient(); interceptorManager.addSanitizers(CUSTOM_SANITIZER); HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(client) .policies(interceptorManager.getRecordPolicy()).build(); URL url; try { url = new UrlBuilder() .setHost("localhost") .setPath("/fr/path/2") .setPort(3000) .setScheme("http") 
.toUrl(); } catch (MalformedURLException e) { throw new RuntimeException(e); } HttpRequest request = new HttpRequest(HttpMethod.GET, url); request.setHeader(HttpHeaderName.CONTENT_TYPE, "application/json"); try (HttpResponse response = pipeline.sendSync(request, Context.NONE)) { assertEquals(200, response.getStatusCode()); } RecordedTestProxyData recordedTestProxyData = readDataFromFile(); RecordedTestProxyData.TestProxyDataRecord record = recordedTestProxyData.getTestProxyDataRecords().get(0); assertEquals("http: assertTrue(record.getResponse().get("Body").contains("<UserDelegationKey><SignedTid>REDACTED</SignedTid></UserDelegationKey>")); assertTrue(record.getResponse().get("primaryKey").contains("<PrimaryKey>REDACTED</PrimaryKey>")); assertEquals(record.getResponse().get("TableName"), REDACTED); } @Test @Tag("Live") public void canGetTestProxyVersion() { String version = TestProxyUtils.getTestProxyVersion(); assertNotNull(version); } @Test @Tag("Record") public void testRecordWithRedirect() { HttpURLConnectionHttpClient client = new HttpURLConnectionHttpClient(); HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(client) .policies(new RedirectPolicy(), interceptorManager.getRecordPolicy()).build(); URL url; try { url = new UrlBuilder() .setHost("localhost") .setPath("/fr/path/3") .setPort(3000) .setScheme("http") .toUrl(); } catch (MalformedURLException e) { throw new RuntimeException(e); } HttpRequest request = new HttpRequest(HttpMethod.GET, url); try (HttpResponse response = pipeline.sendSync(request, Context.NONE)) { assertEquals(response.getStatusCode(), 200); } } private RecordedTestProxyData readDataFromFile() { String filePath = Paths.get(TestUtils.getRecordFolder().getPath(), this.testContextManager.getTestPlaybackRecordingName()) + ".json"; File recordFile = new File(filePath); try (BufferedReader reader = Files.newBufferedReader(recordFile.toPath())) { return RECORD_MAPPER.readValue(reader, RecordedTestProxyData.class); } catch (IOException 
ex) { throw new UncheckedIOException(ex); } } @JsonIgnoreProperties(ignoreUnknown = true) static class RecordedTestProxyData { @JsonProperty("Entries") private final LinkedList<TestProxyDataRecord> testProxyDataRecords; RecordedTestProxyData() { testProxyDataRecords = new LinkedList<>(); } public LinkedList<TestProxyDataRecord> getTestProxyDataRecords() { return testProxyDataRecords; } @JsonIgnoreProperties(ignoreUnknown = true) static class TestProxyDataRecord { @JsonProperty("RequestMethod") private String method; @JsonProperty("RequestUri") private String uri; @JsonProperty("RequestHeaders") private Map<String, String> headers; @JsonProperty("ResponseBody") private Map<String, String> response; @JsonProperty("ResponseHeaders") private Map<String, String> responseHeaders; @JsonProperty("RequestBody") private String requestBody; public String getMethod() { return method; } public String getUri() { return uri; } public Map<String, String> getHeaders() { return headers; } public Map<String, String> getResponse() { return response; } public Map<String, String> getResponseHeaders() { return responseHeaders; } public String getRequestBody() { return requestBody; } } } }
class TestProxyTests extends TestProxyTestBase { public static final String TEST_DATA = "{\"test\":\"proxy\"}"; static TestProxyTestServer server; private static final ObjectMapper RECORD_MAPPER = new ObjectMapper().enable(SerializationFeature.INDENT_OUTPUT); private static final List<TestProxySanitizer> CUSTOM_SANITIZER = new ArrayList<>(); public static final String REDACTED = "REDACTED"; private static final HttpHeaderName OCP_APIM_SUBSCRIPTION_KEY = HttpHeaderName.fromString("Ocp-Apim-Subscription-Key"); static { CUSTOM_SANITIZER.add(new TestProxySanitizer("$..modelId", null, REDACTED, TestProxySanitizerType.BODY_KEY)); CUSTOM_SANITIZER.add(new TestProxySanitizer("TableName\\\"*:*\\\"(?<tablename>.*)\\\"", REDACTED, TestProxySanitizerType.BODY_REGEX).setGroupForReplace("tablename")); } @BeforeAll public static void setupClass() { server = new TestProxyTestServer(); } @AfterAll public static void teardownClass() { server.close(); } @Test @Tag("Record") public void testBasicRecord() { HttpURLConnectionHttpClient client = new HttpURLConnectionHttpClient(); HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(client) .policies(interceptorManager.getRecordPolicy()).build(); URL url = null; try { url = new UrlBuilder().setHost("localhost").setPort(3000).setScheme("http").toUrl(); } catch (MalformedURLException e) { throw new RuntimeException(e); } testResourceNamer.randomName("test", 10); testResourceNamer.now(); HttpRequest request = new HttpRequest(HttpMethod.GET, url); try (HttpResponse response = pipeline.sendSync(request, Context.NONE)) { assertEquals(200, response.getStatusCode()); } } @Test @Tag("Playback") public void testOrdering() { String name = testResourceNamer.randomName("test", 10); assertEquals("test32950", name); } @Test @Tag("Record") @DoNotRecord public void testDoNotRecord() { testResourceNamer.now(); } @Test @Tag("Playback") @DoNotRecord public void testDoNotPlayback() { testResourceNamer.now(); } @Test @Tag("Playback") public void 
testMismatch() { HttpClient client = interceptorManager.getPlaybackClient(); URL url; try { url = new UrlBuilder().setHost("localhost").setPort(3000).setScheme("http").setPath("first/path").toUrl(); } catch (MalformedURLException e) { throw new RuntimeException(e); } HttpRequest request = new HttpRequest(HttpMethod.GET, url); RuntimeException thrown = assertThrows(RuntimeException.class, () -> client.sendSync(request, Context.NONE)); assertTrue(thrown.getMessage().contains("Uri doesn't match")); } @Test @Tag("Record") @RecordWithoutRequestBody public void testRecordWithPath() { HttpURLConnectionHttpClient client = new HttpURLConnectionHttpClient(); HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(client) .policies(interceptorManager.getRecordPolicy()).build(); URL url; try { url = new UrlBuilder().setHost("localhost").setPort(3000).setPath("first/path").setScheme("http").toUrl(); } catch (MalformedURLException e) { throw new RuntimeException(e); } testResourceNamer.randomName("test", 10); testResourceNamer.now(); HttpRequest request = new HttpRequest(HttpMethod.POST, url).setBody(TEST_DATA) .setHeader(HttpHeaderName.CONTENT_TYPE, "application/json") .setHeader(HttpHeaderName.CONTENT_LENGTH, String.valueOf(TEST_DATA.length())); try (HttpResponse response = pipeline.sendSync(request, Context.NONE)) { assertEquals(200, response.getStatusCode()); } } @Test @Tag("Record") public void testRecordWithHeaders() { HttpURLConnectionHttpClient client = new HttpURLConnectionHttpClient(); HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(client) .policies(interceptorManager.getRecordPolicy()).build(); URL url; try { url = new UrlBuilder().setHost("localhost").setPort(3000).setPath("echoheaders").setScheme("http").toUrl(); } catch (MalformedURLException e) { throw new RuntimeException(e); } testResourceNamer.randomName("test", 10); testResourceNamer.now(); HttpRequest request = new HttpRequest(HttpMethod.GET, url) 
.setHeader(HttpHeaderName.fromString("header1"), "value1") .setHeader(HttpHeaderName.fromString("header2"), "value2"); try (HttpResponse response = pipeline.sendSync(request, Context.NONE)) { assertEquals(200, response.getStatusCode()); } } @Test @Tag("Playback") public void testPlayback() { HttpClient client = interceptorManager.getPlaybackClient(); URL url; try { url = new UrlBuilder().setHost("localhost").setPort(3000).setPath("first/path").setScheme("http").toUrl(); } catch (MalformedURLException e) { throw new RuntimeException(e); } HttpRequest request = new HttpRequest(HttpMethod.GET, url) .setHeader(HttpHeaderName.ACCEPT, "*/*"); try (HttpResponse response = client.sendSync(request, Context.NONE)) { assertEquals("first path", response.getBodyAsBinaryData().toString()); assertEquals(200, response.getStatusCode()); } } @Test @Tag("Live") public void testCannotGetPlaybackClient() { RuntimeException thrown = assertThrows(IllegalStateException.class, () -> interceptorManager.getPlaybackClient()); assertEquals("A playback client can only be requested in PLAYBACK mode.", thrown.getMessage()); } @Test @Tag("Live") public void testCannotGetRecordPolicy() { RuntimeException thrown = assertThrows(IllegalStateException.class, () -> interceptorManager.getRecordPolicy()); assertEquals("A recording policy can only be requested in RECORD mode.", thrown.getMessage()); } @Test @Tag("Record") public void testRecordWithRedaction() { HttpURLConnectionHttpClient client = new HttpURLConnectionHttpClient(); interceptorManager.addSanitizers(CUSTOM_SANITIZER); HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(client) .policies(interceptorManager.getRecordPolicy()).build(); URL url; try { url = new UrlBuilder() .setHost("localhost") .setPath("/fr/path/1") .setPort(3000) .setScheme("http") .toUrl(); } catch (MalformedURLException e) { throw new RuntimeException(e); } HttpRequest request = new HttpRequest(HttpMethod.GET, url) .setHeader(OCP_APIM_SUBSCRIPTION_KEY, 
"SECRET_API_KEY") .setHeader(HttpHeaderName.CONTENT_TYPE, "application/json") .setHeader(HttpHeaderName.ACCEPT, "*/*"); try (HttpResponse response = pipeline.sendSync(request, Context.NONE)) { assertEquals(response.getStatusCode(), 200); assertEquals(200, response.getStatusCode()); RecordedTestProxyData recordedTestProxyData = readDataFromFile(); RecordedTestProxyData.TestProxyDataRecord record = recordedTestProxyData.getTestProxyDataRecords().get(0); assertEquals("http: assertEquals(REDACTED, record.getHeaders().get("Ocp-Apim-Subscription-Key")); assertTrue(record.getResponseHeaders().get("Operation-Location") .startsWith("https: assertEquals(REDACTED, record.getResponse().get("modelId")); } } @Test @Tag("Playback") public void testPlaybackWithRedaction() { interceptorManager.addSanitizers(CUSTOM_SANITIZER); interceptorManager.addMatchers(new ArrayList<>(Arrays.asList(new CustomMatcher() .setExcludedHeaders(Arrays.asList("Ocp-Apim-Subscription-Key"))))); HttpClient client = interceptorManager.getPlaybackClient(); URL url; try { url = new UrlBuilder() .setHost("localhost") .setPort(3000) .setPath("/fr/models") .setScheme("http") .toUrl(); } catch (MalformedURLException e) { throw new RuntimeException(e); } HttpRequest request = new HttpRequest(HttpMethod.GET, url) .setHeader(OCP_APIM_SUBSCRIPTION_KEY, "SECRET_API_KEY") .setHeader(HttpHeaderName.CONTENT_TYPE, "application/json") .setHeader(HttpHeaderName.ACCEPT, "*/*"); try (HttpResponse response = client.sendSync(request, Context.NONE)) { assertEquals(200, response.getStatusCode()); } } @Test @Tag("Record") public void testBodyRegexRedactRecord() { HttpURLConnectionHttpClient client = new HttpURLConnectionHttpClient(); interceptorManager.addSanitizers(CUSTOM_SANITIZER); HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(client) .policies(interceptorManager.getRecordPolicy()).build(); URL url; try { url = new UrlBuilder() .setHost("localhost") .setPath("/fr/path/2") .setPort(3000) .setScheme("http") 
.toUrl(); } catch (MalformedURLException e) { throw new RuntimeException(e); } HttpRequest request = new HttpRequest(HttpMethod.GET, url); request.setHeader(HttpHeaderName.CONTENT_TYPE, "application/json"); try (HttpResponse response = pipeline.sendSync(request, Context.NONE)) { assertEquals(200, response.getStatusCode()); } RecordedTestProxyData recordedTestProxyData = readDataFromFile(); RecordedTestProxyData.TestProxyDataRecord record = recordedTestProxyData.getTestProxyDataRecords().get(0); assertEquals("http: assertTrue(record.getResponse().get("Body").contains("<UserDelegationKey><SignedTid>REDACTED</SignedTid></UserDelegationKey>")); assertTrue(record.getResponse().get("primaryKey").contains("<PrimaryKey>REDACTED</PrimaryKey>")); assertEquals(record.getResponse().get("TableName"), REDACTED); } @Test @Tag("Live") public void canGetTestProxyVersion() { String version = TestProxyUtils.getTestProxyVersion(); assertNotNull(version); } @Test @Tag("Record") @Test @Tag("Record") public void testRecordWithRedirect() { HttpURLConnectionHttpClient client = new HttpURLConnectionHttpClient(); HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(client) .policies(new RedirectPolicy(), interceptorManager.getRecordPolicy()).build(); URL url; try { url = new UrlBuilder() .setHost("localhost") .setPath("/getRedirect") .setPort(3000) .setScheme("http") .toUrl(); } catch (MalformedURLException e) { throw new RuntimeException(e); } HttpRequest request = new HttpRequest(HttpMethod.GET, url); try (HttpResponse response = pipeline.sendSync(request, Context.NONE)) { assertEquals(200, response.getStatusCode()); assertEquals("http: assertNull(response.getRequest().getHeaders().get(HttpHeaderName.fromString("x-recording-upstream-base-uri"))); } } private RecordedTestProxyData readDataFromFile() { String filePath = Paths.get(TestUtils.getRecordFolder().getPath(), this.testContextManager.getTestPlaybackRecordingName()) + ".json"; File recordFile = new File(filePath); try 
(BufferedReader reader = Files.newBufferedReader(recordFile.toPath())) { return RECORD_MAPPER.readValue(reader, RecordedTestProxyData.class); } catch (IOException ex) { throw new UncheckedIOException(ex); } } @JsonIgnoreProperties(ignoreUnknown = true) static class RecordedTestProxyData { @JsonProperty("Entries") private final LinkedList<TestProxyDataRecord> testProxyDataRecords; RecordedTestProxyData() { testProxyDataRecords = new LinkedList<>(); } public LinkedList<TestProxyDataRecord> getTestProxyDataRecords() { return testProxyDataRecords; } @JsonIgnoreProperties(ignoreUnknown = true) static class TestProxyDataRecord { @JsonProperty("RequestMethod") private String method; @JsonProperty("RequestUri") private String uri; @JsonProperty("RequestHeaders") private Map<String, String> headers; @JsonProperty("ResponseBody") private Map<String, String> response; @JsonProperty("ResponseHeaders") private Map<String, String> responseHeaders; @JsonProperty("RequestBody") private String requestBody; public String getMethod() { return method; } public String getUri() { return uri; } public Map<String, String> getHeaders() { return headers; } public Map<String, String> getResponse() { return response; } public Map<String, String> getResponseHeaders() { return responseHeaders; } public String getRequestBody() { return requestBody; } } } }
I know this is just testing but I think this is a great test we should leave in, so let's make the path reflect that. `/getRedirect` or something.
public TestProxyTestServer() { server = HttpServer.create() .host("localhost") .port(3000) .route(routes -> routes .get("/", (req, res) -> res.status(HttpResponseStatus.OK).sendString(Mono.just("hello world"))) .post("/first/path", (req, res) -> res.status(HttpResponseStatus.OK).sendString(Mono.just("first path"))) .get("/echoheaders", (req, res) -> { for (Map.Entry<String, String> requestHeader : req.requestHeaders()) { res.addHeader(requestHeader.getKey(), requestHeader.getValue()); } return res.status(HttpResponseStatus.OK).sendString(Mono.just("echoheaders")); }) .get("/fr/path/1", (req, res) -> { for (Map.Entry<String, String> requestHeader : req.requestHeaders()) { res.addHeader(requestHeader.getKey(), requestHeader.getValue()); } return res.status(HttpResponseStatus.OK) .addHeader("Content-Type", "application/json") .addHeader("Operation-Location", "https: .sendString(Mono.just(TEST_JSON_RESPONSE_BODY)); }) .get("/fr/path/2", (req, res) -> res.status(HttpResponseStatus.OK) .addHeader("Content-Type", "application/json") .sendString(Mono.just(TEST_XML_RESPONSE_BODY))) .get("/fr/path/3", (req, res) -> { return res.status(HttpResponseStatus.TEMPORARY_REDIRECT) .addHeader("Content-Type", "application/json") .addHeader("Location", url.toString()); })) .bindNow(); }
.get("/fr/path/3", (req, res) -> {
public TestProxyTestServer() { server = HttpServer.create() .host("localhost") .port(3000) .route(routes -> routes .get("/", (req, res) -> res.status(HttpResponseStatus.OK).sendString(Mono.just("hello world"))) .post("/first/path", (req, res) -> res.status(HttpResponseStatus.OK).sendString(Mono.just("first path"))) .get("/echoheaders", (req, res) -> { for (Map.Entry<String, String> requestHeader : req.requestHeaders()) { res.addHeader(requestHeader.getKey(), requestHeader.getValue()); } return res.status(HttpResponseStatus.OK).sendString(Mono.just("echoheaders")); }) .get("/fr/path/1", (req, res) -> { for (Map.Entry<String, String> requestHeader : req.requestHeaders()) { res.addHeader(requestHeader.getKey(), requestHeader.getValue()); } return res.status(HttpResponseStatus.OK) .addHeader("Content-Type", "application/json") .addHeader("Operation-Location", "https: .sendString(Mono.just(TEST_JSON_RESPONSE_BODY)); }) .get("/fr/path/2", (req, res) -> res.status(HttpResponseStatus.OK) .addHeader("Content-Type", "application/json") .sendString(Mono.just(TEST_XML_RESPONSE_BODY))) .get("/getRedirect", (req, res) -> { return res.status(HttpResponseStatus.TEMPORARY_REDIRECT) .addHeader("Content-Type", "application/json") .addHeader("Location", url.toString()); })) .bindNow(); }
class TestProxyTestServer implements Closeable { private final DisposableServer server; private static final String TEST_JSON_RESPONSE_BODY = "{\"modelId\":\"0cd2728b-210e-4c05-b706-f70554276bcc\",\"createdDateTime\":\"2022-08-31T00:00:00Z\",\"apiVersion\":\"2022-08-31\", \"accountKey\" : \"secret_account_key\"}"; private static final String TEST_XML_RESPONSE_BODY = "{\"Body\":\"<UserDelegationKey><SignedTid>sensitiveInformation=</SignedTid></UserDelegationKey>\",\"primaryKey\":\"<PrimaryKey>fakePrimaryKey</PrimaryKey>\", \"TableName\":\"listtable09bf2a3d\"}"; URL url; { try { url = new UrlBuilder().setHost("localhost").setPort(3000).setScheme("http").toUrl(); } catch (MalformedURLException e) { throw new RuntimeException(e); } } /** * Constructor for TestProxyTestServer */ @Override public void close() { server.disposeNow(); } }
class TestProxyTestServer implements Closeable { private final DisposableServer server; private static final String TEST_JSON_RESPONSE_BODY = "{\"modelId\":\"0cd2728b-210e-4c05-b706-f70554276bcc\",\"createdDateTime\":\"2022-08-31T00:00:00Z\",\"apiVersion\":\"2022-08-31\", \"accountKey\" : \"secret_account_key\"}"; private static final String TEST_XML_RESPONSE_BODY = "{\"Body\":\"<UserDelegationKey><SignedTid>sensitiveInformation=</SignedTid></UserDelegationKey>\",\"primaryKey\":\"<PrimaryKey>fakePrimaryKey</PrimaryKey>\", \"TableName\":\"listtable09bf2a3d\"}"; URL url; { try { url = new UrlBuilder().setHost("localhost").setPort(3000).setScheme("http").setPath("echoheaders").toUrl(); } catch (MalformedURLException e) { throw new RuntimeException(e); } } /** * Constructor for TestProxyTestServer */ @Override public void close() { server.disposeNow(); } }
👍
public void testRecordWithRedirect() { HttpURLConnectionHttpClient client = new HttpURLConnectionHttpClient(); HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(client) .policies(new RedirectPolicy(), interceptorManager.getRecordPolicy()).build(); URL url; try { url = new UrlBuilder() .setHost("localhost") .setPath("/getRedirect") .setPort(3000) .setScheme("http") .toUrl(); } catch (MalformedURLException e) { throw new RuntimeException(e); } HttpRequest request = new HttpRequest(HttpMethod.GET, url); try (HttpResponse response = pipeline.sendSync(request, Context.NONE)) { assertEquals(200, response.getStatusCode()); assertEquals("http: assertNull(response.getRequest().getHeaders().get(HttpHeaderName.fromString("x-recording-upstream-base-uri"))); } }
.policies(new RedirectPolicy(), interceptorManager.getRecordPolicy()).build();
public void testRecordWithRedirect() { HttpURLConnectionHttpClient client = new HttpURLConnectionHttpClient(); HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(client) .policies(new RedirectPolicy(), interceptorManager.getRecordPolicy()).build(); URL url; try { url = new UrlBuilder() .setHost("localhost") .setPath("/getRedirect") .setPort(3000) .setScheme("http") .toUrl(); } catch (MalformedURLException e) { throw new RuntimeException(e); } HttpRequest request = new HttpRequest(HttpMethod.GET, url); try (HttpResponse response = pipeline.sendSync(request, Context.NONE)) { assertEquals(200, response.getStatusCode()); assertEquals("http: assertNull(response.getRequest().getHeaders().get(HttpHeaderName.fromString("x-recording-upstream-base-uri"))); } }
class TestProxyTests extends TestProxyTestBase { public static final String TEST_DATA = "{\"test\":\"proxy\"}"; static TestProxyTestServer server; private static final ObjectMapper RECORD_MAPPER = new ObjectMapper().enable(SerializationFeature.INDENT_OUTPUT); private static final List<TestProxySanitizer> CUSTOM_SANITIZER = new ArrayList<>(); public static final String REDACTED = "REDACTED"; private static final HttpHeaderName OCP_APIM_SUBSCRIPTION_KEY = HttpHeaderName.fromString("Ocp-Apim-Subscription-Key"); static { CUSTOM_SANITIZER.add(new TestProxySanitizer("$..modelId", null, REDACTED, TestProxySanitizerType.BODY_KEY)); CUSTOM_SANITIZER.add(new TestProxySanitizer("TableName\\\"*:*\\\"(?<tablename>.*)\\\"", REDACTED, TestProxySanitizerType.BODY_REGEX).setGroupForReplace("tablename")); } @BeforeAll public static void setupClass() { server = new TestProxyTestServer(); } @AfterAll public static void teardownClass() { server.close(); } @Test @Tag("Record") public void testBasicRecord() { HttpURLConnectionHttpClient client = new HttpURLConnectionHttpClient(); HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(client) .policies(interceptorManager.getRecordPolicy()).build(); URL url = null; try { url = new UrlBuilder().setHost("localhost").setPort(3000).setScheme("http").toUrl(); } catch (MalformedURLException e) { throw new RuntimeException(e); } testResourceNamer.randomName("test", 10); testResourceNamer.now(); HttpRequest request = new HttpRequest(HttpMethod.GET, url); try (HttpResponse response = pipeline.sendSync(request, Context.NONE)) { assertEquals(200, response.getStatusCode()); } } @Test @Tag("Playback") public void testOrdering() { String name = testResourceNamer.randomName("test", 10); assertEquals("test32950", name); } @Test @Tag("Record") @DoNotRecord public void testDoNotRecord() { testResourceNamer.now(); } @Test @Tag("Playback") @DoNotRecord public void testDoNotPlayback() { testResourceNamer.now(); } @Test @Tag("Playback") public void 
testMismatch() { HttpClient client = interceptorManager.getPlaybackClient(); URL url; try { url = new UrlBuilder().setHost("localhost").setPort(3000).setScheme("http").setPath("first/path").toUrl(); } catch (MalformedURLException e) { throw new RuntimeException(e); } HttpRequest request = new HttpRequest(HttpMethod.GET, url); RuntimeException thrown = assertThrows(RuntimeException.class, () -> client.sendSync(request, Context.NONE)); assertTrue(thrown.getMessage().contains("Uri doesn't match")); } @Test @Tag("Record") @RecordWithoutRequestBody public void testRecordWithPath() { HttpURLConnectionHttpClient client = new HttpURLConnectionHttpClient(); HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(client) .policies(interceptorManager.getRecordPolicy()).build(); URL url; try { url = new UrlBuilder().setHost("localhost").setPort(3000).setPath("first/path").setScheme("http").toUrl(); } catch (MalformedURLException e) { throw new RuntimeException(e); } testResourceNamer.randomName("test", 10); testResourceNamer.now(); HttpRequest request = new HttpRequest(HttpMethod.POST, url).setBody(TEST_DATA) .setHeader(HttpHeaderName.CONTENT_TYPE, "application/json") .setHeader(HttpHeaderName.CONTENT_LENGTH, String.valueOf(TEST_DATA.length())); try (HttpResponse response = pipeline.sendSync(request, Context.NONE)) { assertEquals(200, response.getStatusCode()); } } @Test @Tag("Record") public void testRecordWithHeaders() { HttpURLConnectionHttpClient client = new HttpURLConnectionHttpClient(); HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(client) .policies(interceptorManager.getRecordPolicy()).build(); URL url; try { url = new UrlBuilder().setHost("localhost").setPort(3000).setPath("echoheaders").setScheme("http").toUrl(); } catch (MalformedURLException e) { throw new RuntimeException(e); } testResourceNamer.randomName("test", 10); testResourceNamer.now(); HttpRequest request = new HttpRequest(HttpMethod.GET, url) 
.setHeader(HttpHeaderName.fromString("header1"), "value1") .setHeader(HttpHeaderName.fromString("header2"), "value2"); try (HttpResponse response = pipeline.sendSync(request, Context.NONE)) { assertEquals(200, response.getStatusCode()); } } @Test @Tag("Playback") public void testPlayback() { HttpClient client = interceptorManager.getPlaybackClient(); URL url; try { url = new UrlBuilder().setHost("localhost").setPort(3000).setPath("first/path").setScheme("http").toUrl(); } catch (MalformedURLException e) { throw new RuntimeException(e); } HttpRequest request = new HttpRequest(HttpMethod.GET, url) .setHeader(HttpHeaderName.ACCEPT, "*/*"); try (HttpResponse response = client.sendSync(request, Context.NONE)) { assertEquals("first path", response.getBodyAsBinaryData().toString()); assertEquals(200, response.getStatusCode()); } } @Test @Tag("Live") public void testCannotGetPlaybackClient() { RuntimeException thrown = assertThrows(IllegalStateException.class, () -> interceptorManager.getPlaybackClient()); assertEquals("A playback client can only be requested in PLAYBACK mode.", thrown.getMessage()); } @Test @Tag("Live") public void testCannotGetRecordPolicy() { RuntimeException thrown = assertThrows(IllegalStateException.class, () -> interceptorManager.getRecordPolicy()); assertEquals("A recording policy can only be requested in RECORD mode.", thrown.getMessage()); } @Test @Tag("Record") public void testRecordWithRedaction() { HttpURLConnectionHttpClient client = new HttpURLConnectionHttpClient(); interceptorManager.addSanitizers(CUSTOM_SANITIZER); HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(client) .policies(interceptorManager.getRecordPolicy()).build(); URL url; try { url = new UrlBuilder() .setHost("localhost") .setPath("/fr/path/1") .setPort(3000) .setScheme("http") .toUrl(); } catch (MalformedURLException e) { throw new RuntimeException(e); } HttpRequest request = new HttpRequest(HttpMethod.GET, url) .setHeader(OCP_APIM_SUBSCRIPTION_KEY, 
"SECRET_API_KEY") .setHeader(HttpHeaderName.CONTENT_TYPE, "application/json") .setHeader(HttpHeaderName.ACCEPT, "*/*"); try (HttpResponse response = pipeline.sendSync(request, Context.NONE)) { assertEquals(response.getStatusCode(), 200); assertEquals(200, response.getStatusCode()); RecordedTestProxyData recordedTestProxyData = readDataFromFile(); RecordedTestProxyData.TestProxyDataRecord record = recordedTestProxyData.getTestProxyDataRecords().get(0); assertEquals("http: assertEquals(REDACTED, record.getHeaders().get("Ocp-Apim-Subscription-Key")); assertTrue(record.getResponseHeaders().get("Operation-Location") .startsWith("https: assertEquals(REDACTED, record.getResponse().get("modelId")); } } @Test @Tag("Playback") public void testPlaybackWithRedaction() { interceptorManager.addSanitizers(CUSTOM_SANITIZER); interceptorManager.addMatchers(new ArrayList<>(Arrays.asList(new CustomMatcher() .setExcludedHeaders(Arrays.asList("Ocp-Apim-Subscription-Key"))))); HttpClient client = interceptorManager.getPlaybackClient(); URL url; try { url = new UrlBuilder() .setHost("localhost") .setPort(3000) .setPath("/fr/models") .setScheme("http") .toUrl(); } catch (MalformedURLException e) { throw new RuntimeException(e); } HttpRequest request = new HttpRequest(HttpMethod.GET, url) .setHeader(OCP_APIM_SUBSCRIPTION_KEY, "SECRET_API_KEY") .setHeader(HttpHeaderName.CONTENT_TYPE, "application/json") .setHeader(HttpHeaderName.ACCEPT, "*/*"); try (HttpResponse response = client.sendSync(request, Context.NONE)) { assertEquals(200, response.getStatusCode()); } } @Test @Tag("Record") public void testBodyRegexRedactRecord() { HttpURLConnectionHttpClient client = new HttpURLConnectionHttpClient(); interceptorManager.addSanitizers(CUSTOM_SANITIZER); HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(client) .policies(interceptorManager.getRecordPolicy()).build(); URL url; try { url = new UrlBuilder() .setHost("localhost") .setPath("/fr/path/2") .setPort(3000) .setScheme("http") 
.toUrl(); } catch (MalformedURLException e) { throw new RuntimeException(e); } HttpRequest request = new HttpRequest(HttpMethod.GET, url); request.setHeader(HttpHeaderName.CONTENT_TYPE, "application/json"); try (HttpResponse response = pipeline.sendSync(request, Context.NONE)) { assertEquals(200, response.getStatusCode()); } RecordedTestProxyData recordedTestProxyData = readDataFromFile(); RecordedTestProxyData.TestProxyDataRecord record = recordedTestProxyData.getTestProxyDataRecords().get(0); assertEquals("http: assertTrue(record.getResponse().get("Body").contains("<UserDelegationKey><SignedTid>REDACTED</SignedTid></UserDelegationKey>")); assertTrue(record.getResponse().get("primaryKey").contains("<PrimaryKey>REDACTED</PrimaryKey>")); assertEquals(record.getResponse().get("TableName"), REDACTED); } @Test @Tag("Live") public void canGetTestProxyVersion() { String version = TestProxyUtils.getTestProxyVersion(); assertNotNull(version); } @Test @Tag("Record") public void testResetTestProxyData() throws MalformedURLException { HttpURLConnectionHttpClient client = new HttpURLConnectionHttpClient(); final HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(client) .policies(interceptorManager.getRecordPolicy()) .build(); try (HttpResponse response = pipeline.sendSync( new HttpRequest(HttpMethod.GET, new URL("http: assertEquals(200, response.getStatusCode()); HttpHeaders headers = response.getRequest().getHeaders(); assertNull(headers.get(HttpHeaderName.fromString("x-recording-upstream-base-uri"))); assertNull(headers.get(HttpHeaderName.fromString("x-recording-mode"))); assertNull(headers.get(HttpHeaderName.fromString("x-recording-id"))); assertNull(headers.get(HttpHeaderName.fromString("x-recording-skip"))); } } @Test @Tag("Record") private RecordedTestProxyData readDataFromFile() { String filePath = Paths.get(TestUtils.getRecordFolder().getPath(), this.testContextManager.getTestPlaybackRecordingName()) + ".json"; File recordFile = new File(filePath); try 
(BufferedReader reader = Files.newBufferedReader(recordFile.toPath())) { return RECORD_MAPPER.readValue(reader, RecordedTestProxyData.class); } catch (IOException ex) { throw new UncheckedIOException(ex); } } @JsonIgnoreProperties(ignoreUnknown = true) static class RecordedTestProxyData { @JsonProperty("Entries") private final LinkedList<TestProxyDataRecord> testProxyDataRecords; RecordedTestProxyData() { testProxyDataRecords = new LinkedList<>(); } public LinkedList<TestProxyDataRecord> getTestProxyDataRecords() { return testProxyDataRecords; } @JsonIgnoreProperties(ignoreUnknown = true) static class TestProxyDataRecord { @JsonProperty("RequestMethod") private String method; @JsonProperty("RequestUri") private String uri; @JsonProperty("RequestHeaders") private Map<String, String> headers; @JsonProperty("ResponseBody") private Map<String, String> response; @JsonProperty("ResponseHeaders") private Map<String, String> responseHeaders; @JsonProperty("RequestBody") private String requestBody; public String getMethod() { return method; } public String getUri() { return uri; } public Map<String, String> getHeaders() { return headers; } public Map<String, String> getResponse() { return response; } public Map<String, String> getResponseHeaders() { return responseHeaders; } public String getRequestBody() { return requestBody; } } } }
class TestProxyTests extends TestProxyTestBase { public static final String TEST_DATA = "{\"test\":\"proxy\"}"; static TestProxyTestServer server; private static final ObjectMapper RECORD_MAPPER = new ObjectMapper().enable(SerializationFeature.INDENT_OUTPUT); private static final List<TestProxySanitizer> CUSTOM_SANITIZER = new ArrayList<>(); public static final String REDACTED = "REDACTED"; private static final HttpHeaderName OCP_APIM_SUBSCRIPTION_KEY = HttpHeaderName.fromString("Ocp-Apim-Subscription-Key"); static { CUSTOM_SANITIZER.add(new TestProxySanitizer("$..modelId", null, REDACTED, TestProxySanitizerType.BODY_KEY)); CUSTOM_SANITIZER.add(new TestProxySanitizer("TableName\\\"*:*\\\"(?<tablename>.*)\\\"", REDACTED, TestProxySanitizerType.BODY_REGEX).setGroupForReplace("tablename")); } @BeforeAll public static void setupClass() { server = new TestProxyTestServer(); } @AfterAll public static void teardownClass() { server.close(); } @Test @Tag("Record") public void testBasicRecord() { HttpURLConnectionHttpClient client = new HttpURLConnectionHttpClient(); HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(client) .policies(interceptorManager.getRecordPolicy()).build(); URL url = null; try { url = new UrlBuilder().setHost("localhost").setPort(3000).setScheme("http").toUrl(); } catch (MalformedURLException e) { throw new RuntimeException(e); } testResourceNamer.randomName("test", 10); testResourceNamer.now(); HttpRequest request = new HttpRequest(HttpMethod.GET, url); try (HttpResponse response = pipeline.sendSync(request, Context.NONE)) { assertEquals(200, response.getStatusCode()); } } @Test @Tag("Playback") public void testOrdering() { String name = testResourceNamer.randomName("test", 10); assertEquals("test32950", name); } @Test @Tag("Record") @DoNotRecord public void testDoNotRecord() { testResourceNamer.now(); } @Test @Tag("Playback") @DoNotRecord public void testDoNotPlayback() { testResourceNamer.now(); } @Test @Tag("Playback") public void 
testMismatch() { HttpClient client = interceptorManager.getPlaybackClient(); URL url; try { url = new UrlBuilder().setHost("localhost").setPort(3000).setScheme("http").setPath("first/path").toUrl(); } catch (MalformedURLException e) { throw new RuntimeException(e); } HttpRequest request = new HttpRequest(HttpMethod.GET, url); RuntimeException thrown = assertThrows(RuntimeException.class, () -> client.sendSync(request, Context.NONE)); assertTrue(thrown.getMessage().contains("Uri doesn't match")); } @Test @Tag("Record") @RecordWithoutRequestBody public void testRecordWithPath() { HttpURLConnectionHttpClient client = new HttpURLConnectionHttpClient(); HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(client) .policies(interceptorManager.getRecordPolicy()).build(); URL url; try { url = new UrlBuilder().setHost("localhost").setPort(3000).setPath("first/path").setScheme("http").toUrl(); } catch (MalformedURLException e) { throw new RuntimeException(e); } testResourceNamer.randomName("test", 10); testResourceNamer.now(); HttpRequest request = new HttpRequest(HttpMethod.POST, url).setBody(TEST_DATA) .setHeader(HttpHeaderName.CONTENT_TYPE, "application/json") .setHeader(HttpHeaderName.CONTENT_LENGTH, String.valueOf(TEST_DATA.length())); try (HttpResponse response = pipeline.sendSync(request, Context.NONE)) { assertEquals(200, response.getStatusCode()); } } @Test @Tag("Record") public void testRecordWithHeaders() { HttpURLConnectionHttpClient client = new HttpURLConnectionHttpClient(); HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(client) .policies(interceptorManager.getRecordPolicy()).build(); URL url; try { url = new UrlBuilder().setHost("localhost").setPort(3000).setPath("echoheaders").setScheme("http").toUrl(); } catch (MalformedURLException e) { throw new RuntimeException(e); } testResourceNamer.randomName("test", 10); testResourceNamer.now(); HttpRequest request = new HttpRequest(HttpMethod.GET, url) 
.setHeader(HttpHeaderName.fromString("header1"), "value1") .setHeader(HttpHeaderName.fromString("header2"), "value2"); try (HttpResponse response = pipeline.sendSync(request, Context.NONE)) { assertEquals(200, response.getStatusCode()); } } @Test @Tag("Playback") public void testPlayback() { HttpClient client = interceptorManager.getPlaybackClient(); URL url; try { url = new UrlBuilder().setHost("localhost").setPort(3000).setPath("first/path").setScheme("http").toUrl(); } catch (MalformedURLException e) { throw new RuntimeException(e); } HttpRequest request = new HttpRequest(HttpMethod.GET, url) .setHeader(HttpHeaderName.ACCEPT, "*/*"); try (HttpResponse response = client.sendSync(request, Context.NONE)) { assertEquals("first path", response.getBodyAsBinaryData().toString()); assertEquals(200, response.getStatusCode()); } } @Test @Tag("Live") public void testCannotGetPlaybackClient() { RuntimeException thrown = assertThrows(IllegalStateException.class, () -> interceptorManager.getPlaybackClient()); assertEquals("A playback client can only be requested in PLAYBACK mode.", thrown.getMessage()); } @Test @Tag("Live") public void testCannotGetRecordPolicy() { RuntimeException thrown = assertThrows(IllegalStateException.class, () -> interceptorManager.getRecordPolicy()); assertEquals("A recording policy can only be requested in RECORD mode.", thrown.getMessage()); } @Test @Tag("Record") public void testRecordWithRedaction() { HttpURLConnectionHttpClient client = new HttpURLConnectionHttpClient(); interceptorManager.addSanitizers(CUSTOM_SANITIZER); HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(client) .policies(interceptorManager.getRecordPolicy()).build(); URL url; try { url = new UrlBuilder() .setHost("localhost") .setPath("/fr/path/1") .setPort(3000) .setScheme("http") .toUrl(); } catch (MalformedURLException e) { throw new RuntimeException(e); } HttpRequest request = new HttpRequest(HttpMethod.GET, url) .setHeader(OCP_APIM_SUBSCRIPTION_KEY, 
"SECRET_API_KEY") .setHeader(HttpHeaderName.CONTENT_TYPE, "application/json") .setHeader(HttpHeaderName.ACCEPT, "*/*"); try (HttpResponse response = pipeline.sendSync(request, Context.NONE)) { assertEquals(response.getStatusCode(), 200); assertEquals(200, response.getStatusCode()); RecordedTestProxyData recordedTestProxyData = readDataFromFile(); RecordedTestProxyData.TestProxyDataRecord record = recordedTestProxyData.getTestProxyDataRecords().get(0); assertEquals("http: assertEquals(REDACTED, record.getHeaders().get("Ocp-Apim-Subscription-Key")); assertTrue(record.getResponseHeaders().get("Operation-Location") .startsWith("https: assertEquals(REDACTED, record.getResponse().get("modelId")); } } @Test @Tag("Playback") public void testPlaybackWithRedaction() { interceptorManager.addSanitizers(CUSTOM_SANITIZER); interceptorManager.addMatchers(new ArrayList<>(Arrays.asList(new CustomMatcher() .setExcludedHeaders(Arrays.asList("Ocp-Apim-Subscription-Key"))))); HttpClient client = interceptorManager.getPlaybackClient(); URL url; try { url = new UrlBuilder() .setHost("localhost") .setPort(3000) .setPath("/fr/models") .setScheme("http") .toUrl(); } catch (MalformedURLException e) { throw new RuntimeException(e); } HttpRequest request = new HttpRequest(HttpMethod.GET, url) .setHeader(OCP_APIM_SUBSCRIPTION_KEY, "SECRET_API_KEY") .setHeader(HttpHeaderName.CONTENT_TYPE, "application/json") .setHeader(HttpHeaderName.ACCEPT, "*/*"); try (HttpResponse response = client.sendSync(request, Context.NONE)) { assertEquals(200, response.getStatusCode()); } } @Test @Tag("Record") public void testBodyRegexRedactRecord() { HttpURLConnectionHttpClient client = new HttpURLConnectionHttpClient(); interceptorManager.addSanitizers(CUSTOM_SANITIZER); HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(client) .policies(interceptorManager.getRecordPolicy()).build(); URL url; try { url = new UrlBuilder() .setHost("localhost") .setPath("/fr/path/2") .setPort(3000) .setScheme("http") 
.toUrl(); } catch (MalformedURLException e) { throw new RuntimeException(e); } HttpRequest request = new HttpRequest(HttpMethod.GET, url); request.setHeader(HttpHeaderName.CONTENT_TYPE, "application/json"); try (HttpResponse response = pipeline.sendSync(request, Context.NONE)) { assertEquals(200, response.getStatusCode()); } RecordedTestProxyData recordedTestProxyData = readDataFromFile(); RecordedTestProxyData.TestProxyDataRecord record = recordedTestProxyData.getTestProxyDataRecords().get(0); assertEquals("http: assertTrue(record.getResponse().get("Body").contains("<UserDelegationKey><SignedTid>REDACTED</SignedTid></UserDelegationKey>")); assertTrue(record.getResponse().get("primaryKey").contains("<PrimaryKey>REDACTED</PrimaryKey>")); assertEquals(record.getResponse().get("TableName"), REDACTED); } @Test @Tag("Live") public void canGetTestProxyVersion() { String version = TestProxyUtils.getTestProxyVersion(); assertNotNull(version); } @Test @Tag("Record") public void testResetTestProxyData() throws MalformedURLException { HttpURLConnectionHttpClient client = new HttpURLConnectionHttpClient(); final HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(client) .policies(interceptorManager.getRecordPolicy()) .build(); try (HttpResponse response = pipeline.sendSync( new HttpRequest(HttpMethod.GET, new URL("http: assertEquals(200, response.getStatusCode()); HttpHeaders headers = response.getRequest().getHeaders(); assertNull(headers.get(HttpHeaderName.fromString("x-recording-upstream-base-uri"))); assertNull(headers.get(HttpHeaderName.fromString("x-recording-mode"))); assertNull(headers.get(HttpHeaderName.fromString("x-recording-id"))); assertNull(headers.get(HttpHeaderName.fromString("x-recording-skip"))); } } @Test @Tag("Record") private RecordedTestProxyData readDataFromFile() { String filePath = Paths.get(TestUtils.getRecordFolder().getPath(), this.testContextManager.getTestPlaybackRecordingName()) + ".json"; File recordFile = new File(filePath); try 
(BufferedReader reader = Files.newBufferedReader(recordFile.toPath())) { return RECORD_MAPPER.readValue(reader, RecordedTestProxyData.class); } catch (IOException ex) { throw new UncheckedIOException(ex); } } @JsonIgnoreProperties(ignoreUnknown = true) static class RecordedTestProxyData { @JsonProperty("Entries") private final LinkedList<TestProxyDataRecord> testProxyDataRecords; RecordedTestProxyData() { testProxyDataRecords = new LinkedList<>(); } public LinkedList<TestProxyDataRecord> getTestProxyDataRecords() { return testProxyDataRecords; } @JsonIgnoreProperties(ignoreUnknown = true) static class TestProxyDataRecord { @JsonProperty("RequestMethod") private String method; @JsonProperty("RequestUri") private String uri; @JsonProperty("RequestHeaders") private Map<String, String> headers; @JsonProperty("ResponseBody") private Map<String, String> response; @JsonProperty("ResponseHeaders") private Map<String, String> responseHeaders; @JsonProperty("RequestBody") private String requestBody; public String getMethod() { return method; } public String getUri() { return uri; } public Map<String, String> getHeaders() { return headers; } public Map<String, String> getResponse() { return response; } public Map<String, String> getResponseHeaders() { return responseHeaders; } public String getRequestBody() { return requestBody; } } } }
is it correct that this test would pass even before this change because base URL is localhost for all requests? Can we make 307 return location like "http://really-anything-but-not-localhost/foo/bar" and then check `HttpURLConnectionHttpClient` was called with it (or use another mock client or policy under recording)?
public void testRecordWithRedirect() { HttpURLConnectionHttpClient client = new HttpURLConnectionHttpClient(); HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(client) .policies(new RedirectPolicy(), interceptorManager.getRecordPolicy()).build(); URL url; try { url = new UrlBuilder() .setHost("localhost") .setPath("/getRedirect") .setPort(3000) .setScheme("http") .toUrl(); } catch (MalformedURLException e) { throw new RuntimeException(e); } HttpRequest request = new HttpRequest(HttpMethod.GET, url); try (HttpResponse response = pipeline.sendSync(request, Context.NONE)) { assertEquals(200, response.getStatusCode()); assertEquals("http: assertNull(response.getRequest().getHeaders().get(HttpHeaderName.fromString("x-recording-upstream-base-uri"))); } }
assertEquals("http:
public void testRecordWithRedirect() { HttpURLConnectionHttpClient client = new HttpURLConnectionHttpClient(); HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(client) .policies(new RedirectPolicy(), interceptorManager.getRecordPolicy()).build(); URL url; try { url = new UrlBuilder() .setHost("localhost") .setPath("/getRedirect") .setPort(3000) .setScheme("http") .toUrl(); } catch (MalformedURLException e) { throw new RuntimeException(e); } HttpRequest request = new HttpRequest(HttpMethod.GET, url); try (HttpResponse response = pipeline.sendSync(request, Context.NONE)) { assertEquals(200, response.getStatusCode()); assertEquals("http: assertNull(response.getRequest().getHeaders().get(HttpHeaderName.fromString("x-recording-upstream-base-uri"))); } }
class TestProxyTests extends TestProxyTestBase { public static final String TEST_DATA = "{\"test\":\"proxy\"}"; static TestProxyTestServer server; private static final ObjectMapper RECORD_MAPPER = new ObjectMapper().enable(SerializationFeature.INDENT_OUTPUT); private static final List<TestProxySanitizer> CUSTOM_SANITIZER = new ArrayList<>(); public static final String REDACTED = "REDACTED"; private static final HttpHeaderName OCP_APIM_SUBSCRIPTION_KEY = HttpHeaderName.fromString("Ocp-Apim-Subscription-Key"); static { CUSTOM_SANITIZER.add(new TestProxySanitizer("$..modelId", null, REDACTED, TestProxySanitizerType.BODY_KEY)); CUSTOM_SANITIZER.add(new TestProxySanitizer("TableName\\\"*:*\\\"(?<tablename>.*)\\\"", REDACTED, TestProxySanitizerType.BODY_REGEX).setGroupForReplace("tablename")); } @BeforeAll public static void setupClass() { server = new TestProxyTestServer(); } @AfterAll public static void teardownClass() { server.close(); } @Test @Tag("Record") public void testBasicRecord() { HttpURLConnectionHttpClient client = new HttpURLConnectionHttpClient(); HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(client) .policies(interceptorManager.getRecordPolicy()).build(); URL url = null; try { url = new UrlBuilder().setHost("localhost").setPort(3000).setScheme("http").toUrl(); } catch (MalformedURLException e) { throw new RuntimeException(e); } testResourceNamer.randomName("test", 10); testResourceNamer.now(); HttpRequest request = new HttpRequest(HttpMethod.GET, url); try (HttpResponse response = pipeline.sendSync(request, Context.NONE)) { assertEquals(200, response.getStatusCode()); } } @Test @Tag("Playback") public void testOrdering() { String name = testResourceNamer.randomName("test", 10); assertEquals("test32950", name); } @Test @Tag("Record") @DoNotRecord public void testDoNotRecord() { testResourceNamer.now(); } @Test @Tag("Playback") @DoNotRecord public void testDoNotPlayback() { testResourceNamer.now(); } @Test @Tag("Playback") public void 
testMismatch() { HttpClient client = interceptorManager.getPlaybackClient(); URL url; try { url = new UrlBuilder().setHost("localhost").setPort(3000).setScheme("http").setPath("first/path").toUrl(); } catch (MalformedURLException e) { throw new RuntimeException(e); } HttpRequest request = new HttpRequest(HttpMethod.GET, url); RuntimeException thrown = assertThrows(RuntimeException.class, () -> client.sendSync(request, Context.NONE)); assertTrue(thrown.getMessage().contains("Uri doesn't match")); } @Test @Tag("Record") @RecordWithoutRequestBody public void testRecordWithPath() { HttpURLConnectionHttpClient client = new HttpURLConnectionHttpClient(); HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(client) .policies(interceptorManager.getRecordPolicy()).build(); URL url; try { url = new UrlBuilder().setHost("localhost").setPort(3000).setPath("first/path").setScheme("http").toUrl(); } catch (MalformedURLException e) { throw new RuntimeException(e); } testResourceNamer.randomName("test", 10); testResourceNamer.now(); HttpRequest request = new HttpRequest(HttpMethod.POST, url).setBody(TEST_DATA) .setHeader(HttpHeaderName.CONTENT_TYPE, "application/json") .setHeader(HttpHeaderName.CONTENT_LENGTH, String.valueOf(TEST_DATA.length())); try (HttpResponse response = pipeline.sendSync(request, Context.NONE)) { assertEquals(200, response.getStatusCode()); } } @Test @Tag("Record") public void testRecordWithHeaders() { HttpURLConnectionHttpClient client = new HttpURLConnectionHttpClient(); HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(client) .policies(interceptorManager.getRecordPolicy()).build(); URL url; try { url = new UrlBuilder().setHost("localhost").setPort(3000).setPath("echoheaders").setScheme("http").toUrl(); } catch (MalformedURLException e) { throw new RuntimeException(e); } testResourceNamer.randomName("test", 10); testResourceNamer.now(); HttpRequest request = new HttpRequest(HttpMethod.GET, url) 
.setHeader(HttpHeaderName.fromString("header1"), "value1") .setHeader(HttpHeaderName.fromString("header2"), "value2"); try (HttpResponse response = pipeline.sendSync(request, Context.NONE)) { assertEquals(200, response.getStatusCode()); } } @Test @Tag("Playback") public void testPlayback() { HttpClient client = interceptorManager.getPlaybackClient(); URL url; try { url = new UrlBuilder().setHost("localhost").setPort(3000).setPath("first/path").setScheme("http").toUrl(); } catch (MalformedURLException e) { throw new RuntimeException(e); } HttpRequest request = new HttpRequest(HttpMethod.GET, url) .setHeader(HttpHeaderName.ACCEPT, "*/*"); try (HttpResponse response = client.sendSync(request, Context.NONE)) { assertEquals("first path", response.getBodyAsBinaryData().toString()); assertEquals(200, response.getStatusCode()); } } @Test @Tag("Live") public void testCannotGetPlaybackClient() { RuntimeException thrown = assertThrows(IllegalStateException.class, () -> interceptorManager.getPlaybackClient()); assertEquals("A playback client can only be requested in PLAYBACK mode.", thrown.getMessage()); } @Test @Tag("Live") public void testCannotGetRecordPolicy() { RuntimeException thrown = assertThrows(IllegalStateException.class, () -> interceptorManager.getRecordPolicy()); assertEquals("A recording policy can only be requested in RECORD mode.", thrown.getMessage()); } @Test @Tag("Record") public void testRecordWithRedaction() { HttpURLConnectionHttpClient client = new HttpURLConnectionHttpClient(); interceptorManager.addSanitizers(CUSTOM_SANITIZER); HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(client) .policies(interceptorManager.getRecordPolicy()).build(); URL url; try { url = new UrlBuilder() .setHost("localhost") .setPath("/fr/path/1") .setPort(3000) .setScheme("http") .toUrl(); } catch (MalformedURLException e) { throw new RuntimeException(e); } HttpRequest request = new HttpRequest(HttpMethod.GET, url) .setHeader(OCP_APIM_SUBSCRIPTION_KEY, 
"SECRET_API_KEY") .setHeader(HttpHeaderName.CONTENT_TYPE, "application/json") .setHeader(HttpHeaderName.ACCEPT, "*/*"); try (HttpResponse response = pipeline.sendSync(request, Context.NONE)) { assertEquals(response.getStatusCode(), 200); assertEquals(200, response.getStatusCode()); RecordedTestProxyData recordedTestProxyData = readDataFromFile(); RecordedTestProxyData.TestProxyDataRecord record = recordedTestProxyData.getTestProxyDataRecords().get(0); assertEquals("http: assertEquals(REDACTED, record.getHeaders().get("Ocp-Apim-Subscription-Key")); assertTrue(record.getResponseHeaders().get("Operation-Location") .startsWith("https: assertEquals(REDACTED, record.getResponse().get("modelId")); } } @Test @Tag("Playback") public void testPlaybackWithRedaction() { interceptorManager.addSanitizers(CUSTOM_SANITIZER); interceptorManager.addMatchers(new ArrayList<>(Arrays.asList(new CustomMatcher() .setExcludedHeaders(Arrays.asList("Ocp-Apim-Subscription-Key"))))); HttpClient client = interceptorManager.getPlaybackClient(); URL url; try { url = new UrlBuilder() .setHost("localhost") .setPort(3000) .setPath("/fr/models") .setScheme("http") .toUrl(); } catch (MalformedURLException e) { throw new RuntimeException(e); } HttpRequest request = new HttpRequest(HttpMethod.GET, url) .setHeader(OCP_APIM_SUBSCRIPTION_KEY, "SECRET_API_KEY") .setHeader(HttpHeaderName.CONTENT_TYPE, "application/json") .setHeader(HttpHeaderName.ACCEPT, "*/*"); try (HttpResponse response = client.sendSync(request, Context.NONE)) { assertEquals(200, response.getStatusCode()); } } @Test @Tag("Record") public void testBodyRegexRedactRecord() { HttpURLConnectionHttpClient client = new HttpURLConnectionHttpClient(); interceptorManager.addSanitizers(CUSTOM_SANITIZER); HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(client) .policies(interceptorManager.getRecordPolicy()).build(); URL url; try { url = new UrlBuilder() .setHost("localhost") .setPath("/fr/path/2") .setPort(3000) .setScheme("http") 
.toUrl(); } catch (MalformedURLException e) { throw new RuntimeException(e); } HttpRequest request = new HttpRequest(HttpMethod.GET, url); request.setHeader(HttpHeaderName.CONTENT_TYPE, "application/json"); try (HttpResponse response = pipeline.sendSync(request, Context.NONE)) { assertEquals(200, response.getStatusCode()); } RecordedTestProxyData recordedTestProxyData = readDataFromFile(); RecordedTestProxyData.TestProxyDataRecord record = recordedTestProxyData.getTestProxyDataRecords().get(0); assertEquals("http: assertTrue(record.getResponse().get("Body").contains("<UserDelegationKey><SignedTid>REDACTED</SignedTid></UserDelegationKey>")); assertTrue(record.getResponse().get("primaryKey").contains("<PrimaryKey>REDACTED</PrimaryKey>")); assertEquals(record.getResponse().get("TableName"), REDACTED); } @Test @Tag("Live") public void canGetTestProxyVersion() { String version = TestProxyUtils.getTestProxyVersion(); assertNotNull(version); } @Test @Tag("Record") public void testResetTestProxyData() throws MalformedURLException { HttpURLConnectionHttpClient client = new HttpURLConnectionHttpClient(); final HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(client) .policies(interceptorManager.getRecordPolicy()) .build(); try (HttpResponse response = pipeline.sendSync( new HttpRequest(HttpMethod.GET, new URL("http: assertEquals(200, response.getStatusCode()); HttpHeaders headers = response.getRequest().getHeaders(); assertNull(headers.get(HttpHeaderName.fromString("x-recording-upstream-base-uri"))); assertNull(headers.get(HttpHeaderName.fromString("x-recording-mode"))); assertNull(headers.get(HttpHeaderName.fromString("x-recording-id"))); assertNull(headers.get(HttpHeaderName.fromString("x-recording-skip"))); } } @Test @Tag("Record") private RecordedTestProxyData readDataFromFile() { String filePath = Paths.get(TestUtils.getRecordFolder().getPath(), this.testContextManager.getTestPlaybackRecordingName()) + ".json"; File recordFile = new File(filePath); try 
(BufferedReader reader = Files.newBufferedReader(recordFile.toPath())) { return RECORD_MAPPER.readValue(reader, RecordedTestProxyData.class); } catch (IOException ex) { throw new UncheckedIOException(ex); } } @JsonIgnoreProperties(ignoreUnknown = true) static class RecordedTestProxyData { @JsonProperty("Entries") private final LinkedList<TestProxyDataRecord> testProxyDataRecords; RecordedTestProxyData() { testProxyDataRecords = new LinkedList<>(); } public LinkedList<TestProxyDataRecord> getTestProxyDataRecords() { return testProxyDataRecords; } @JsonIgnoreProperties(ignoreUnknown = true) static class TestProxyDataRecord { @JsonProperty("RequestMethod") private String method; @JsonProperty("RequestUri") private String uri; @JsonProperty("RequestHeaders") private Map<String, String> headers; @JsonProperty("ResponseBody") private Map<String, String> response; @JsonProperty("ResponseHeaders") private Map<String, String> responseHeaders; @JsonProperty("RequestBody") private String requestBody; public String getMethod() { return method; } public String getUri() { return uri; } public Map<String, String> getHeaders() { return headers; } public Map<String, String> getResponse() { return response; } public Map<String, String> getResponseHeaders() { return responseHeaders; } public String getRequestBody() { return requestBody; } } } }
// Integration test suite for the azure-core-test test proxy: covers Record, Playback and
// Live modes (@Tag), recording ordering, @DoNotRecord, request-body-less recording,
// header/body sanitization via TestProxySanitizer, custom matchers, and recording-file
// verification through the nested RecordedTestProxyData Jackson model.
// NOTE(review): this extract is corrupted — every string literal that contained a full URL
// was truncated at "http:"/"https:" (the "//host…" tail was stripped as if it were a line
// comment), leaving unterminated string literals (e.g. `assertEquals("http:`). The
// assertions on recorded URIs must be restored from the original file before this compiles.
class TestProxyTests extends TestProxyTestBase { public static final String TEST_DATA = "{\"test\":\"proxy\"}"; static TestProxyTestServer server; private static final ObjectMapper RECORD_MAPPER = new ObjectMapper().enable(SerializationFeature.INDENT_OUTPUT); private static final List<TestProxySanitizer> CUSTOM_SANITIZER = new ArrayList<>(); public static final String REDACTED = "REDACTED"; private static final HttpHeaderName OCP_APIM_SUBSCRIPTION_KEY = HttpHeaderName.fromString("Ocp-Apim-Subscription-Key"); static { CUSTOM_SANITIZER.add(new TestProxySanitizer("$..modelId", null, REDACTED, TestProxySanitizerType.BODY_KEY)); CUSTOM_SANITIZER.add(new TestProxySanitizer("TableName\\\"*:*\\\"(?<tablename>.*)\\\"", REDACTED, TestProxySanitizerType.BODY_REGEX).setGroupForReplace("tablename")); } @BeforeAll public static void setupClass() { server = new TestProxyTestServer(); } @AfterAll public static void teardownClass() { server.close(); } @Test @Tag("Record") public void testBasicRecord() { HttpURLConnectionHttpClient client = new HttpURLConnectionHttpClient(); HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(client) .policies(interceptorManager.getRecordPolicy()).build(); URL url = null; try { url = new UrlBuilder().setHost("localhost").setPort(3000).setScheme("http").toUrl(); } catch (MalformedURLException e) { throw new RuntimeException(e); } testResourceNamer.randomName("test", 10); testResourceNamer.now(); HttpRequest request = new HttpRequest(HttpMethod.GET, url); try (HttpResponse response = pipeline.sendSync(request, Context.NONE)) { assertEquals(200, response.getStatusCode()); } } @Test @Tag("Playback") public void testOrdering() { String name = testResourceNamer.randomName("test", 10); assertEquals("test32950", name); } @Test @Tag("Record") @DoNotRecord public void testDoNotRecord() { testResourceNamer.now(); } @Test @Tag("Playback") @DoNotRecord public void testDoNotPlayback() { testResourceNamer.now(); } @Test @Tag("Playback") public void 
testMismatch() { HttpClient client = interceptorManager.getPlaybackClient(); URL url; try { url = new UrlBuilder().setHost("localhost").setPort(3000).setScheme("http").setPath("first/path").toUrl(); } catch (MalformedURLException e) { throw new RuntimeException(e); } HttpRequest request = new HttpRequest(HttpMethod.GET, url); RuntimeException thrown = assertThrows(RuntimeException.class, () -> client.sendSync(request, Context.NONE)); assertTrue(thrown.getMessage().contains("Uri doesn't match")); } @Test @Tag("Record") @RecordWithoutRequestBody public void testRecordWithPath() { HttpURLConnectionHttpClient client = new HttpURLConnectionHttpClient(); HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(client) .policies(interceptorManager.getRecordPolicy()).build(); URL url; try { url = new UrlBuilder().setHost("localhost").setPort(3000).setPath("first/path").setScheme("http").toUrl(); } catch (MalformedURLException e) { throw new RuntimeException(e); } testResourceNamer.randomName("test", 10); testResourceNamer.now(); HttpRequest request = new HttpRequest(HttpMethod.POST, url).setBody(TEST_DATA) .setHeader(HttpHeaderName.CONTENT_TYPE, "application/json") .setHeader(HttpHeaderName.CONTENT_LENGTH, String.valueOf(TEST_DATA.length())); try (HttpResponse response = pipeline.sendSync(request, Context.NONE)) { assertEquals(200, response.getStatusCode()); } } @Test @Tag("Record") public void testRecordWithHeaders() { HttpURLConnectionHttpClient client = new HttpURLConnectionHttpClient(); HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(client) .policies(interceptorManager.getRecordPolicy()).build(); URL url; try { url = new UrlBuilder().setHost("localhost").setPort(3000).setPath("echoheaders").setScheme("http").toUrl(); } catch (MalformedURLException e) { throw new RuntimeException(e); } testResourceNamer.randomName("test", 10); testResourceNamer.now(); HttpRequest request = new HttpRequest(HttpMethod.GET, url) 
.setHeader(HttpHeaderName.fromString("header1"), "value1") .setHeader(HttpHeaderName.fromString("header2"), "value2"); try (HttpResponse response = pipeline.sendSync(request, Context.NONE)) { assertEquals(200, response.getStatusCode()); } } @Test @Tag("Playback") public void testPlayback() { HttpClient client = interceptorManager.getPlaybackClient(); URL url; try { url = new UrlBuilder().setHost("localhost").setPort(3000).setPath("first/path").setScheme("http").toUrl(); } catch (MalformedURLException e) { throw new RuntimeException(e); } HttpRequest request = new HttpRequest(HttpMethod.GET, url) .setHeader(HttpHeaderName.ACCEPT, "*/*"); try (HttpResponse response = client.sendSync(request, Context.NONE)) { assertEquals("first path", response.getBodyAsBinaryData().toString()); assertEquals(200, response.getStatusCode()); } } @Test @Tag("Live") public void testCannotGetPlaybackClient() { RuntimeException thrown = assertThrows(IllegalStateException.class, () -> interceptorManager.getPlaybackClient()); assertEquals("A playback client can only be requested in PLAYBACK mode.", thrown.getMessage()); } @Test @Tag("Live") public void testCannotGetRecordPolicy() { RuntimeException thrown = assertThrows(IllegalStateException.class, () -> interceptorManager.getRecordPolicy()); assertEquals("A recording policy can only be requested in RECORD mode.", thrown.getMessage()); } @Test @Tag("Record") public void testRecordWithRedaction() { HttpURLConnectionHttpClient client = new HttpURLConnectionHttpClient(); interceptorManager.addSanitizers(CUSTOM_SANITIZER); HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(client) .policies(interceptorManager.getRecordPolicy()).build(); URL url; try { url = new UrlBuilder() .setHost("localhost") .setPath("/fr/path/1") .setPort(3000) .setScheme("http") .toUrl(); } catch (MalformedURLException e) { throw new RuntimeException(e); } HttpRequest request = new HttpRequest(HttpMethod.GET, url) .setHeader(OCP_APIM_SUBSCRIPTION_KEY, 
"SECRET_API_KEY") .setHeader(HttpHeaderName.CONTENT_TYPE, "application/json") .setHeader(HttpHeaderName.ACCEPT, "*/*"); try (HttpResponse response = pipeline.sendSync(request, Context.NONE)) { assertEquals(response.getStatusCode(), 200); assertEquals(200, response.getStatusCode()); RecordedTestProxyData recordedTestProxyData = readDataFromFile(); RecordedTestProxyData.TestProxyDataRecord record = recordedTestProxyData.getTestProxyDataRecords().get(0); assertEquals("http: assertEquals(REDACTED, record.getHeaders().get("Ocp-Apim-Subscription-Key")); assertTrue(record.getResponseHeaders().get("Operation-Location") .startsWith("https: assertEquals(REDACTED, record.getResponse().get("modelId")); } } @Test @Tag("Playback") public void testPlaybackWithRedaction() { interceptorManager.addSanitizers(CUSTOM_SANITIZER); interceptorManager.addMatchers(new ArrayList<>(Arrays.asList(new CustomMatcher() .setExcludedHeaders(Arrays.asList("Ocp-Apim-Subscription-Key"))))); HttpClient client = interceptorManager.getPlaybackClient(); URL url; try { url = new UrlBuilder() .setHost("localhost") .setPort(3000) .setPath("/fr/models") .setScheme("http") .toUrl(); } catch (MalformedURLException e) { throw new RuntimeException(e); } HttpRequest request = new HttpRequest(HttpMethod.GET, url) .setHeader(OCP_APIM_SUBSCRIPTION_KEY, "SECRET_API_KEY") .setHeader(HttpHeaderName.CONTENT_TYPE, "application/json") .setHeader(HttpHeaderName.ACCEPT, "*/*"); try (HttpResponse response = client.sendSync(request, Context.NONE)) { assertEquals(200, response.getStatusCode()); } } @Test @Tag("Record") public void testBodyRegexRedactRecord() { HttpURLConnectionHttpClient client = new HttpURLConnectionHttpClient(); interceptorManager.addSanitizers(CUSTOM_SANITIZER); HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(client) .policies(interceptorManager.getRecordPolicy()).build(); URL url; try { url = new UrlBuilder() .setHost("localhost") .setPath("/fr/path/2") .setPort(3000) .setScheme("http") 
.toUrl(); } catch (MalformedURLException e) { throw new RuntimeException(e); } HttpRequest request = new HttpRequest(HttpMethod.GET, url); request.setHeader(HttpHeaderName.CONTENT_TYPE, "application/json"); try (HttpResponse response = pipeline.sendSync(request, Context.NONE)) { assertEquals(200, response.getStatusCode()); } RecordedTestProxyData recordedTestProxyData = readDataFromFile(); RecordedTestProxyData.TestProxyDataRecord record = recordedTestProxyData.getTestProxyDataRecords().get(0); assertEquals("http: assertTrue(record.getResponse().get("Body").contains("<UserDelegationKey><SignedTid>REDACTED</SignedTid></UserDelegationKey>")); assertTrue(record.getResponse().get("primaryKey").contains("<PrimaryKey>REDACTED</PrimaryKey>")); assertEquals(record.getResponse().get("TableName"), REDACTED); } @Test @Tag("Live") public void canGetTestProxyVersion() { String version = TestProxyUtils.getTestProxyVersion(); assertNotNull(version); } @Test @Tag("Record") public void testResetTestProxyData() throws MalformedURLException { HttpURLConnectionHttpClient client = new HttpURLConnectionHttpClient(); final HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(client) .policies(interceptorManager.getRecordPolicy()) .build(); try (HttpResponse response = pipeline.sendSync( new HttpRequest(HttpMethod.GET, new URL("http: assertEquals(200, response.getStatusCode()); HttpHeaders headers = response.getRequest().getHeaders(); assertNull(headers.get(HttpHeaderName.fromString("x-recording-upstream-base-uri"))); assertNull(headers.get(HttpHeaderName.fromString("x-recording-mode"))); assertNull(headers.get(HttpHeaderName.fromString("x-recording-id"))); assertNull(headers.get(HttpHeaderName.fromString("x-recording-skip"))); } } @Test @Tag("Record") private RecordedTestProxyData readDataFromFile() { String filePath = Paths.get(TestUtils.getRecordFolder().getPath(), this.testContextManager.getTestPlaybackRecordingName()) + ".json"; File recordFile = new File(filePath); try 
(BufferedReader reader = Files.newBufferedReader(recordFile.toPath())) { return RECORD_MAPPER.readValue(reader, RecordedTestProxyData.class); } catch (IOException ex) { throw new UncheckedIOException(ex); } } @JsonIgnoreProperties(ignoreUnknown = true) static class RecordedTestProxyData { @JsonProperty("Entries") private final LinkedList<TestProxyDataRecord> testProxyDataRecords; RecordedTestProxyData() { testProxyDataRecords = new LinkedList<>(); } public LinkedList<TestProxyDataRecord> getTestProxyDataRecords() { return testProxyDataRecords; } @JsonIgnoreProperties(ignoreUnknown = true) static class TestProxyDataRecord { @JsonProperty("RequestMethod") private String method; @JsonProperty("RequestUri") private String uri; @JsonProperty("RequestHeaders") private Map<String, String> headers; @JsonProperty("ResponseBody") private Map<String, String> response; @JsonProperty("ResponseHeaders") private Map<String, String> responseHeaders; @JsonProperty("RequestBody") private String requestBody; public String getMethod() { return method; } public String getUri() { return uri; } public Map<String, String> getHeaders() { return headers; } public Map<String, String> getResponse() { return response; } public Map<String, String> getResponseHeaders() { return responseHeaders; } public String getRequestBody() { return requestBody; } } } }
The base URL is http://localhost:3000/, but this test case specifically verifies that the response comes back with a 200 status code and the expected redirected URL. Additionally, the recording currently shows both outgoing requests (the original and the redirect). Should that be sufficient?
public void testRecordWithRedirect() { HttpURLConnectionHttpClient client = new HttpURLConnectionHttpClient(); HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(client) .policies(new RedirectPolicy(), interceptorManager.getRecordPolicy()).build(); URL url; try { url = new UrlBuilder() .setHost("localhost") .setPath("/getRedirect") .setPort(3000) .setScheme("http") .toUrl(); } catch (MalformedURLException e) { throw new RuntimeException(e); } HttpRequest request = new HttpRequest(HttpMethod.GET, url); try (HttpResponse response = pipeline.sendSync(request, Context.NONE)) { assertEquals(200, response.getStatusCode()); assertEquals("http: assertNull(response.getRequest().getHeaders().get(HttpHeaderName.fromString("x-recording-upstream-base-uri"))); } }
assertEquals("http:
public void testRecordWithRedirect() { HttpURLConnectionHttpClient client = new HttpURLConnectionHttpClient(); HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(client) .policies(new RedirectPolicy(), interceptorManager.getRecordPolicy()).build(); URL url; try { url = new UrlBuilder() .setHost("localhost") .setPath("/getRedirect") .setPort(3000) .setScheme("http") .toUrl(); } catch (MalformedURLException e) { throw new RuntimeException(e); } HttpRequest request = new HttpRequest(HttpMethod.GET, url); try (HttpResponse response = pipeline.sendSync(request, Context.NONE)) { assertEquals(200, response.getStatusCode()); assertEquals("http: assertNull(response.getRequest().getHeaders().get(HttpHeaderName.fromString("x-recording-upstream-base-uri"))); } }
// Duplicate context copy of the TestProxyTests suite (the dataset row repeats the class as
// context_before/context_after around the reviewed method). Same content as the other copies.
// NOTE(review): URL string literals are truncated at "http:"/"https:" by the extraction,
// leaving unterminated literals; restore from the original file before compiling.
class TestProxyTests extends TestProxyTestBase { public static final String TEST_DATA = "{\"test\":\"proxy\"}"; static TestProxyTestServer server; private static final ObjectMapper RECORD_MAPPER = new ObjectMapper().enable(SerializationFeature.INDENT_OUTPUT); private static final List<TestProxySanitizer> CUSTOM_SANITIZER = new ArrayList<>(); public static final String REDACTED = "REDACTED"; private static final HttpHeaderName OCP_APIM_SUBSCRIPTION_KEY = HttpHeaderName.fromString("Ocp-Apim-Subscription-Key"); static { CUSTOM_SANITIZER.add(new TestProxySanitizer("$..modelId", null, REDACTED, TestProxySanitizerType.BODY_KEY)); CUSTOM_SANITIZER.add(new TestProxySanitizer("TableName\\\"*:*\\\"(?<tablename>.*)\\\"", REDACTED, TestProxySanitizerType.BODY_REGEX).setGroupForReplace("tablename")); } @BeforeAll public static void setupClass() { server = new TestProxyTestServer(); } @AfterAll public static void teardownClass() { server.close(); } @Test @Tag("Record") public void testBasicRecord() { HttpURLConnectionHttpClient client = new HttpURLConnectionHttpClient(); HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(client) .policies(interceptorManager.getRecordPolicy()).build(); URL url = null; try { url = new UrlBuilder().setHost("localhost").setPort(3000).setScheme("http").toUrl(); } catch (MalformedURLException e) { throw new RuntimeException(e); } testResourceNamer.randomName("test", 10); testResourceNamer.now(); HttpRequest request = new HttpRequest(HttpMethod.GET, url); try (HttpResponse response = pipeline.sendSync(request, Context.NONE)) { assertEquals(200, response.getStatusCode()); } } @Test @Tag("Playback") public void testOrdering() { String name = testResourceNamer.randomName("test", 10); assertEquals("test32950", name); } @Test @Tag("Record") @DoNotRecord public void testDoNotRecord() { testResourceNamer.now(); } @Test @Tag("Playback") @DoNotRecord public void testDoNotPlayback() { testResourceNamer.now(); } @Test @Tag("Playback") public void 
testMismatch() { HttpClient client = interceptorManager.getPlaybackClient(); URL url; try { url = new UrlBuilder().setHost("localhost").setPort(3000).setScheme("http").setPath("first/path").toUrl(); } catch (MalformedURLException e) { throw new RuntimeException(e); } HttpRequest request = new HttpRequest(HttpMethod.GET, url); RuntimeException thrown = assertThrows(RuntimeException.class, () -> client.sendSync(request, Context.NONE)); assertTrue(thrown.getMessage().contains("Uri doesn't match")); } @Test @Tag("Record") @RecordWithoutRequestBody public void testRecordWithPath() { HttpURLConnectionHttpClient client = new HttpURLConnectionHttpClient(); HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(client) .policies(interceptorManager.getRecordPolicy()).build(); URL url; try { url = new UrlBuilder().setHost("localhost").setPort(3000).setPath("first/path").setScheme("http").toUrl(); } catch (MalformedURLException e) { throw new RuntimeException(e); } testResourceNamer.randomName("test", 10); testResourceNamer.now(); HttpRequest request = new HttpRequest(HttpMethod.POST, url).setBody(TEST_DATA) .setHeader(HttpHeaderName.CONTENT_TYPE, "application/json") .setHeader(HttpHeaderName.CONTENT_LENGTH, String.valueOf(TEST_DATA.length())); try (HttpResponse response = pipeline.sendSync(request, Context.NONE)) { assertEquals(200, response.getStatusCode()); } } @Test @Tag("Record") public void testRecordWithHeaders() { HttpURLConnectionHttpClient client = new HttpURLConnectionHttpClient(); HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(client) .policies(interceptorManager.getRecordPolicy()).build(); URL url; try { url = new UrlBuilder().setHost("localhost").setPort(3000).setPath("echoheaders").setScheme("http").toUrl(); } catch (MalformedURLException e) { throw new RuntimeException(e); } testResourceNamer.randomName("test", 10); testResourceNamer.now(); HttpRequest request = new HttpRequest(HttpMethod.GET, url) 
.setHeader(HttpHeaderName.fromString("header1"), "value1") .setHeader(HttpHeaderName.fromString("header2"), "value2"); try (HttpResponse response = pipeline.sendSync(request, Context.NONE)) { assertEquals(200, response.getStatusCode()); } } @Test @Tag("Playback") public void testPlayback() { HttpClient client = interceptorManager.getPlaybackClient(); URL url; try { url = new UrlBuilder().setHost("localhost").setPort(3000).setPath("first/path").setScheme("http").toUrl(); } catch (MalformedURLException e) { throw new RuntimeException(e); } HttpRequest request = new HttpRequest(HttpMethod.GET, url) .setHeader(HttpHeaderName.ACCEPT, "*/*"); try (HttpResponse response = client.sendSync(request, Context.NONE)) { assertEquals("first path", response.getBodyAsBinaryData().toString()); assertEquals(200, response.getStatusCode()); } } @Test @Tag("Live") public void testCannotGetPlaybackClient() { RuntimeException thrown = assertThrows(IllegalStateException.class, () -> interceptorManager.getPlaybackClient()); assertEquals("A playback client can only be requested in PLAYBACK mode.", thrown.getMessage()); } @Test @Tag("Live") public void testCannotGetRecordPolicy() { RuntimeException thrown = assertThrows(IllegalStateException.class, () -> interceptorManager.getRecordPolicy()); assertEquals("A recording policy can only be requested in RECORD mode.", thrown.getMessage()); } @Test @Tag("Record") public void testRecordWithRedaction() { HttpURLConnectionHttpClient client = new HttpURLConnectionHttpClient(); interceptorManager.addSanitizers(CUSTOM_SANITIZER); HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(client) .policies(interceptorManager.getRecordPolicy()).build(); URL url; try { url = new UrlBuilder() .setHost("localhost") .setPath("/fr/path/1") .setPort(3000) .setScheme("http") .toUrl(); } catch (MalformedURLException e) { throw new RuntimeException(e); } HttpRequest request = new HttpRequest(HttpMethod.GET, url) .setHeader(OCP_APIM_SUBSCRIPTION_KEY, 
"SECRET_API_KEY") .setHeader(HttpHeaderName.CONTENT_TYPE, "application/json") .setHeader(HttpHeaderName.ACCEPT, "*/*"); try (HttpResponse response = pipeline.sendSync(request, Context.NONE)) { assertEquals(response.getStatusCode(), 200); assertEquals(200, response.getStatusCode()); RecordedTestProxyData recordedTestProxyData = readDataFromFile(); RecordedTestProxyData.TestProxyDataRecord record = recordedTestProxyData.getTestProxyDataRecords().get(0); assertEquals("http: assertEquals(REDACTED, record.getHeaders().get("Ocp-Apim-Subscription-Key")); assertTrue(record.getResponseHeaders().get("Operation-Location") .startsWith("https: assertEquals(REDACTED, record.getResponse().get("modelId")); } } @Test @Tag("Playback") public void testPlaybackWithRedaction() { interceptorManager.addSanitizers(CUSTOM_SANITIZER); interceptorManager.addMatchers(new ArrayList<>(Arrays.asList(new CustomMatcher() .setExcludedHeaders(Arrays.asList("Ocp-Apim-Subscription-Key"))))); HttpClient client = interceptorManager.getPlaybackClient(); URL url; try { url = new UrlBuilder() .setHost("localhost") .setPort(3000) .setPath("/fr/models") .setScheme("http") .toUrl(); } catch (MalformedURLException e) { throw new RuntimeException(e); } HttpRequest request = new HttpRequest(HttpMethod.GET, url) .setHeader(OCP_APIM_SUBSCRIPTION_KEY, "SECRET_API_KEY") .setHeader(HttpHeaderName.CONTENT_TYPE, "application/json") .setHeader(HttpHeaderName.ACCEPT, "*/*"); try (HttpResponse response = client.sendSync(request, Context.NONE)) { assertEquals(200, response.getStatusCode()); } } @Test @Tag("Record") public void testBodyRegexRedactRecord() { HttpURLConnectionHttpClient client = new HttpURLConnectionHttpClient(); interceptorManager.addSanitizers(CUSTOM_SANITIZER); HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(client) .policies(interceptorManager.getRecordPolicy()).build(); URL url; try { url = new UrlBuilder() .setHost("localhost") .setPath("/fr/path/2") .setPort(3000) .setScheme("http") 
.toUrl(); } catch (MalformedURLException e) { throw new RuntimeException(e); } HttpRequest request = new HttpRequest(HttpMethod.GET, url); request.setHeader(HttpHeaderName.CONTENT_TYPE, "application/json"); try (HttpResponse response = pipeline.sendSync(request, Context.NONE)) { assertEquals(200, response.getStatusCode()); } RecordedTestProxyData recordedTestProxyData = readDataFromFile(); RecordedTestProxyData.TestProxyDataRecord record = recordedTestProxyData.getTestProxyDataRecords().get(0); assertEquals("http: assertTrue(record.getResponse().get("Body").contains("<UserDelegationKey><SignedTid>REDACTED</SignedTid></UserDelegationKey>")); assertTrue(record.getResponse().get("primaryKey").contains("<PrimaryKey>REDACTED</PrimaryKey>")); assertEquals(record.getResponse().get("TableName"), REDACTED); } @Test @Tag("Live") public void canGetTestProxyVersion() { String version = TestProxyUtils.getTestProxyVersion(); assertNotNull(version); } @Test @Tag("Record") public void testResetTestProxyData() throws MalformedURLException { HttpURLConnectionHttpClient client = new HttpURLConnectionHttpClient(); final HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(client) .policies(interceptorManager.getRecordPolicy()) .build(); try (HttpResponse response = pipeline.sendSync( new HttpRequest(HttpMethod.GET, new URL("http: assertEquals(200, response.getStatusCode()); HttpHeaders headers = response.getRequest().getHeaders(); assertNull(headers.get(HttpHeaderName.fromString("x-recording-upstream-base-uri"))); assertNull(headers.get(HttpHeaderName.fromString("x-recording-mode"))); assertNull(headers.get(HttpHeaderName.fromString("x-recording-id"))); assertNull(headers.get(HttpHeaderName.fromString("x-recording-skip"))); } } @Test @Tag("Record") private RecordedTestProxyData readDataFromFile() { String filePath = Paths.get(TestUtils.getRecordFolder().getPath(), this.testContextManager.getTestPlaybackRecordingName()) + ".json"; File recordFile = new File(filePath); try 
(BufferedReader reader = Files.newBufferedReader(recordFile.toPath())) { return RECORD_MAPPER.readValue(reader, RecordedTestProxyData.class); } catch (IOException ex) { throw new UncheckedIOException(ex); } } @JsonIgnoreProperties(ignoreUnknown = true) static class RecordedTestProxyData { @JsonProperty("Entries") private final LinkedList<TestProxyDataRecord> testProxyDataRecords; RecordedTestProxyData() { testProxyDataRecords = new LinkedList<>(); } public LinkedList<TestProxyDataRecord> getTestProxyDataRecords() { return testProxyDataRecords; } @JsonIgnoreProperties(ignoreUnknown = true) static class TestProxyDataRecord { @JsonProperty("RequestMethod") private String method; @JsonProperty("RequestUri") private String uri; @JsonProperty("RequestHeaders") private Map<String, String> headers; @JsonProperty("ResponseBody") private Map<String, String> response; @JsonProperty("ResponseHeaders") private Map<String, String> responseHeaders; @JsonProperty("RequestBody") private String requestBody; public String getMethod() { return method; } public String getUri() { return uri; } public Map<String, String> getHeaders() { return headers; } public Map<String, String> getResponse() { return response; } public Map<String, String> getResponseHeaders() { return responseHeaders; } public String getRequestBody() { return requestBody; } } } }
// Third duplicate context copy of the TestProxyTests suite carried by this dataset row;
// content is identical to the previous copies.
// NOTE(review): URL string literals are truncated at "http:"/"https:" by the extraction,
// leaving unterminated literals; restore from the original file before compiling.
class TestProxyTests extends TestProxyTestBase { public static final String TEST_DATA = "{\"test\":\"proxy\"}"; static TestProxyTestServer server; private static final ObjectMapper RECORD_MAPPER = new ObjectMapper().enable(SerializationFeature.INDENT_OUTPUT); private static final List<TestProxySanitizer> CUSTOM_SANITIZER = new ArrayList<>(); public static final String REDACTED = "REDACTED"; private static final HttpHeaderName OCP_APIM_SUBSCRIPTION_KEY = HttpHeaderName.fromString("Ocp-Apim-Subscription-Key"); static { CUSTOM_SANITIZER.add(new TestProxySanitizer("$..modelId", null, REDACTED, TestProxySanitizerType.BODY_KEY)); CUSTOM_SANITIZER.add(new TestProxySanitizer("TableName\\\"*:*\\\"(?<tablename>.*)\\\"", REDACTED, TestProxySanitizerType.BODY_REGEX).setGroupForReplace("tablename")); } @BeforeAll public static void setupClass() { server = new TestProxyTestServer(); } @AfterAll public static void teardownClass() { server.close(); } @Test @Tag("Record") public void testBasicRecord() { HttpURLConnectionHttpClient client = new HttpURLConnectionHttpClient(); HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(client) .policies(interceptorManager.getRecordPolicy()).build(); URL url = null; try { url = new UrlBuilder().setHost("localhost").setPort(3000).setScheme("http").toUrl(); } catch (MalformedURLException e) { throw new RuntimeException(e); } testResourceNamer.randomName("test", 10); testResourceNamer.now(); HttpRequest request = new HttpRequest(HttpMethod.GET, url); try (HttpResponse response = pipeline.sendSync(request, Context.NONE)) { assertEquals(200, response.getStatusCode()); } } @Test @Tag("Playback") public void testOrdering() { String name = testResourceNamer.randomName("test", 10); assertEquals("test32950", name); } @Test @Tag("Record") @DoNotRecord public void testDoNotRecord() { testResourceNamer.now(); } @Test @Tag("Playback") @DoNotRecord public void testDoNotPlayback() { testResourceNamer.now(); } @Test @Tag("Playback") public void 
testMismatch() { HttpClient client = interceptorManager.getPlaybackClient(); URL url; try { url = new UrlBuilder().setHost("localhost").setPort(3000).setScheme("http").setPath("first/path").toUrl(); } catch (MalformedURLException e) { throw new RuntimeException(e); } HttpRequest request = new HttpRequest(HttpMethod.GET, url); RuntimeException thrown = assertThrows(RuntimeException.class, () -> client.sendSync(request, Context.NONE)); assertTrue(thrown.getMessage().contains("Uri doesn't match")); } @Test @Tag("Record") @RecordWithoutRequestBody public void testRecordWithPath() { HttpURLConnectionHttpClient client = new HttpURLConnectionHttpClient(); HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(client) .policies(interceptorManager.getRecordPolicy()).build(); URL url; try { url = new UrlBuilder().setHost("localhost").setPort(3000).setPath("first/path").setScheme("http").toUrl(); } catch (MalformedURLException e) { throw new RuntimeException(e); } testResourceNamer.randomName("test", 10); testResourceNamer.now(); HttpRequest request = new HttpRequest(HttpMethod.POST, url).setBody(TEST_DATA) .setHeader(HttpHeaderName.CONTENT_TYPE, "application/json") .setHeader(HttpHeaderName.CONTENT_LENGTH, String.valueOf(TEST_DATA.length())); try (HttpResponse response = pipeline.sendSync(request, Context.NONE)) { assertEquals(200, response.getStatusCode()); } } @Test @Tag("Record") public void testRecordWithHeaders() { HttpURLConnectionHttpClient client = new HttpURLConnectionHttpClient(); HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(client) .policies(interceptorManager.getRecordPolicy()).build(); URL url; try { url = new UrlBuilder().setHost("localhost").setPort(3000).setPath("echoheaders").setScheme("http").toUrl(); } catch (MalformedURLException e) { throw new RuntimeException(e); } testResourceNamer.randomName("test", 10); testResourceNamer.now(); HttpRequest request = new HttpRequest(HttpMethod.GET, url) 
.setHeader(HttpHeaderName.fromString("header1"), "value1") .setHeader(HttpHeaderName.fromString("header2"), "value2"); try (HttpResponse response = pipeline.sendSync(request, Context.NONE)) { assertEquals(200, response.getStatusCode()); } } @Test @Tag("Playback") public void testPlayback() { HttpClient client = interceptorManager.getPlaybackClient(); URL url; try { url = new UrlBuilder().setHost("localhost").setPort(3000).setPath("first/path").setScheme("http").toUrl(); } catch (MalformedURLException e) { throw new RuntimeException(e); } HttpRequest request = new HttpRequest(HttpMethod.GET, url) .setHeader(HttpHeaderName.ACCEPT, "*/*"); try (HttpResponse response = client.sendSync(request, Context.NONE)) { assertEquals("first path", response.getBodyAsBinaryData().toString()); assertEquals(200, response.getStatusCode()); } } @Test @Tag("Live") public void testCannotGetPlaybackClient() { RuntimeException thrown = assertThrows(IllegalStateException.class, () -> interceptorManager.getPlaybackClient()); assertEquals("A playback client can only be requested in PLAYBACK mode.", thrown.getMessage()); } @Test @Tag("Live") public void testCannotGetRecordPolicy() { RuntimeException thrown = assertThrows(IllegalStateException.class, () -> interceptorManager.getRecordPolicy()); assertEquals("A recording policy can only be requested in RECORD mode.", thrown.getMessage()); } @Test @Tag("Record") public void testRecordWithRedaction() { HttpURLConnectionHttpClient client = new HttpURLConnectionHttpClient(); interceptorManager.addSanitizers(CUSTOM_SANITIZER); HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(client) .policies(interceptorManager.getRecordPolicy()).build(); URL url; try { url = new UrlBuilder() .setHost("localhost") .setPath("/fr/path/1") .setPort(3000) .setScheme("http") .toUrl(); } catch (MalformedURLException e) { throw new RuntimeException(e); } HttpRequest request = new HttpRequest(HttpMethod.GET, url) .setHeader(OCP_APIM_SUBSCRIPTION_KEY, 
"SECRET_API_KEY") .setHeader(HttpHeaderName.CONTENT_TYPE, "application/json") .setHeader(HttpHeaderName.ACCEPT, "*/*"); try (HttpResponse response = pipeline.sendSync(request, Context.NONE)) { assertEquals(response.getStatusCode(), 200); assertEquals(200, response.getStatusCode()); RecordedTestProxyData recordedTestProxyData = readDataFromFile(); RecordedTestProxyData.TestProxyDataRecord record = recordedTestProxyData.getTestProxyDataRecords().get(0); assertEquals("http: assertEquals(REDACTED, record.getHeaders().get("Ocp-Apim-Subscription-Key")); assertTrue(record.getResponseHeaders().get("Operation-Location") .startsWith("https: assertEquals(REDACTED, record.getResponse().get("modelId")); } } @Test @Tag("Playback") public void testPlaybackWithRedaction() { interceptorManager.addSanitizers(CUSTOM_SANITIZER); interceptorManager.addMatchers(new ArrayList<>(Arrays.asList(new CustomMatcher() .setExcludedHeaders(Arrays.asList("Ocp-Apim-Subscription-Key"))))); HttpClient client = interceptorManager.getPlaybackClient(); URL url; try { url = new UrlBuilder() .setHost("localhost") .setPort(3000) .setPath("/fr/models") .setScheme("http") .toUrl(); } catch (MalformedURLException e) { throw new RuntimeException(e); } HttpRequest request = new HttpRequest(HttpMethod.GET, url) .setHeader(OCP_APIM_SUBSCRIPTION_KEY, "SECRET_API_KEY") .setHeader(HttpHeaderName.CONTENT_TYPE, "application/json") .setHeader(HttpHeaderName.ACCEPT, "*/*"); try (HttpResponse response = client.sendSync(request, Context.NONE)) { assertEquals(200, response.getStatusCode()); } } @Test @Tag("Record") public void testBodyRegexRedactRecord() { HttpURLConnectionHttpClient client = new HttpURLConnectionHttpClient(); interceptorManager.addSanitizers(CUSTOM_SANITIZER); HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(client) .policies(interceptorManager.getRecordPolicy()).build(); URL url; try { url = new UrlBuilder() .setHost("localhost") .setPath("/fr/path/2") .setPort(3000) .setScheme("http") 
.toUrl(); } catch (MalformedURLException e) { throw new RuntimeException(e); } HttpRequest request = new HttpRequest(HttpMethod.GET, url); request.setHeader(HttpHeaderName.CONTENT_TYPE, "application/json"); try (HttpResponse response = pipeline.sendSync(request, Context.NONE)) { assertEquals(200, response.getStatusCode()); } RecordedTestProxyData recordedTestProxyData = readDataFromFile(); RecordedTestProxyData.TestProxyDataRecord record = recordedTestProxyData.getTestProxyDataRecords().get(0); assertEquals("http: assertTrue(record.getResponse().get("Body").contains("<UserDelegationKey><SignedTid>REDACTED</SignedTid></UserDelegationKey>")); assertTrue(record.getResponse().get("primaryKey").contains("<PrimaryKey>REDACTED</PrimaryKey>")); assertEquals(record.getResponse().get("TableName"), REDACTED); } @Test @Tag("Live") public void canGetTestProxyVersion() { String version = TestProxyUtils.getTestProxyVersion(); assertNotNull(version); } @Test @Tag("Record") public void testResetTestProxyData() throws MalformedURLException { HttpURLConnectionHttpClient client = new HttpURLConnectionHttpClient(); final HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(client) .policies(interceptorManager.getRecordPolicy()) .build(); try (HttpResponse response = pipeline.sendSync( new HttpRequest(HttpMethod.GET, new URL("http: assertEquals(200, response.getStatusCode()); HttpHeaders headers = response.getRequest().getHeaders(); assertNull(headers.get(HttpHeaderName.fromString("x-recording-upstream-base-uri"))); assertNull(headers.get(HttpHeaderName.fromString("x-recording-mode"))); assertNull(headers.get(HttpHeaderName.fromString("x-recording-id"))); assertNull(headers.get(HttpHeaderName.fromString("x-recording-skip"))); } } @Test @Tag("Record") private RecordedTestProxyData readDataFromFile() { String filePath = Paths.get(TestUtils.getRecordFolder().getPath(), this.testContextManager.getTestPlaybackRecordingName()) + ".json"; File recordFile = new File(filePath); try 
(BufferedReader reader = Files.newBufferedReader(recordFile.toPath())) { return RECORD_MAPPER.readValue(reader, RecordedTestProxyData.class); } catch (IOException ex) { throw new UncheckedIOException(ex); } } @JsonIgnoreProperties(ignoreUnknown = true) static class RecordedTestProxyData { @JsonProperty("Entries") private final LinkedList<TestProxyDataRecord> testProxyDataRecords; RecordedTestProxyData() { testProxyDataRecords = new LinkedList<>(); } public LinkedList<TestProxyDataRecord> getTestProxyDataRecords() { return testProxyDataRecords; } @JsonIgnoreProperties(ignoreUnknown = true) static class TestProxyDataRecord { @JsonProperty("RequestMethod") private String method; @JsonProperty("RequestUri") private String uri; @JsonProperty("RequestHeaders") private Map<String, String> headers; @JsonProperty("ResponseBody") private Map<String, String> response; @JsonProperty("ResponseHeaders") private Map<String, String> responseHeaders; @JsonProperty("RequestBody") private String requestBody; public String getMethod() { return method; } public String getUri() { return uri; } public Map<String, String> getHeaders() { return headers; } public Map<String, String> getResponse() { return response; } public Map<String, String> getResponseHeaders() { return responseHeaders; } public String getRequestBody() { return requestBody; } } } }
There is a utility method, `CoreUtils.extractSizeFromContentRange`, that this Content-Range parsing can be replaced with.
/**
 * Downloads the blob identified by {@code digest} in {@code CHUNK_SIZE}-byte ranged requests and
 * writes each chunk to {@code channel}, validating the SHA-256 of the received content against the
 * requested digest when the download completes.
 *
 * @param digest The digest of the blob to download; must not be {@code null}.
 * @param channel The destination channel the blob content is written to.
 * @param context Additional context passed through the HTTP pipeline.
 * @return The context (augmented with the eager-read hint) used for the service calls.
 */
private Context downloadBlobInternal(String digest, WritableByteChannel channel, Context context) {
    Objects.requireNonNull(digest, "'digest' cannot be null.");
    MessageDigest sha256 = createSha256();
    try {
        HttpRange range = new HttpRange(0, (long) CHUNK_SIZE);
        // Ask the pipeline to read the response body eagerly so each chunk can be written synchronously below.
        context = context.addData("azure-eagerly-read-response", true);
        Response<BinaryData> lastChunk = blobsImpl.getChunkWithResponse(repositoryName, digest, range.toString(), context);
        // Hand the full response headers to the helper and let it extract the total size from the
        // Content-Range header (CoreUtils.extractSizeFromContentRange) instead of pulling the raw
        // header value out here.
        long blobSize = getBlobSize(lastChunk.getHeaders());
        long length = writeChunk(lastChunk, sha256, channel);
        // First chunk already written; fetch and write the remaining ranges.
        for (long p = length; p < blobSize; p += CHUNK_SIZE) {
            range = new HttpRange(p, (long) CHUNK_SIZE);
            lastChunk = blobsImpl.getChunkWithResponse(repositoryName, digest, range.toString(), context);
            writeChunk(lastChunk, sha256, channel);
        }
    } catch (AcrErrorsException exception) {
        throw LOGGER.logExceptionAsError(mapAcrErrorsException(exception));
    }
    // Throws if the computed hash does not match the requested digest.
    validateDigest(sha256, digest);
    return context;
}
long blobSize = getBlobSize(lastChunk.getHeaders().get(HttpHeaderName.CONTENT_RANGE));
/**
 * Downloads the blob identified by {@code digest} in {@code CHUNK_SIZE}-byte ranged requests and
 * writes the content to {@code channel}.
 *
 * @param digest The digest of the blob to download; must not be {@code null}.
 * @param channel The destination channel the blob content is written to.
 * @param context Additional context passed through the HTTP pipeline.
 * @return The context (augmented with the eager-read hint) used for the service calls.
 */
private Context downloadBlobInternal(String digest, WritableByteChannel channel, Context context) {
    Objects.requireNonNull(digest, "'digest' cannot be null.");
    MessageDigest sha256 = createSha256();
    try {
        HttpRange range = new HttpRange(0, (long) CHUNK_SIZE);
        // Hint to the pipeline to read the response body eagerly so chunks can be written synchronously.
        context = context.addData("azure-eagerly-read-response", true);
        Response<BinaryData> lastChunk = blobsImpl.getChunkWithResponse(repositoryName, digest, range.toString(), context);
        // getBlobSize presumably derives the total size from the Content-Range response header — confirm in UtilsImpl.
        long blobSize = getBlobSize(lastChunk.getHeaders());
        // writeChunk appears to return the number of bytes written, used as the next range start.
        long length = writeChunk(lastChunk, sha256, channel);
        for (long p = length; p < blobSize; p += CHUNK_SIZE) {
            range = new HttpRange(p, (long) CHUNK_SIZE);
            lastChunk = blobsImpl.getChunkWithResponse(repositoryName, digest, range.toString(), context);
            writeChunk(lastChunk, sha256, channel);
        }
    } catch (AcrErrorsException exception) {
        throw LOGGER.logExceptionAsError(mapAcrErrorsException(exception));
    }
    // NOTE(review): validateDigest presumably throws when the computed hash mismatches — verify.
    validateDigest(sha256, digest);
    return context;
}
/**
 * Synchronous client for uploading and downloading OCI artifacts — manifests and blobs — to and
 * from a single Azure Container Registry repository.
 */
class ContainerRegistryContentClient {
    private static final ClientLogger LOGGER = new ClientLogger(ContainerRegistryContentClient.class);
    private final ContainerRegistryBlobsImpl blobsImpl;
    private final ContainerRegistriesImpl registriesImpl;
    private final String endpoint;
    private final String repositoryName;
    private final Tracer tracer;

    ContainerRegistryContentClient(String repositoryName, HttpPipeline httpPipeline, String endpoint, String version, Tracer tracer) {
        this.repositoryName = repositoryName;
        this.endpoint = endpoint;
        AzureContainerRegistryImpl registryClient = new AzureContainerRegistryImpl(httpPipeline, endpoint, version);
        this.blobsImpl = registryClient.getContainerRegistryBlobs();
        this.registriesImpl = registryClient.getContainerRegistries();
        this.tracer = tracer;
    }

    /**
     * Gets the registry repository this client operates on.
     *
     * @return The name of the repository.
     */
    public String getRepositoryName() {
        return repositoryName;
    }

    /**
     * Gets the complete registry endpoint.
     *
     * @return The registry endpoint including the authority.
     */
    public String getEndpoint() {
        return endpoint;
    }

    /**
     * Uploads the given OCI manifest to the repository, optionally applying a tag.
     *
     * @param manifest The {@link OciImageManifest} to upload.
     * @param tag Tag to apply to the uploaded manifest; no tag is applied when {@code null}.
     * @return The upload result.
     * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
     * @throws NullPointerException thrown if {@code manifest} is null.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public SetManifestResult setManifest(OciImageManifest manifest, String tag) {
        Objects.requireNonNull(manifest, "'manifest' cannot be null.");
        BinaryData payload = BinaryData.fromObject(manifest);
        return setManifestWithResponse(payload, tag, ManifestMediaType.OCI_MANIFEST, Context.NONE).getValue();
    }

    /**
     * Uploads a manifest of a caller-specified media type to the repository.
     *
     * @param options The options describing the manifest upload.
     * @param context Additional context that is passed through the HTTP pipeline during the service call.
     * @return The REST response containing the upload result.
     * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
     * @throws NullPointerException thrown if {@code options} is null.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<SetManifestResult> setManifestWithResponse(SetManifestOptions options, Context context) {
        Objects.requireNonNull(options, "'options' cannot be null.");
        return setManifestWithResponse(options.getManifest(), options.getTag(), options.getManifestMediaType(), context);
    }

    /**
     * Uploads a blob to the repository in chunks of {@code CHUNK_SIZE} bytes.
     *
     * @param content The blob content; may be loaded into memory depending on how the {@link BinaryData} was created.
     * @return The upload result.
     * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
     * @throws NullPointerException thrown if {@code content} is null.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public UploadRegistryBlobResult uploadBlob(BinaryData content) {
        return uploadBlob(content, Context.NONE);
    }

    /**
     * Uploads a blob to the repository in chunks of {@code CHUNK_SIZE} bytes.
     *
     * @param content The blob content.
     * @param context Additional context that is passed through the HTTP pipeline during the service call.
     * @return The upload result.
     * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
     * @throws NullPointerException thrown if {@code content} is null.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public UploadRegistryBlobResult uploadBlob(BinaryData content, Context context) {
        Objects.requireNonNull(content, "'content' cannot be null.");
        InputStream stream = content.toStream();
        try {
            return runWithTracing(UPLOAD_BLOB_SPAN_NAME, (span) -> uploadBlobInternal(stream, span), context);
        } finally {
            try {
                stream.close();
            } catch (IOException e) {
                // Best-effort close; the upload result (or error) has already been produced.
                LOGGER.warning("Failed to close the stream", e);
            }
        }
    }

    /**
     * Downloads the manifest identified by a tag or digest.
     *
     * @param tagOrDigest Manifest tag or digest.
     * @return The manifest identified by the given tag or digest.
     * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
     * @throws NullPointerException thrown if {@code tagOrDigest} is null.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public GetManifestResult getManifest(String tagOrDigest) {
        return getManifestWithResponse(tagOrDigest, Context.NONE).getValue();
    }

    /**
     * Downloads the manifest identified by a tag or digest, returning the full REST response.
     *
     * @param tagOrDigest Manifest reference, which can be a tag or a digest.
     * @param context Additional context that is passed through the HTTP pipeline during the service call.
     * @return The response for the manifest identified by the given tag or digest.
     * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
     * @throws NullPointerException thrown if {@code tagOrDigest} is null.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<GetManifestResult> getManifestWithResponse(String tagOrDigest, Context context) {
        Objects.requireNonNull(tagOrDigest, "'tagOrDigest' cannot be null.");
        try {
            Response<BinaryData> rawResponse = registriesImpl.getManifestWithResponse(repositoryName, tagOrDigest, SUPPORTED_MANIFEST_TYPES, context);
            return toGetManifestResponse(tagOrDigest, rawResponse);
        } catch (AcrErrorsException exception) {
            throw LOGGER.logExceptionAsError(mapAcrErrorsException(exception));
        }
    }

    /**
     * Downloads the blob identified by the given digest and writes it to the channel.
     *
     * @param digest The digest of the blob layer.
     * @param channel The channel to write content to.
     * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
     * @throws NullPointerException thrown if {@code digest} is null.
     * @throws ServiceResponseException thrown if the content hash does not match the requested digest.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void downloadStream(String digest, WritableByteChannel channel) {
        downloadStream(digest, channel, Context.NONE);
    }

    /**
     * Downloads the blob identified by the given digest and writes it to the channel.
     *
     * @param digest The digest of the blob layer.
     * @param channel The channel to write content to.
     * @param context Additional context that is passed through the HTTP pipeline during the service call.
     * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
     * @throws NullPointerException thrown if {@code digest} is null.
     * @throws ServiceResponseException thrown if the content hash does not match the requested digest.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void downloadStream(String digest, WritableByteChannel channel, Context context) {
        runWithTracing(DOWNLOAD_BLOB_SPAN_NAME, (span) -> downloadBlobInternal(digest, channel, span), context);
    }

    /**
     * Deletes the blob identified by the given digest.
     *
     * @param digest The digest of the blob layer.
     * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
     * @throws NullPointerException thrown if {@code digest} is null.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void deleteBlob(String digest) {
        deleteBlobWithResponse(digest, Context.NONE).getValue();
    }

    /**
     * Deletes the blob identified by the given digest, returning the REST response.
     *
     * @param digest The digest of the blob layer.
     * @param context Additional context that is passed through the HTTP pipeline during the service call.
     * @return The REST response for the completion.
     * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
     * @throws NullPointerException thrown if {@code digest} is null.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> deleteBlobWithResponse(String digest, Context context) {
        Objects.requireNonNull(digest, "'digest' cannot be null.");
        try {
            return deleteResponseToSuccess(blobsImpl.deleteBlobWithResponse(repositoryName, digest, context));
        } catch (HttpResponseException ex) {
            HttpResponse errorResponse = ex.getResponse();
            if (errorResponse.getStatusCode() != 404) {
                throw LOGGER.logExceptionAsError(ex);
            }
            // A 404 means the blob is already gone; report 202 to keep deletes idempotent.
            return new SimpleResponse<>(errorResponse.getRequest(), 202, errorResponse.getHeaders(), null);
        }
    }

    /**
     * Deletes the manifest identified by the given digest.
     *
     * @param digest The digest of the manifest.
     * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
     * @throws NullPointerException thrown if {@code digest} is null.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void deleteManifest(String digest) {
        deleteManifestWithResponse(digest, Context.NONE).getValue();
    }

    /**
     * Deletes the manifest identified by the given digest, returning the REST response.
     *
     * @param digest The digest of the manifest.
     * @param context Additional context that is passed through the HTTP pipeline during the service call.
     * @return The REST response for completion.
     * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
     * @throws NullPointerException thrown if {@code digest} is null.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> deleteManifestWithResponse(String digest, Context context) {
        try {
            Response<Void> deleteResponse = registriesImpl.deleteManifestWithResponse(repositoryName, digest, context);
            return UtilsImpl.deleteResponseToSuccess(deleteResponse);
        } catch (AcrErrorsException exception) {
            throw LOGGER.logExceptionAsError(mapAcrErrorsException(exception));
        }
    }

    // Streams the blob to the service: start upload, push every full chunk, then complete the
    // upload with the final (possibly partial or null) chunk and the computed sha256 digest.
    private UploadRegistryBlobResult uploadBlobInternal(InputStream stream, Context context) {
        MessageDigest sha256 = createSha256();
        byte[] buffer = new byte[CHUNK_SIZE];
        try {
            ResponseBase<ContainerRegistryBlobsStartUploadHeaders, Void> startResponse = blobsImpl.startUploadWithResponse(repositoryName, context);
            String location = getLocation(startResponse);
            long streamLength = 0L;
            BinaryData chunk = readChunk(stream, sha256, buffer);
            while (chunk != null) {
                streamLength += chunk.getLength();
                if (chunk.getLength() < CHUNK_SIZE) {
                    // Short chunk: the stream is exhausted; send it with the completion call below.
                    break;
                }
                ResponseBase<ContainerRegistryBlobsUploadChunkHeaders, Void> chunkResponse = blobsImpl.uploadChunkWithResponse(location, chunk, chunk.getLength(), context);
                location = getLocation(chunkResponse);
                chunk = readChunk(stream, sha256, buffer);
            }
            String digest = "sha256:" + bytesToHexString(sha256.digest());
            ResponseBase<ContainerRegistryBlobsCompleteUploadHeaders, Void> completeResponse = blobsImpl.completeUploadWithResponse(digest, location, chunk, chunk == null ? null : chunk.getLength(), context);
            return ConstructorAccessors.createUploadRegistryBlobResult(completeResponse.getDeserializedHeaders().getDockerContentDigest(), streamLength);
        } catch (AcrErrorsException exception) {
            throw LOGGER.logExceptionAsError(mapAcrErrorsException(exception));
        }
    }

    // Fills the shared buffer with up to CHUNK_SIZE bytes, folds them into the running hash, and
    // returns them as BinaryData; returns null when the stream is exhausted.
    private BinaryData readChunk(InputStream stream, MessageDigest sha256, byte[] buffer) {
        int filled = 0;
        while (filled < CHUNK_SIZE) {
            try {
                int count = stream.read(buffer, filled, CHUNK_SIZE - filled);
                if (count < 0) {
                    break;
                }
                filled += count;
            } catch (IOException ex) {
                throw LOGGER.logExceptionAsError(new UncheckedIOException(ex));
            }
        }
        if (filled == 0) {
            return null;
        }
        sha256.update(buffer, 0, filled);
        ByteBuffer slice = ByteBuffer.wrap(buffer);
        slice.limit(filled);
        return BinaryData.fromByteBuffer(slice);
    }

    // Shared implementation for the public setManifest overloads; computes a digest reference when
    // no tag/digest is supplied.
    private Response<SetManifestResult> setManifestWithResponse(BinaryData manifestData, String tagOrDigest, ManifestMediaType manifestMediaType, Context context) {
        BinaryData data = manifestData.toReplayableBinaryData();
        if (tagOrDigest == null) {
            tagOrDigest = computeDigest(data.toByteBuffer());
        }
        try {
            ResponseBase<ContainerRegistriesCreateManifestHeaders, Void> response = this.registriesImpl.createManifestWithResponse(repositoryName, tagOrDigest, data, data.getLength(), manifestMediaType.toString(), context);
            return new ResponseBase<>(response.getRequest(), response.getStatusCode(), response.getHeaders(),
                ConstructorAccessors.createSetManifestResult(response.getDeserializedHeaders().getDockerContentDigest()),
                response.getDeserializedHeaders());
        } catch (AcrErrorsException exception) {
            throw LOGGER.logExceptionAsError(mapAcrErrorsException(exception));
        }
    }

    // Writes one downloaded chunk to the channel, folding its bytes into the running hash, and
    // returns the number of bytes written.
    private long writeChunk(Response<BinaryData> response, MessageDigest sha256, WritableByteChannel channel) {
        InputStream content = response.getValue().toStream();
        ByteBuffer buffer = ByteBuffer.wrap(getBytes(content));
        sha256.update(buffer.asReadOnlyBuffer());
        try {
            channel.write(buffer);
        } catch (IOException e) {
            throw LOGGER.logExceptionAsError(new UncheckedIOException(e));
        } finally {
            try {
                content.close();
            } catch (IOException e) {
                throw LOGGER.logExceptionAsError(new UncheckedIOException(e));
            }
        }
        return buffer.limit();
    }

    // Drains the stream fully into a byte array, wrapping IOException as unchecked.
    private byte[] getBytes(InputStream stream) {
        try {
            ByteArrayOutputStream collected = new ByteArrayOutputStream();
            byte[] scratch = new byte[8192];
            int count;
            while ((count = stream.read(scratch, 0, scratch.length)) != -1) {
                collected.write(scratch, 0, count);
            }
            return collected.toByteArray();
        } catch (IOException ex) {
            throw LOGGER.logExceptionAsError(new UncheckedIOException(ex));
        }
    }

    // Runs the operation inside a tracing span, recording any exception before ending the span.
    private <T> T runWithTracing(String spanName, Function<Context, T> operation, Context context) {
        Context span = tracer.start(spanName, context);
        Exception failure = null;
        try {
            return operation.apply(span);
        } catch (RuntimeException ex) {
            failure = ex;
            throw ex;
        } finally {
            tracer.end(null, failure, span);
        }
    }
}
class ContainerRegistryContentClient { private static final ClientLogger LOGGER = new ClientLogger(ContainerRegistryContentClient.class); private final ContainerRegistryBlobsImpl blobsImpl; private final ContainerRegistriesImpl registriesImpl; private final String endpoint; private final String repositoryName; private final Tracer tracer; ContainerRegistryContentClient(String repositoryName, HttpPipeline httpPipeline, String endpoint, String version, Tracer tracer) { this.repositoryName = repositoryName; this.endpoint = endpoint; AzureContainerRegistryImpl registryImplClient = new AzureContainerRegistryImpl(httpPipeline, endpoint, version); this.blobsImpl = registryImplClient.getContainerRegistryBlobs(); this.registriesImpl = registryImplClient.getContainerRegistries(); this.tracer = tracer; } /** * This method returns the registry's repository on which operations are being performed. * * @return The name of the repository */ public String getRepositoryName() { return repositoryName; } /** * This method returns the complete registry endpoint. * * @return The registry endpoint including the authority. */ public String getEndpoint() { return endpoint; } /** * Upload the OCI manifest to the repository. * * <p><strong>Code Samples:</strong></p> * * <!-- src_embed com.azure.containers.containerregistry.setManifest --> * <pre> * contentClient.setManifest& * </pre> * <!-- end com.azure.containers.containerregistry.setManifest --> * * @see <a href="https: * * @param manifest The {@link OciImageManifest} that needs to be updated. * @param tag Tag to apply on uploaded manifest. If {@code null} is passed, no tags will be applied. * @return upload result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code manifest} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public SetManifestResult setManifest(OciImageManifest manifest, String tag) { Objects.requireNonNull(manifest, "'manifest' cannot be null."); return setManifestWithResponse(BinaryData.fromObject(manifest), tag, ManifestMediaType.OCI_MANIFEST, Context.NONE).getValue(); } /** * Uploads a manifest to the repository. * * <p><strong>Code Samples:</strong></p> * * <!-- src_embed com.azure.containers.containerregistry.uploadCustomManifest --> * <pre> * SetManifestOptions options = new SetManifestOptions& * * Response&lt;SetManifestResult&gt; response = contentClient.setManifestWithResponse& * System.out.println& * </pre> * <!-- end com.azure.containers.containerregistry.uploadCustomManifest --> * * @param options The options for the upload manifest operation. * @param context Additional context that is passed through the Http pipeline during the service call. * @return The rest response containing the upload result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<SetManifestResult> setManifestWithResponse(SetManifestOptions options, Context context) { Objects.requireNonNull(options, "'options' cannot be null."); return setManifestWithResponse(options.getManifest(), options.getTag(), options.getManifestMediaType(), context); } /** * Uploads a blob to the repository in chunks of 4MB. 
* * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.containers.containerregistry.uploadBlob --> * <pre> * BinaryData configContent = BinaryData.fromObject& * * UploadRegistryBlobResult uploadResult = contentClient.uploadBlob& * System.out.printf& * </pre> * <!-- end com.azure.containers.containerregistry.uploadBlob --> * * <!-- src_embed com.azure.containers.containerregistry.uploadBlobErrorHandling --> * <pre> * BinaryData configContent = BinaryData.fromObject& * * try & * UploadRegistryBlobResult uploadResult = contentClient.uploadBlob& * System.out.printf& * uploadResult.getSizeInBytes& * & * if & * ResponseError error = & * System.out.printf& * if & * System.out.println& * & * & * & * & * </pre> * <!-- end com.azure.containers.containerregistry.uploadBlobErrorHandling --> * * @param content The blob content. The content may be loaded into memory depending on how {@link BinaryData} is created. * @return The upload response. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is {@code null}. */ @ServiceMethod(returns = ReturnType.SINGLE) public UploadRegistryBlobResult uploadBlob(BinaryData content) { return uploadBlob(content, Context.NONE); } /** * Uploads a blob to the repository in chunks of 4MB. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.containers.containerregistry.uploadFile --> * <pre> * BinaryData content = BinaryData.fromFile& * UploadRegistryBlobResult uploadResult = contentClient.uploadBlob& * System.out.printf& * uploadResult.getDigest& * </pre> * <!-- end com.azure.containers.containerregistry.uploadFile --> * * @param content The blob content. * @param context Additional context that is passed through the Http pipeline during the service call. * @return The upload response. 
* @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code stream} is {@code null}. */ @ServiceMethod(returns = ReturnType.SINGLE) public UploadRegistryBlobResult uploadBlob(BinaryData content, Context context) { Objects.requireNonNull(content, "'content' cannot be null."); InputStream stream = content.toStream(); try { return runWithTracing(UPLOAD_BLOB_SPAN_NAME, (span) -> uploadBlobInternal(stream, span), context); } finally { try { stream.close(); } catch (IOException e) { LOGGER.warning("Failed to close the stream", e); } } } /** * Download the manifest identified by the given tag or digest. * * <p><strong>Code Samples:</strong></p> * * Download manifest with tag: * * <!-- src_embed com.azure.containers.containerregistry.getManifestTag --> * <pre> * GetManifestResult latestResult = contentClient.getManifest& * if & * || ManifestMediaType.OCI_MANIFEST.equals& * OciImageManifest manifest = latestResult.getManifest& * & * throw new IllegalArgumentException& * & * </pre> * <!-- end com.azure.containers.containerregistry.getManifestTag --> * * Download manifest with digest: * * <!-- src_embed com.azure.containers.containerregistry.getManifestDigest --> * <pre> * GetManifestResult getManifestResult = contentClient.getManifest& * &quot;sha256:6581596932dc735fd0df8cc240e6c28845a66829126da5ce25b983cf244e2311&quot;& * </pre> * <!-- end com.azure.containers.containerregistry.getManifestDigest --> * * @param tagOrDigest Manifest tag or digest. * @return The manifest identified by the given tag or digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code tagOrDigest} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public GetManifestResult getManifest(String tagOrDigest) { return getManifestWithResponse(tagOrDigest, Context.NONE).getValue(); } /** * Download the manifest of custom type identified by the given tag or digest. * * <p><strong>Code Samples:</strong></p> * * <!-- src_embed com.azure.containers.containerregistry.getManifestWithResponse --> * <pre> * Response&lt;GetManifestResult&gt; downloadResponse = contentClient.getManifestWithResponse& * Context.NONE& * System.out.printf& * downloadResponse.getStatusCode& * </pre> * <!-- end com.azure.containers.containerregistry.getManifestWithResponse --> * * @param tagOrDigest Manifest reference which can be tag or digest. * @param context Additional context that is passed through the Http pipeline during the service call. * @return The response for the manifest identified by the given tag or digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code tagOrDigest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<GetManifestResult> getManifestWithResponse(String tagOrDigest, Context context) { Objects.requireNonNull(tagOrDigest, "'tagOrDigest' cannot be null."); try { Response<BinaryData> response = registriesImpl.getManifestWithResponse(repositoryName, tagOrDigest, SUPPORTED_MANIFEST_TYPES, context); return toGetManifestResponse(tagOrDigest, response); } catch (AcrErrorsException exception) { throw LOGGER.logExceptionAsError(mapAcrErrorsException(exception)); } } /** * Download the blob identified by the given digest. 
* * <p><strong>Code Samples:</strong></p> * * <!-- src_embed com.azure.containers.containerregistry.downloadStream --> * <pre> * Path file = Files.createTempFile& * SeekableByteChannel channel = Files.newByteChannel& * contentClient.downloadStream& * </pre> * <!-- end com.azure.containers.containerregistry.downloadStream --> * * @param digest The digest for the given image layer. * @param channel The channel to write content to. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. * @throws ServiceResponseException thrown if content hash does not match requested digest. */ @ServiceMethod(returns = ReturnType.SINGLE) public void downloadStream(String digest, WritableByteChannel channel) { downloadStream(digest, channel, Context.NONE); } /** * Download the blob identified by the given digest. * * @param digest The digest for the given image layer. * @param channel The channel to write content to. * @param context Additional context that is passed through the Http pipeline during the service call. * * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. * @throws ServiceResponseException thrown if content hash does not match requested digest. 
 */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void downloadStream(String digest, WritableByteChannel channel, Context context) {
        // Wraps the internal download in a tracing span so the whole transfer is one traced operation.
        runWithTracing(DOWNLOAD_BLOB_SPAN_NAME, (span) -> downloadBlobInternal(digest, channel, span), context);
    }

    /**
     * Deletes the blob identified by the given digest.
     *
     * @param digest The digest for the given image layer.
     * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
     * @throws NullPointerException thrown if the {@code digest} is null.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void deleteBlob(String digest) {
        deleteBlobWithResponse(digest, Context.NONE).getValue();
    }

    /**
     * Deletes the blob identified by the given digest.
     *
     * @param digest The digest for the given image layer.
     * @param context Additional context that is passed through the Http pipeline during the service call.
     * @return The REST response for the completion.
     * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
     * @throws NullPointerException thrown if the {@code digest} is null.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> deleteBlobWithResponse(String digest, Context context) {
        Objects.requireNonNull(digest, "'digest' cannot be null.");
        try {
            Response<Void> response = blobsImpl.deleteBlobWithResponse(repositoryName, digest, context);
            return deleteResponseToSuccess(response);
        } catch (HttpResponseException ex) {
            if (ex.getResponse().getStatusCode() == 404) {
                // Deletes are idempotent: a 404 (already gone) is reported to the caller as 202 Accepted.
                HttpResponse response = ex.getResponse();
                return new SimpleResponse<>(response.getRequest(), 202, response.getHeaders(), null);
            } else {
                throw LOGGER.logExceptionAsError(ex);
            }
        }
    }

    /**
     * Deletes the manifest identified by the given digest.
     *
     * @param digest The digest of the manifest.
     * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
     * @throws NullPointerException thrown if the {@code digest} is null.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void deleteManifest(String digest) {
        deleteManifestWithResponse(digest, Context.NONE).getValue();
    }

    /**
     * Deletes the manifest identified by the given digest.
     *
     * @param digest The digest of the manifest.
     * @param context Additional context that is passed through the Http pipeline during the service call.
     * @return The REST response for completion.
     * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace.
     * @throws NullPointerException thrown if the {@code digest} is null.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> deleteManifestWithResponse(String digest, Context context) {
        try {
            Response<Void> response = registriesImpl.deleteManifestWithResponse(repositoryName, digest, context);
            // Maps 404 to 202 so manifest deletes are idempotent, matching deleteBlobWithResponse.
            return UtilsImpl.deleteResponseToSuccess(response);
        } catch (AcrErrorsException exception) {
            throw LOGGER.logExceptionAsError(mapAcrErrorsException(exception));
        }
    }

    /**
     * Uploads a blob from the given stream in CHUNK_SIZE pieces while computing its SHA-256 digest.
     * Flow: start upload -> upload full chunks -> complete upload with the final (possibly partial) chunk.
     *
     * @param stream source of the blob bytes; read fully, never reset.
     * @param context Additional context passed through the Http pipeline.
     * @return result carrying the service-computed docker content digest and the total bytes streamed.
     */
    private UploadRegistryBlobResult uploadBlobInternal(InputStream stream, Context context) {
        MessageDigest sha256 = createSha256();
        // One reusable buffer for every chunk; readChunk fills it and wraps it without copying.
        byte[] buffer = new byte[CHUNK_SIZE];
        try {
            ResponseBase<ContainerRegistryBlobsStartUploadHeaders, Void> startUploadResponse = blobsImpl.startUploadWithResponse(repositoryName, context);
            String location = getLocation(startUploadResponse);
            BinaryData chunk;
            long streamLength = 0L;
            while (true) {
                chunk = readChunk(stream, sha256, buffer);
                if (chunk == null) {
                    // Stream exhausted exactly on a chunk boundary (or was empty).
                    break;
                }
                streamLength += chunk.getLength();
                if (chunk.getLength() < CHUNK_SIZE) {
                    // Short read means end of stream; this final chunk goes with completeUpload below.
                    break;
                }
                ResponseBase<ContainerRegistryBlobsUploadChunkHeaders, Void> uploadChunkResponse = blobsImpl.uploadChunkWithResponse(location, chunk, chunk.getLength(), context);
                // Each chunk response supplies the location for the next request.
                location = getLocation(uploadChunkResponse);
            }
            String digest = "sha256:" + bytesToHexString(sha256.digest());
            // chunk may be null here (stream length was a multiple of CHUNK_SIZE) — complete with no body then.
            ResponseBase<ContainerRegistryBlobsCompleteUploadHeaders, Void> completeUploadResponse = blobsImpl.completeUploadWithResponse(digest, location, chunk, chunk == null ? null : chunk.getLength(), context);
            return ConstructorAccessors.createUploadRegistryBlobResult(completeUploadResponse.getDeserializedHeaders().getDockerContentDigest(), streamLength);
        } catch (AcrErrorsException exception) {
            throw LOGGER.logExceptionAsError(mapAcrErrorsException(exception));
        }
    }

    /**
     * Reads up to CHUNK_SIZE bytes from the stream into {@code buffer}, feeding them into the digest.
     *
     * @return the bytes read wrapped as BinaryData (a view over the shared buffer — consume before the
     *         next call), or {@code null} when the stream had no more data.
     * @throws UncheckedIOException if reading the stream fails.
     */
    private BinaryData readChunk(InputStream stream, MessageDigest sha256, byte[] buffer) {
        int position = 0;
        // Loop because InputStream.read may return fewer bytes than requested.
        while (position < CHUNK_SIZE) {
            try {
                int read = stream.read(buffer, position, CHUNK_SIZE - position);
                if (read < 0) {
                    break;
                }
                position += read;
            } catch (IOException ex) {
                throw LOGGER.logExceptionAsError(new UncheckedIOException(ex));
            }
        }
        if (position == 0) {
            return null;
        }
        sha256.update(buffer, 0, position);
        ByteBuffer byteBuffer = ByteBuffer.wrap(buffer);
        byteBuffer.limit(position);
        return BinaryData.fromByteBuffer(byteBuffer);
    }

    /**
     * Creates or replaces a manifest under the given tag or digest.
     * If {@code tagOrDigest} is null, the manifest's own SHA-256 digest is used as the reference.
     */
    private Response<SetManifestResult> setManifestWithResponse(BinaryData manifestData, String tagOrDigest, ManifestMediaType manifestMediaType, Context context) {
        // Replayable so the payload can be both digested and sent.
        BinaryData data = manifestData.toReplayableBinaryData();
        if (tagOrDigest == null) {
            tagOrDigest = computeDigest(data.toByteBuffer());
        }
        try {
            ResponseBase<ContainerRegistriesCreateManifestHeaders, Void> response = this.registriesImpl
                .createManifestWithResponse(repositoryName, tagOrDigest, data, data.getLength(), manifestMediaType.toString(), context);
            return new ResponseBase<>(
                response.getRequest(),
                response.getStatusCode(),
                response.getHeaders(),
                ConstructorAccessors.createSetManifestResult(response.getDeserializedHeaders().getDockerContentDigest()),
                response.getDeserializedHeaders());
        } catch (AcrErrorsException exception) {
            throw LOGGER.logExceptionAsError(mapAcrErrorsException(exception));
        }
    }

    /**
     * Writes one downloaded chunk to the channel, folding its bytes into the running digest.
     *
     * @return the number of bytes in the chunk.
     * @throws UncheckedIOException if writing to the channel or closing the content stream fails.
     */
    private long writeChunk(Response<BinaryData> response, MessageDigest sha256, WritableByteChannel channel) {
        InputStream content = response.getValue().toStream();
        ByteBuffer buffer = ByteBuffer.wrap(getBytes(content));
        // Digest from a read-only duplicate so the buffer's position is untouched for the write below.
        sha256.update(buffer.asReadOnlyBuffer());
        try {
            channel.write(buffer);
        } catch (IOException e) {
            throw LOGGER.logExceptionAsError(new UncheckedIOException(e));
        } finally {
            try {
                content.close();
            } catch (IOException e) {
                throw LOGGER.logExceptionAsError(new UncheckedIOException(e));
            }
        }
        return buffer.limit();
    }

    /**
     * Drains the stream fully into a byte array (8 KiB read buffer).
     *
     * @throws UncheckedIOException if reading fails.
     */
    private byte[] getBytes(InputStream stream) {
        try {
            ByteArrayOutputStream dataOutputBuffer = new ByteArrayOutputStream();
            int nRead;
            byte[] data = new byte[8192];
            while ((nRead = stream.read(data, 0, data.length)) != -1) {
                dataOutputBuffer.write(data, 0, nRead);
            }
            return dataOutputBuffer.toByteArray();
        } catch (IOException ex) {
            throw LOGGER.logExceptionAsError(new UncheckedIOException(ex));
        }
    }

    /**
     * Runs {@code operation} inside a tracing span, ending the span with the thrown exception (if any).
     * Note: only RuntimeException is recorded on the span; checked exceptions cannot escape the Function.
     */
    private <T> T runWithTracing(String spanName, Function<Context, T> operation, Context context) {
        Context span = tracer.start(spanName, context);
        Exception exception = null;
        try {
            return operation.apply(span);
        } catch (RuntimeException ex) {
            exception = ex;
            throw ex;
        } finally {
            tracer.end(null, exception, span);
        }
    }
}
Review note: this code path previously did not throw an exception here. Is that runtime behavior change acceptable to existing callers?
/**
 * Converts a raw manifest HTTP response into a {@link GetManifestResult} response.
 * Validates the payload size via the Content-Length header, then verifies that the
 * SHA-256 of the body matches both the service-reported digest header and, when the
 * caller requested by digest, the requested digest itself.
 *
 * @param tagOrDigest the tag or digest the manifest was requested with.
 * @param rawResponse the raw service response carrying the manifest bytes.
 * @return the converted response.
 * @throws ServiceResponseException if the digest validation fails.
 */
public static Response<GetManifestResult> toGetManifestResponse(String tagOrDigest, Response<BinaryData> rawResponse) {
    checkManifestSize(rawResponse.getHeaders());

    String headerDigest = rawResponse.getHeaders().getValue(DOCKER_DIGEST_HEADER_NAME);
    String computedSha256 = computeDigest(rawResponse.getValue().toByteBuffer());

    boolean headerMismatch = !Objects.equals(computedSha256, headerDigest);
    boolean requestedMismatch = isDigest(tagOrDigest) && !Objects.equals(computedSha256, tagOrDigest);
    if (headerMismatch || requestedMismatch) {
        throw LOGGER.logExceptionAsError(new ServiceResponseException("The digest in the response does not match the expected digest."));
    }

    String contentTypeHeader = rawResponse.getHeaders().getValue(HttpHeaderName.CONTENT_TYPE);
    ManifestMediaType mediaType = contentTypeHeader == null ? null : ManifestMediaType.fromString(contentTypeHeader);

    return new SimpleResponse<>(
        rawResponse.getRequest(),
        rawResponse.getStatusCode(),
        rawResponse.getHeaders(),
        ConstructorAccessors.createGetManifestResult(headerDigest, mediaType, rawResponse.getValue()));
}
// Validate the Content-Length header first: it must be present, parseable, and within the 4MB manifest limit.
checkManifestSize(rawResponse.getHeaders());
/**
 * Converts a raw manifest HTTP response into a {@link GetManifestResult} response,
 * validating manifest size and digest integrity along the way.
 *
 * @param tagOrDigest the tag or digest the manifest was requested with.
 * @param rawResponse the raw service response carrying the manifest bytes.
 * @return the converted response.
 * @throws ServiceResponseException if the payload digest disagrees with the response header
 *         digest, or with the requested digest when the caller asked by digest.
 */
public static Response<GetManifestResult> toGetManifestResponse(String tagOrDigest, Response<BinaryData> rawResponse) {
    // Reject responses without a Content-Length header or larger than the 4MB manifest limit.
    checkManifestSize(rawResponse.getHeaders());
    String digest = rawResponse.getHeaders().getValue(DOCKER_DIGEST_HEADER_NAME);
    String responseSha256 = computeDigest(rawResponse.getValue().toByteBuffer());
    // The computed hash must match the service-reported digest, and the requested one when given by digest.
    if (!Objects.equals(responseSha256, digest) || (isDigest(tagOrDigest) && !Objects.equals(responseSha256, tagOrDigest))) {
        throw LOGGER.logExceptionAsError(new ServiceResponseException("The digest in the response does not match the expected digest."));
    }
    String contentType = rawResponse.getHeaders().getValue(HttpHeaderName.CONTENT_TYPE);
    ManifestMediaType responseMediaType = contentType != null ? ManifestMediaType.fromString(contentType) : null;
    return new SimpleResponse<>(
        rawResponse.getRequest(),
        rawResponse.getStatusCode(),
        rawResponse.getHeaders(),
        ConstructorAccessors.createGetManifestResult(digest, responseMediaType, rawResponse.getValue()));
}
class UtilsImpl { private static final ClientLogger LOGGER = new ClientLogger(UtilsImpl.class); private static final Map<String, String> PROPERTIES = CoreUtils.getProperties("azure-containers-containerregistry.properties"); private static final String CLIENT_NAME = PROPERTIES.getOrDefault("name", "UnknownName"); private static final String CLIENT_VERSION = PROPERTIES.getOrDefault("version", "UnknownVersion"); private static final ContainerRegistryAudience ACR_ACCESS_TOKEN_AUDIENCE = ContainerRegistryAudience.fromString("https: private static final int HTTP_STATUS_CODE_NOT_FOUND = 404; private static final int HTTP_STATUS_CODE_ACCEPTED = 202; public static final HttpHeaderName DOCKER_DIGEST_HEADER_NAME = HttpHeaderName.fromString("docker-content-digest"); public static final String SUPPORTED_MANIFEST_TYPES = "*/*" + "," + ManifestMediaType.OCI_MANIFEST + "," + ManifestMediaType.DOCKER_MANIFEST + ",application/vnd.oci.image.index.v1+json" + ",application/vnd.docker.distribution.manifest.list.v2+json" + ",application/vnd.cncf.oras.artifact.manifest.v1+json"; private static final String CONTAINER_REGISTRY_TRACING_NAMESPACE_VALUE = "Microsoft.ContainerRegistry"; public static final int CHUNK_SIZE = 4 * 1024 * 1024; public static final int MAX_MANIFEST_SIZE = 4 * 1024 * 1024; public static final String UPLOAD_BLOB_SPAN_NAME = "ContainerRegistryContentAsyncClient.uploadBlob"; public static final String DOWNLOAD_BLOB_SPAN_NAME = "ContainerRegistryContentAsyncClient.downloadBlob"; private UtilsImpl() { } /** * This method builds the httpPipeline for the builders. * @param clientOptions The client options * @param logOptions http log options. * @param configuration configuration settings. * @param retryPolicy retry policy * @param retryOptions retry options * @param credential credentials. * @param audience the audience. * @param perCallPolicies per call policies. * @param perRetryPolicies per retry policies. 
* @param httpClient http client * @param endpoint endpoint to be called * @param serviceVersion the service api version being targeted by the client. * @return returns the httpPipeline to be consumed by the builders. */ public static HttpPipeline buildClientPipeline( ClientOptions clientOptions, HttpLogOptions logOptions, Configuration configuration, RetryPolicy retryPolicy, RetryOptions retryOptions, TokenCredential credential, ContainerRegistryAudience audience, List<HttpPipelinePolicy> perCallPolicies, List<HttpPipelinePolicy> perRetryPolicies, HttpClient httpClient, String endpoint, ContainerRegistryServiceVersion serviceVersion, Tracer tracer) { if (credential == null) { LOGGER.verbose("Credentials are null, enabling anonymous access"); } if (audience == null) { LOGGER.info("Audience is not specified, defaulting to ACR access token scope."); audience = ACR_ACCESS_TOKEN_AUDIENCE; } if (serviceVersion == null) { serviceVersion = ContainerRegistryServiceVersion.getLatest(); } HttpPipeline credentialsPipeline = buildPipeline(clientOptions, logOptions, configuration, retryPolicy, retryOptions, null, null, perCallPolicies, perRetryPolicies, httpClient, tracer); return buildPipeline(clientOptions, logOptions, configuration, retryPolicy, retryOptions, buildCredentialsPolicy(credentialsPipeline, credential, audience, endpoint, serviceVersion), new RedirectPolicy(), perCallPolicies, perRetryPolicies, httpClient, tracer); } private static ContainerRegistryCredentialsPolicy buildCredentialsPolicy(HttpPipeline credentialPipeline, TokenCredential credential, ContainerRegistryAudience audience, String endpoint, ContainerRegistryServiceVersion serviceVersion) { AzureContainerRegistryImpl acrClient = new AzureContainerRegistryImpl( credentialPipeline, endpoint, serviceVersion.getVersion()); ContainerRegistryTokenService tokenService = new ContainerRegistryTokenService(credential, audience, acrClient); return new ContainerRegistryCredentialsPolicy(tokenService, audience + 
"/.default"); } private static HttpPipeline buildPipeline(ClientOptions clientOptions, HttpLogOptions logOptions, Configuration configuration, RetryPolicy retryPolicy, RetryOptions retryOptions, ContainerRegistryCredentialsPolicy credentialPolicy, RedirectPolicy redirectPolicy, List<HttpPipelinePolicy> perCallPolicies, List<HttpPipelinePolicy> perRetryPolicies, HttpClient httpClient, Tracer tracer) { ArrayList<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add( new UserAgentPolicy(CoreUtils.getApplicationId(clientOptions, logOptions), CLIENT_NAME, CLIENT_VERSION, configuration)); policies.add(new RequestIdPolicy()); policies.addAll(perCallPolicies); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(ClientBuilderUtil.validateAndGetRetryPolicy(retryPolicy, retryOptions)); policies.add(new CookiePolicy()); policies.add(new AddDatePolicy()); HttpPolicyProviders.addAfterRetryPolicies(policies); if (credentialPolicy != null) { policies.add(credentialPolicy); } if (redirectPolicy != null) { policies.add(redirectPolicy); } policies.addAll(perRetryPolicies); policies.add(new HttpLoggingPolicy(logOptions)); return new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .clientOptions(clientOptions) .tracer(tracer) .build(); } public static Tracer createTracer(ClientOptions clientOptions) { TracingOptions tracingOptions = clientOptions == null ? null : clientOptions.getTracingOptions(); return TracerProvider.getDefaultProvider() .createTracer(CLIENT_NAME, CLIENT_VERSION, CONTAINER_REGISTRY_TRACING_NAMESPACE_VALUE, tracingOptions); } /** * This method computes the digest for the buffer content. * Docker digest is a SHA256 hash of the docker image content and is deterministic based on the image build. * @param buffer The buffer containing the image bytes. * @return SHA-256 digest for the given buffer. 
*/ public static String computeDigest(ByteBuffer buffer) { MessageDigest md = createSha256(); md.update(buffer.asReadOnlyBuffer()); return "sha256:" + bytesToHexString(md.digest()); } public static MessageDigest createSha256() { try { return MessageDigest.getInstance("SHA-256"); } catch (NoSuchAlgorithmException e) { throw LOGGER.logExceptionAsError(new RuntimeException(e)); } } public static void validateDigest(MessageDigest messageDigest, String requestedDigest) { String sha256 = bytesToHexString(messageDigest.digest()); if (isDigest(requestedDigest) && !requestedDigest.endsWith(sha256)) { throw LOGGER.atError() .addKeyValue("requestedDigest", requestedDigest) .addKeyValue("actualDigest", () -> "sha256:" + sha256) .log(new ServiceResponseException("The digest in the response does not match the expected digest.")); } } private static long checkManifestSize(HttpHeaders headers) { String contentLengthString = headers.getValue(HttpHeaderName.CONTENT_LENGTH); if (CoreUtils.isNullOrEmpty(contentLengthString)) { throw LOGGER.logExceptionAsError(new ServiceResponseException("Response does not include `Content-Length` header")); } try { long contentLength = Long.parseLong(contentLengthString); if (contentLength > MAX_MANIFEST_SIZE) { throw LOGGER.atError() .addKeyValue("contentLength", contentLengthString) .log(new ServiceResponseException("Manifest size is bigger than 4MB")); } return contentLength; } catch (NumberFormatException | NullPointerException e) { throw LOGGER.atError() .addKeyValue("contentLength", contentLengthString) .log(new ServiceResponseException("Could not parse `Content-Length` header")); } } /** * Delete operation should be idempotent. * And so should result in a success in case the service response is 400 : Not found. * @param responseT The response object. * @param <T> The encapsulating value. * @return The transformed response object. 
*/ public static <T> Response<Void> deleteResponseToSuccess(Response<T> responseT) { if (responseT.getStatusCode() != HTTP_STATUS_CODE_NOT_FOUND) { return getAcceptedDeleteResponse(responseT, responseT.getStatusCode()); } return getAcceptedDeleteResponse(responseT, HTTP_STATUS_CODE_ACCEPTED); } private static <T> Response<Void> getAcceptedDeleteResponse(Response<T> responseT, int statusCode) { return new SimpleResponse<>( responseT.getRequest(), statusCode, responseT.getHeaders(), null); } /** * This method converts AcrErrors inside AcrErrorsException into {@link HttpResponseException} * with {@link ResponseError} */ public static HttpResponseException mapAcrErrorsException(AcrErrorsException acrException) { final HttpResponse errorHttpResponse = acrException.getResponse(); if (acrException.getValue() != null && !CoreUtils.isNullOrEmpty(acrException.getValue().getErrors())) { AcrErrorInfo first = acrException.getValue().getErrors().get(0); ResponseError error = new ResponseError(first.getCode(), first.getMessage()); switch (errorHttpResponse.getStatusCode()) { case 401: throw new ClientAuthenticationException(acrException.getMessage(), acrException.getResponse(), error); case 404: return new ResourceNotFoundException(acrException.getMessage(), acrException.getResponse(), error); case 409: return new ResourceExistsException(acrException.getMessage(), acrException.getResponse(), error); case 412: return new ResourceModifiedException(acrException.getMessage(), acrException.getResponse(), error); default: return new HttpResponseException(acrException.getMessage(), acrException.getResponse(), error); } } return acrException; } /** * This method parses the response to get the continuation token used to make the next pagination call. * The continuation token is returned by the service in the form of a header and not as a nextLink field. * @param listResponse response that is parsed. * @param <T> the model type that is being operated on. 
* @return paged response with the correct continuation token. */ public static <T> PagedResponse<T> getPagedResponseWithContinuationToken(PagedResponse<T> listResponse) { return getPagedResponseWithContinuationToken(listResponse, values -> values); } /** * This method parses the response to get the continuation token used to make the next pagination call. * The continuation token is returned by the service in the form of a header and not as a nextLink field. * * <p> * Per the Docker v2 HTTP API spec, the Link header is an RFC5988 * compliant rel='next' with URL to next result set, if available. * See: https: * * The URI reference can be obtained from link-value as follows: * Link = "Link" ":" * link-value = "<" URI-Reference ">" * (";" link-param ) * See: https: * </p> * @param listResponse response that is parsed. * @param mapperFunction the function that maps the rest api response into the public model exposed by the client. * @param <T> The model type returned by the rest client. * @param <R> The model type returned by the public client. * @return paged response with the correct continuation token. 
*/ public static <T, R> PagedResponse<T> getPagedResponseWithContinuationToken(PagedResponse<R> listResponse, Function<List<R>, List<T>> mapperFunction) { Objects.requireNonNull(mapperFunction); String continuationLink = getContinuationLink(listResponse.getHeaders()); List<T> values = mapperFunction.apply(listResponse.getValue()); return new PagedResponseBase<String, T>( listResponse.getRequest(), listResponse.getStatusCode(), listResponse.getHeaders(), values, continuationLink, null ); } private static String getContinuationLink(HttpHeaders headers) { String continuationLinkHeader = headers.getValue(HttpHeaderName.LINK); if (!CoreUtils.isNullOrEmpty(continuationLinkHeader) && continuationLinkHeader.charAt(0) == '<') { int endIndex = continuationLinkHeader.indexOf(">;"); if (endIndex < 2) { LOGGER.warning("unexpected 'Link' header value - '{}'", continuationLinkHeader); } return continuationLinkHeader.substring(1, endIndex); } return null; } public static List<ArtifactManifestProperties> mapManifestsProperties(List<ManifestAttributesBase> baseArtifacts, String repositoryName, String registryLoginServer) { if (baseArtifacts == null) { return null; } List<ArtifactManifestProperties> artifactManifestProperties = new ArrayList<>(baseArtifacts.size()); for (ManifestAttributesBase base : baseArtifacts) { ArtifactManifestPropertiesInternal internal = new ArtifactManifestPropertiesInternal() .setRegistryLoginServer(registryLoginServer) .setRepositoryName(repositoryName) .setDigest(base.getDigest()) .setSizeInBytes(base.getSizeInBytes()) .setCreatedOn(base.getCreatedOn()) .setLastUpdatedOn(base.getLastUpdatedOn()) .setArchitecture(base.getArchitecture()) .setOperatingSystem(base.getOperatingSystem()) .setRelatedArtifacts(base.getRelatedArtifacts()) .setTags(base.getTags()) .setDeleteEnabled(base.isDeleteEnabled()) .setWriteEnabled(base.isWriteEnabled()) .setListEnabled(base.isListEnabled()) .setReadEnabled(base.isReadEnabled()); 
artifactManifestProperties.add(ArtifactManifestPropertiesHelper.create(internal)); } return artifactManifestProperties; } public static List<ArtifactTagProperties> getTagProperties(List<TagAttributesBase> baseValues, String repositoryName) { Objects.requireNonNull(baseValues); List<ArtifactTagProperties> artifactTagProperties = new ArrayList<>(baseValues.size()); for (TagAttributesBase base : baseValues) { ArtifactTagPropertiesInternal internal = new ArtifactTagPropertiesInternal() .setRepositoryName(repositoryName) .setName(base.getName()) .setDigest(base.getDigest()) .setCreatedOn(base.getCreatedOn()) .setLastUpdatedOn(base.getLastUpdatedOn()) .setDeleteEnabled(base.isDeleteEnabled()) .setWriteEnabled(base.isWriteEnabled()) .setListEnabled(base.isListEnabled()) .setReadEnabled(base.isReadEnabled()); artifactTagProperties.add(ArtifactTagPropertiesHelper.create(internal)); } return artifactTagProperties; } public static void validateResponseHeaderDigest(String requestedDigest, HttpHeaders headers) { String responseHeaderDigest = headers.getValue(DOCKER_DIGEST_HEADER_NAME); if (!requestedDigest.equals(responseHeaderDigest)) { throw LOGGER.atError() .addKeyValue("requestedDigest", requestedDigest) .addKeyValue("responseDigest", responseHeaderDigest) .log(new ServiceResponseException("The digest in the response header does not match the expected digest.")); } } public static <H, T> String getLocation(ResponseBase<H, T> response) { String locationHeader = response.getHeaders().getValue(HttpHeaderName.LOCATION); if (locationHeader != null && locationHeader.startsWith("/")) { return locationHeader.substring(1); } return locationHeader; } public static long getBlobSize(HttpHeader contentRangeHeader) { Exception cause = null; if (contentRangeHeader != null) { int slashInd = contentRangeHeader.getValue().indexOf('/'); if (slashInd > 0) { try { long blobSize = Long.parseLong(contentRangeHeader.getValue().substring(slashInd + 1)); if (blobSize > 0) { return blobSize; } } 
catch (NumberFormatException ex) { cause = ex; } } } throw LOGGER.atError() .addKeyValue("contentRange", contentRangeHeader) .log(new ServiceResponseException("Invalid content-range header in response", cause)); } /** * Checks if string represents tag or digest. * * @param tagOrDigest string to check * @return true if digest, false otherwise. */ public static boolean isDigest(String tagOrDigest) { return tagOrDigest.length() == 71 && tagOrDigest.startsWith("sha256:"); } public static String formatFullyQualifiedReference(String endpoint, String repositoryName, String tagOrDigest) { try { URL endpointUrl = new URL(endpoint); return endpointUrl.getHost() + "/" + repositoryName + (isDigest(tagOrDigest) ? "@" : ":") + tagOrDigest; } catch (MalformedURLException ex) { throw LOGGER.logExceptionAsWarning(new IllegalArgumentException("'endpoint' must be a valid URL", ex)); } } }
class UtilsImpl { private static final ClientLogger LOGGER = new ClientLogger(UtilsImpl.class); private static final Map<String, String> PROPERTIES = CoreUtils.getProperties("azure-containers-containerregistry.properties"); private static final String CLIENT_NAME = PROPERTIES.getOrDefault("name", "UnknownName"); private static final String CLIENT_VERSION = PROPERTIES.getOrDefault("version", "UnknownVersion"); private static final ContainerRegistryAudience ACR_ACCESS_TOKEN_AUDIENCE = ContainerRegistryAudience.fromString("https: private static final int HTTP_STATUS_CODE_NOT_FOUND = 404; private static final int HTTP_STATUS_CODE_ACCEPTED = 202; public static final HttpHeaderName DOCKER_DIGEST_HEADER_NAME = HttpHeaderName.fromString("docker-content-digest"); public static final String SUPPORTED_MANIFEST_TYPES = "*/*" + "," + ManifestMediaType.OCI_MANIFEST + "," + ManifestMediaType.DOCKER_MANIFEST + ",application/vnd.oci.image.index.v1+json" + ",application/vnd.docker.distribution.manifest.list.v2+json" + ",application/vnd.cncf.oras.artifact.manifest.v1+json"; private static final String CONTAINER_REGISTRY_TRACING_NAMESPACE_VALUE = "Microsoft.ContainerRegistry"; public static final int CHUNK_SIZE = 4 * 1024 * 1024; public static final int MAX_MANIFEST_SIZE = 4 * 1024 * 1024; public static final String UPLOAD_BLOB_SPAN_NAME = "ContainerRegistryContentAsyncClient.uploadBlob"; public static final String DOWNLOAD_BLOB_SPAN_NAME = "ContainerRegistryContentAsyncClient.downloadBlob"; private UtilsImpl() { } /** * This method builds the httpPipeline for the builders. * @param clientOptions The client options * @param logOptions http log options. * @param configuration configuration settings. * @param retryPolicy retry policy * @param retryOptions retry options * @param credential credentials. * @param audience the audience. * @param perCallPolicies per call policies. * @param perRetryPolicies per retry policies. 
* @param httpClient http client * @param endpoint endpoint to be called * @param serviceVersion the service api version being targeted by the client. * @return returns the httpPipeline to be consumed by the builders. */ public static HttpPipeline buildClientPipeline( ClientOptions clientOptions, HttpLogOptions logOptions, Configuration configuration, RetryPolicy retryPolicy, RetryOptions retryOptions, TokenCredential credential, ContainerRegistryAudience audience, List<HttpPipelinePolicy> perCallPolicies, List<HttpPipelinePolicy> perRetryPolicies, HttpClient httpClient, String endpoint, ContainerRegistryServiceVersion serviceVersion, Tracer tracer) { if (credential == null) { LOGGER.verbose("Credentials are null, enabling anonymous access"); } if (audience == null) { LOGGER.info("Audience is not specified, defaulting to ACR access token scope."); audience = ACR_ACCESS_TOKEN_AUDIENCE; } if (serviceVersion == null) { serviceVersion = ContainerRegistryServiceVersion.getLatest(); } HttpPipeline credentialsPipeline = buildPipeline(clientOptions, logOptions, configuration, retryPolicy, retryOptions, null, null, perCallPolicies, perRetryPolicies, httpClient, tracer); return buildPipeline(clientOptions, logOptions, configuration, retryPolicy, retryOptions, buildCredentialsPolicy(credentialsPipeline, credential, audience, endpoint, serviceVersion), new RedirectPolicy(), perCallPolicies, perRetryPolicies, httpClient, tracer); } private static ContainerRegistryCredentialsPolicy buildCredentialsPolicy(HttpPipeline credentialPipeline, TokenCredential credential, ContainerRegistryAudience audience, String endpoint, ContainerRegistryServiceVersion serviceVersion) { AzureContainerRegistryImpl acrClient = new AzureContainerRegistryImpl( credentialPipeline, endpoint, serviceVersion.getVersion()); ContainerRegistryTokenService tokenService = new ContainerRegistryTokenService(credential, audience, acrClient); return new ContainerRegistryCredentialsPolicy(tokenService, audience + 
"/.default"); } private static HttpPipeline buildPipeline(ClientOptions clientOptions, HttpLogOptions logOptions, Configuration configuration, RetryPolicy retryPolicy, RetryOptions retryOptions, ContainerRegistryCredentialsPolicy credentialPolicy, RedirectPolicy redirectPolicy, List<HttpPipelinePolicy> perCallPolicies, List<HttpPipelinePolicy> perRetryPolicies, HttpClient httpClient, Tracer tracer) { ArrayList<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add( new UserAgentPolicy(CoreUtils.getApplicationId(clientOptions, logOptions), CLIENT_NAME, CLIENT_VERSION, configuration)); policies.add(new RequestIdPolicy()); policies.addAll(perCallPolicies); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(ClientBuilderUtil.validateAndGetRetryPolicy(retryPolicy, retryOptions)); policies.add(new CookiePolicy()); policies.add(new AddDatePolicy()); HttpPolicyProviders.addAfterRetryPolicies(policies); if (credentialPolicy != null) { policies.add(credentialPolicy); } if (redirectPolicy != null) { policies.add(redirectPolicy); } policies.addAll(perRetryPolicies); policies.add(new HttpLoggingPolicy(logOptions)); return new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .clientOptions(clientOptions) .tracer(tracer) .build(); } public static Tracer createTracer(ClientOptions clientOptions) { TracingOptions tracingOptions = clientOptions == null ? null : clientOptions.getTracingOptions(); return TracerProvider.getDefaultProvider() .createTracer(CLIENT_NAME, CLIENT_VERSION, CONTAINER_REGISTRY_TRACING_NAMESPACE_VALUE, tracingOptions); } /** * This method computes the digest for the buffer content. * Docker digest is a SHA256 hash of the docker image content and is deterministic based on the image build. * @param buffer The buffer containing the image bytes. * @return SHA-256 digest for the given buffer. 
*/ public static String computeDigest(ByteBuffer buffer) { MessageDigest md = createSha256(); md.update(buffer.asReadOnlyBuffer()); return "sha256:" + bytesToHexString(md.digest()); } public static MessageDigest createSha256() { try { return MessageDigest.getInstance("SHA-256"); } catch (NoSuchAlgorithmException e) { throw LOGGER.logExceptionAsError(new RuntimeException(e)); } } public static void validateDigest(MessageDigest messageDigest, String requestedDigest) { String sha256 = bytesToHexString(messageDigest.digest()); if (isDigest(requestedDigest) && !requestedDigest.endsWith(sha256)) { throw LOGGER.atError() .addKeyValue("requestedDigest", requestedDigest) .addKeyValue("actualDigest", () -> "sha256:" + sha256) .log(new ServiceResponseException("The digest in the response does not match the expected digest.")); } } private static long checkManifestSize(HttpHeaders headers) { String contentLengthString = headers.getValue(HttpHeaderName.CONTENT_LENGTH); if (CoreUtils.isNullOrEmpty(contentLengthString)) { throw LOGGER.logExceptionAsError(new ServiceResponseException("Response does not include `Content-Length` header")); } try { long contentLength = Long.parseLong(contentLengthString); if (contentLength > MAX_MANIFEST_SIZE) { throw LOGGER.atError() .addKeyValue("contentLength", contentLengthString) .log(new ServiceResponseException("Manifest size is bigger than 4MB")); } return contentLength; } catch (NumberFormatException | NullPointerException e) { throw LOGGER.atError() .addKeyValue("contentLength", contentLengthString) .log(new ServiceResponseException("Could not parse `Content-Length` header")); } } /** * Delete operation should be idempotent. * And so should result in a success in case the service response is 400 : Not found. * @param responseT The response object. * @param <T> The encapsulating value. * @return The transformed response object. 
*/ public static <T> Response<Void> deleteResponseToSuccess(Response<T> responseT) { if (responseT.getStatusCode() != HTTP_STATUS_CODE_NOT_FOUND) { return getAcceptedDeleteResponse(responseT, responseT.getStatusCode()); } return getAcceptedDeleteResponse(responseT, HTTP_STATUS_CODE_ACCEPTED); } private static <T> Response<Void> getAcceptedDeleteResponse(Response<T> responseT, int statusCode) { return new SimpleResponse<>( responseT.getRequest(), statusCode, responseT.getHeaders(), null); } /** * This method converts AcrErrors inside AcrErrorsException into {@link HttpResponseException} * with {@link ResponseError} */ public static HttpResponseException mapAcrErrorsException(AcrErrorsException acrException) { final HttpResponse errorHttpResponse = acrException.getResponse(); if (acrException.getValue() != null && !CoreUtils.isNullOrEmpty(acrException.getValue().getErrors())) { AcrErrorInfo first = acrException.getValue().getErrors().get(0); ResponseError error = new ResponseError(first.getCode(), first.getMessage()); switch (errorHttpResponse.getStatusCode()) { case 401: throw new ClientAuthenticationException(acrException.getMessage(), acrException.getResponse(), error); case 404: return new ResourceNotFoundException(acrException.getMessage(), acrException.getResponse(), error); case 409: return new ResourceExistsException(acrException.getMessage(), acrException.getResponse(), error); case 412: return new ResourceModifiedException(acrException.getMessage(), acrException.getResponse(), error); default: return new HttpResponseException(acrException.getMessage(), acrException.getResponse(), error); } } return acrException; } /** * This method parses the response to get the continuation token used to make the next pagination call. * The continuation token is returned by the service in the form of a header and not as a nextLink field. * @param listResponse response that is parsed. * @param <T> the model type that is being operated on. 
* @return paged response with the correct continuation token. */ public static <T> PagedResponse<T> getPagedResponseWithContinuationToken(PagedResponse<T> listResponse) { return getPagedResponseWithContinuationToken(listResponse, values -> values); } /** * This method parses the response to get the continuation token used to make the next pagination call. * The continuation token is returned by the service in the form of a header and not as a nextLink field. * * <p> * Per the Docker v2 HTTP API spec, the Link header is an RFC5988 * compliant rel='next' with URL to next result set, if available. * See: https: * * The URI reference can be obtained from link-value as follows: * Link = "Link" ":" * link-value = "<" URI-Reference ">" * (";" link-param ) * See: https: * </p> * @param listResponse response that is parsed. * @param mapperFunction the function that maps the rest api response into the public model exposed by the client. * @param <T> The model type returned by the rest client. * @param <R> The model type returned by the public client. * @return paged response with the correct continuation token. 
*/ public static <T, R> PagedResponse<T> getPagedResponseWithContinuationToken(PagedResponse<R> listResponse, Function<List<R>, List<T>> mapperFunction) { Objects.requireNonNull(mapperFunction); String continuationLink = getContinuationLink(listResponse.getHeaders()); List<T> values = mapperFunction.apply(listResponse.getValue()); return new PagedResponseBase<String, T>( listResponse.getRequest(), listResponse.getStatusCode(), listResponse.getHeaders(), values, continuationLink, null ); } private static String getContinuationLink(HttpHeaders headers) { String continuationLinkHeader = headers.getValue(HttpHeaderName.LINK); if (!CoreUtils.isNullOrEmpty(continuationLinkHeader) && continuationLinkHeader.charAt(0) == '<') { int endIndex = continuationLinkHeader.indexOf(">;"); if (endIndex < 2) { LOGGER.warning("unexpected 'Link' header value - '{}'", continuationLinkHeader); } return continuationLinkHeader.substring(1, endIndex); } return null; } public static List<ArtifactManifestProperties> mapManifestsProperties(List<ManifestAttributesBase> baseArtifacts, String repositoryName, String registryLoginServer) { if (baseArtifacts == null) { return null; } List<ArtifactManifestProperties> artifactManifestProperties = new ArrayList<>(baseArtifacts.size()); for (ManifestAttributesBase base : baseArtifacts) { ArtifactManifestPropertiesInternal internal = new ArtifactManifestPropertiesInternal() .setRegistryLoginServer(registryLoginServer) .setRepositoryName(repositoryName) .setDigest(base.getDigest()) .setSizeInBytes(base.getSizeInBytes()) .setCreatedOn(base.getCreatedOn()) .setLastUpdatedOn(base.getLastUpdatedOn()) .setArchitecture(base.getArchitecture()) .setOperatingSystem(base.getOperatingSystem()) .setRelatedArtifacts(base.getRelatedArtifacts()) .setTags(base.getTags()) .setDeleteEnabled(base.isDeleteEnabled()) .setWriteEnabled(base.isWriteEnabled()) .setListEnabled(base.isListEnabled()) .setReadEnabled(base.isReadEnabled()); 
artifactManifestProperties.add(ArtifactManifestPropertiesHelper.create(internal)); } return artifactManifestProperties; } public static List<ArtifactTagProperties> getTagProperties(List<TagAttributesBase> baseValues, String repositoryName) { Objects.requireNonNull(baseValues); List<ArtifactTagProperties> artifactTagProperties = new ArrayList<>(baseValues.size()); for (TagAttributesBase base : baseValues) { ArtifactTagPropertiesInternal internal = new ArtifactTagPropertiesInternal() .setRepositoryName(repositoryName) .setName(base.getName()) .setDigest(base.getDigest()) .setCreatedOn(base.getCreatedOn()) .setLastUpdatedOn(base.getLastUpdatedOn()) .setDeleteEnabled(base.isDeleteEnabled()) .setWriteEnabled(base.isWriteEnabled()) .setListEnabled(base.isListEnabled()) .setReadEnabled(base.isReadEnabled()); artifactTagProperties.add(ArtifactTagPropertiesHelper.create(internal)); } return artifactTagProperties; } public static void validateResponseHeaderDigest(String requestedDigest, HttpHeaders headers) { String responseHeaderDigest = headers.getValue(DOCKER_DIGEST_HEADER_NAME); if (!requestedDigest.equals(responseHeaderDigest)) { throw LOGGER.atError() .addKeyValue("requestedDigest", requestedDigest) .addKeyValue("responseDigest", responseHeaderDigest) .log(new ServiceResponseException("The digest in the response header does not match the expected digest.")); } } public static <H, T> String getLocation(ResponseBase<H, T> response) { String locationHeader = response.getHeaders().getValue(HttpHeaderName.LOCATION); if (locationHeader != null && locationHeader.startsWith("/")) { return locationHeader.substring(1); } return locationHeader; } public static long getBlobSize(HttpHeaders headers) { HttpHeader contentRangeHeader = headers.get(HttpHeaderName.CONTENT_RANGE); if (contentRangeHeader != null) { long size = extractSizeFromContentRange(contentRangeHeader.getValue()); if (size > 0) { return size; } } throw LOGGER.atError() .addKeyValue("contentRange", contentRangeHeader) 
.log(new ServiceResponseException("Missing or invalid content-range header in response")); } /** * Checks if string represents tag or digest. * * @param tagOrDigest string to check * @return true if digest, false otherwise. */ public static boolean isDigest(String tagOrDigest) { return tagOrDigest.length() == 71 && tagOrDigest.startsWith("sha256:"); } public static String formatFullyQualifiedReference(String endpoint, String repositoryName, String tagOrDigest) { try { URL endpointUrl = new URL(endpoint); return endpointUrl.getHost() + "/" + repositoryName + (isDigest(tagOrDigest) ? "@" : ":") + tagOrDigest; } catch (MalformedURLException ex) { throw LOGGER.logExceptionAsWarning(new IllegalArgumentException("'endpoint' must be a valid URL", ex)); } } }
Good catch! I'll add a new changelog entry — this code is still in beta.
/**
 * Validates a raw manifest response and converts it into a {@link GetManifestResult} response.
 * <p>
 * Validation performed (order matters):
 * 1. {@code checkManifestSize} rejects responses without a parseable {@code Content-Length}
 *    or bigger than the 4MB manifest limit.
 * 2. The SHA-256 of the returned payload must match the {@code docker-content-digest}
 *    response header, and — when the manifest was requested by digest — must also match
 *    the requested digest.
 *
 * @param tagOrDigest tag or digest the manifest was requested with; digests are
 *                    recognized by {@code isDigest} ("sha256:" prefix, 71 chars).
 * @param rawResponse raw service response whose body is the manifest bytes.
 * @return response carrying the validated {@link GetManifestResult}.
 * @throws ServiceResponseException if the computed digest does not match the header
 *                                  (or the requested digest), or the size checks fail.
 */
public static Response<GetManifestResult> toGetManifestResponse(String tagOrDigest, Response<BinaryData> rawResponse) {
    checkManifestSize(rawResponse.getHeaders());
    String digest = rawResponse.getHeaders().getValue(DOCKER_DIGEST_HEADER_NAME);
    // Digest of what the service actually sent; compared against both the response
    // header and (when applicable) the digest the caller asked for.
    String responseSha256 = computeDigest(rawResponse.getValue().toByteBuffer());
    if (!Objects.equals(responseSha256, digest) || (isDigest(tagOrDigest) && !Objects.equals(responseSha256, tagOrDigest))) {
        throw LOGGER.logExceptionAsError(new ServiceResponseException("The digest in the response does not match the expected digest."));
    }
    String contentType = rawResponse.getHeaders().getValue(HttpHeaderName.CONTENT_TYPE);
    // Content-Type is optional; media type stays null when the service omitted it.
    ManifestMediaType responseMediaType = contentType != null ? ManifestMediaType.fromString(contentType) : null;
    return new SimpleResponse<>(
        rawResponse.getRequest(),
        rawResponse.getStatusCode(),
        rawResponse.getHeaders(),
        ConstructorAccessors.createGetManifestResult(digest, responseMediaType, rawResponse.getValue()));
}
checkManifestSize(rawResponse.getHeaders());
/**
 * Validates a raw manifest response (size limit, payload digest vs. the
 * {@code docker-content-digest} header and vs. the requested digest when the lookup
 * was by digest) and wraps it into a {@link GetManifestResult} response.
 *
 * @param tagOrDigest tag or digest used for the lookup.
 * @param rawResponse raw service response carrying the manifest bytes.
 * @return response carrying the validated {@link GetManifestResult}.
 * @throws ServiceResponseException when size or digest validation fails.
 */
public static Response<GetManifestResult> toGetManifestResponse(String tagOrDigest, Response<BinaryData> rawResponse) {
    checkManifestSize(rawResponse.getHeaders());

    HttpHeaders responseHeaders = rawResponse.getHeaders();
    String headerDigest = responseHeaders.getValue(DOCKER_DIGEST_HEADER_NAME);
    String computedSha256 = computeDigest(rawResponse.getValue().toByteBuffer());

    // Payload must match the header digest, and the requested digest when one was given.
    boolean headerMismatch = !Objects.equals(computedSha256, headerDigest);
    boolean requestedMismatch = isDigest(tagOrDigest) && !Objects.equals(computedSha256, tagOrDigest);
    if (headerMismatch || requestedMismatch) {
        throw LOGGER.logExceptionAsError(new ServiceResponseException("The digest in the response does not match the expected digest."));
    }

    String rawContentType = responseHeaders.getValue(HttpHeaderName.CONTENT_TYPE);
    ManifestMediaType mediaType = (rawContentType == null) ? null : ManifestMediaType.fromString(rawContentType);

    return new SimpleResponse<>(
        rawResponse.getRequest(),
        rawResponse.getStatusCode(),
        responseHeaders,
        ConstructorAccessors.createGetManifestResult(headerDigest, mediaType, rawResponse.getValue()));
}
class UtilsImpl { private static final ClientLogger LOGGER = new ClientLogger(UtilsImpl.class); private static final Map<String, String> PROPERTIES = CoreUtils.getProperties("azure-containers-containerregistry.properties"); private static final String CLIENT_NAME = PROPERTIES.getOrDefault("name", "UnknownName"); private static final String CLIENT_VERSION = PROPERTIES.getOrDefault("version", "UnknownVersion"); private static final ContainerRegistryAudience ACR_ACCESS_TOKEN_AUDIENCE = ContainerRegistryAudience.fromString("https: private static final int HTTP_STATUS_CODE_NOT_FOUND = 404; private static final int HTTP_STATUS_CODE_ACCEPTED = 202; public static final HttpHeaderName DOCKER_DIGEST_HEADER_NAME = HttpHeaderName.fromString("docker-content-digest"); public static final String SUPPORTED_MANIFEST_TYPES = "*/*" + "," + ManifestMediaType.OCI_MANIFEST + "," + ManifestMediaType.DOCKER_MANIFEST + ",application/vnd.oci.image.index.v1+json" + ",application/vnd.docker.distribution.manifest.list.v2+json" + ",application/vnd.cncf.oras.artifact.manifest.v1+json"; private static final String CONTAINER_REGISTRY_TRACING_NAMESPACE_VALUE = "Microsoft.ContainerRegistry"; public static final int CHUNK_SIZE = 4 * 1024 * 1024; public static final int MAX_MANIFEST_SIZE = 4 * 1024 * 1024; public static final String UPLOAD_BLOB_SPAN_NAME = "ContainerRegistryContentAsyncClient.uploadBlob"; public static final String DOWNLOAD_BLOB_SPAN_NAME = "ContainerRegistryContentAsyncClient.downloadBlob"; private UtilsImpl() { } /** * This method builds the httpPipeline for the builders. * @param clientOptions The client options * @param logOptions http log options. * @param configuration configuration settings. * @param retryPolicy retry policy * @param retryOptions retry options * @param credential credentials. * @param audience the audience. * @param perCallPolicies per call policies. * @param perRetryPolicies per retry policies. 
* @param httpClient http client * @param endpoint endpoint to be called * @param serviceVersion the service api version being targeted by the client. * @return returns the httpPipeline to be consumed by the builders. */ public static HttpPipeline buildClientPipeline( ClientOptions clientOptions, HttpLogOptions logOptions, Configuration configuration, RetryPolicy retryPolicy, RetryOptions retryOptions, TokenCredential credential, ContainerRegistryAudience audience, List<HttpPipelinePolicy> perCallPolicies, List<HttpPipelinePolicy> perRetryPolicies, HttpClient httpClient, String endpoint, ContainerRegistryServiceVersion serviceVersion, Tracer tracer) { if (credential == null) { LOGGER.verbose("Credentials are null, enabling anonymous access"); } if (audience == null) { LOGGER.info("Audience is not specified, defaulting to ACR access token scope."); audience = ACR_ACCESS_TOKEN_AUDIENCE; } if (serviceVersion == null) { serviceVersion = ContainerRegistryServiceVersion.getLatest(); } HttpPipeline credentialsPipeline = buildPipeline(clientOptions, logOptions, configuration, retryPolicy, retryOptions, null, null, perCallPolicies, perRetryPolicies, httpClient, tracer); return buildPipeline(clientOptions, logOptions, configuration, retryPolicy, retryOptions, buildCredentialsPolicy(credentialsPipeline, credential, audience, endpoint, serviceVersion), new RedirectPolicy(), perCallPolicies, perRetryPolicies, httpClient, tracer); } private static ContainerRegistryCredentialsPolicy buildCredentialsPolicy(HttpPipeline credentialPipeline, TokenCredential credential, ContainerRegistryAudience audience, String endpoint, ContainerRegistryServiceVersion serviceVersion) { AzureContainerRegistryImpl acrClient = new AzureContainerRegistryImpl( credentialPipeline, endpoint, serviceVersion.getVersion()); ContainerRegistryTokenService tokenService = new ContainerRegistryTokenService(credential, audience, acrClient); return new ContainerRegistryCredentialsPolicy(tokenService, audience + 
"/.default"); } private static HttpPipeline buildPipeline(ClientOptions clientOptions, HttpLogOptions logOptions, Configuration configuration, RetryPolicy retryPolicy, RetryOptions retryOptions, ContainerRegistryCredentialsPolicy credentialPolicy, RedirectPolicy redirectPolicy, List<HttpPipelinePolicy> perCallPolicies, List<HttpPipelinePolicy> perRetryPolicies, HttpClient httpClient, Tracer tracer) { ArrayList<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add( new UserAgentPolicy(CoreUtils.getApplicationId(clientOptions, logOptions), CLIENT_NAME, CLIENT_VERSION, configuration)); policies.add(new RequestIdPolicy()); policies.addAll(perCallPolicies); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(ClientBuilderUtil.validateAndGetRetryPolicy(retryPolicy, retryOptions)); policies.add(new CookiePolicy()); policies.add(new AddDatePolicy()); HttpPolicyProviders.addAfterRetryPolicies(policies); if (credentialPolicy != null) { policies.add(credentialPolicy); } if (redirectPolicy != null) { policies.add(redirectPolicy); } policies.addAll(perRetryPolicies); policies.add(new HttpLoggingPolicy(logOptions)); return new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .clientOptions(clientOptions) .tracer(tracer) .build(); } public static Tracer createTracer(ClientOptions clientOptions) { TracingOptions tracingOptions = clientOptions == null ? null : clientOptions.getTracingOptions(); return TracerProvider.getDefaultProvider() .createTracer(CLIENT_NAME, CLIENT_VERSION, CONTAINER_REGISTRY_TRACING_NAMESPACE_VALUE, tracingOptions); } /** * This method computes the digest for the buffer content. * Docker digest is a SHA256 hash of the docker image content and is deterministic based on the image build. * @param buffer The buffer containing the image bytes. * @return SHA-256 digest for the given buffer. 
*/ public static String computeDigest(ByteBuffer buffer) { MessageDigest md = createSha256(); md.update(buffer.asReadOnlyBuffer()); return "sha256:" + bytesToHexString(md.digest()); } public static MessageDigest createSha256() { try { return MessageDigest.getInstance("SHA-256"); } catch (NoSuchAlgorithmException e) { throw LOGGER.logExceptionAsError(new RuntimeException(e)); } } public static void validateDigest(MessageDigest messageDigest, String requestedDigest) { String sha256 = bytesToHexString(messageDigest.digest()); if (isDigest(requestedDigest) && !requestedDigest.endsWith(sha256)) { throw LOGGER.atError() .addKeyValue("requestedDigest", requestedDigest) .addKeyValue("actualDigest", () -> "sha256:" + sha256) .log(new ServiceResponseException("The digest in the response does not match the expected digest.")); } } private static long checkManifestSize(HttpHeaders headers) { String contentLengthString = headers.getValue(HttpHeaderName.CONTENT_LENGTH); if (CoreUtils.isNullOrEmpty(contentLengthString)) { throw LOGGER.logExceptionAsError(new ServiceResponseException("Response does not include `Content-Length` header")); } try { long contentLength = Long.parseLong(contentLengthString); if (contentLength > MAX_MANIFEST_SIZE) { throw LOGGER.atError() .addKeyValue("contentLength", contentLengthString) .log(new ServiceResponseException("Manifest size is bigger than 4MB")); } return contentLength; } catch (NumberFormatException | NullPointerException e) { throw LOGGER.atError() .addKeyValue("contentLength", contentLengthString) .log(new ServiceResponseException("Could not parse `Content-Length` header")); } } /** * Delete operation should be idempotent. * And so should result in a success in case the service response is 400 : Not found. * @param responseT The response object. * @param <T> The encapsulating value. * @return The transformed response object. 
*/ public static <T> Response<Void> deleteResponseToSuccess(Response<T> responseT) { if (responseT.getStatusCode() != HTTP_STATUS_CODE_NOT_FOUND) { return getAcceptedDeleteResponse(responseT, responseT.getStatusCode()); } return getAcceptedDeleteResponse(responseT, HTTP_STATUS_CODE_ACCEPTED); } private static <T> Response<Void> getAcceptedDeleteResponse(Response<T> responseT, int statusCode) { return new SimpleResponse<>( responseT.getRequest(), statusCode, responseT.getHeaders(), null); } /** * This method converts AcrErrors inside AcrErrorsException into {@link HttpResponseException} * with {@link ResponseError} */ public static HttpResponseException mapAcrErrorsException(AcrErrorsException acrException) { final HttpResponse errorHttpResponse = acrException.getResponse(); if (acrException.getValue() != null && !CoreUtils.isNullOrEmpty(acrException.getValue().getErrors())) { AcrErrorInfo first = acrException.getValue().getErrors().get(0); ResponseError error = new ResponseError(first.getCode(), first.getMessage()); switch (errorHttpResponse.getStatusCode()) { case 401: throw new ClientAuthenticationException(acrException.getMessage(), acrException.getResponse(), error); case 404: return new ResourceNotFoundException(acrException.getMessage(), acrException.getResponse(), error); case 409: return new ResourceExistsException(acrException.getMessage(), acrException.getResponse(), error); case 412: return new ResourceModifiedException(acrException.getMessage(), acrException.getResponse(), error); default: return new HttpResponseException(acrException.getMessage(), acrException.getResponse(), error); } } return acrException; } /** * This method parses the response to get the continuation token used to make the next pagination call. * The continuation token is returned by the service in the form of a header and not as a nextLink field. * @param listResponse response that is parsed. * @param <T> the model type that is being operated on. 
* @return paged response with the correct continuation token. */ public static <T> PagedResponse<T> getPagedResponseWithContinuationToken(PagedResponse<T> listResponse) { return getPagedResponseWithContinuationToken(listResponse, values -> values); } /** * This method parses the response to get the continuation token used to make the next pagination call. * The continuation token is returned by the service in the form of a header and not as a nextLink field. * * <p> * Per the Docker v2 HTTP API spec, the Link header is an RFC5988 * compliant rel='next' with URL to next result set, if available. * See: https: * * The URI reference can be obtained from link-value as follows: * Link = "Link" ":" * link-value = "<" URI-Reference ">" * (";" link-param ) * See: https: * </p> * @param listResponse response that is parsed. * @param mapperFunction the function that maps the rest api response into the public model exposed by the client. * @param <T> The model type returned by the rest client. * @param <R> The model type returned by the public client. * @return paged response with the correct continuation token. 
*/ public static <T, R> PagedResponse<T> getPagedResponseWithContinuationToken(PagedResponse<R> listResponse, Function<List<R>, List<T>> mapperFunction) { Objects.requireNonNull(mapperFunction); String continuationLink = getContinuationLink(listResponse.getHeaders()); List<T> values = mapperFunction.apply(listResponse.getValue()); return new PagedResponseBase<String, T>( listResponse.getRequest(), listResponse.getStatusCode(), listResponse.getHeaders(), values, continuationLink, null ); } private static String getContinuationLink(HttpHeaders headers) { String continuationLinkHeader = headers.getValue(HttpHeaderName.LINK); if (!CoreUtils.isNullOrEmpty(continuationLinkHeader) && continuationLinkHeader.charAt(0) == '<') { int endIndex = continuationLinkHeader.indexOf(">;"); if (endIndex < 2) { LOGGER.warning("unexpected 'Link' header value - '{}'", continuationLinkHeader); } return continuationLinkHeader.substring(1, endIndex); } return null; } public static List<ArtifactManifestProperties> mapManifestsProperties(List<ManifestAttributesBase> baseArtifacts, String repositoryName, String registryLoginServer) { if (baseArtifacts == null) { return null; } List<ArtifactManifestProperties> artifactManifestProperties = new ArrayList<>(baseArtifacts.size()); for (ManifestAttributesBase base : baseArtifacts) { ArtifactManifestPropertiesInternal internal = new ArtifactManifestPropertiesInternal() .setRegistryLoginServer(registryLoginServer) .setRepositoryName(repositoryName) .setDigest(base.getDigest()) .setSizeInBytes(base.getSizeInBytes()) .setCreatedOn(base.getCreatedOn()) .setLastUpdatedOn(base.getLastUpdatedOn()) .setArchitecture(base.getArchitecture()) .setOperatingSystem(base.getOperatingSystem()) .setRelatedArtifacts(base.getRelatedArtifacts()) .setTags(base.getTags()) .setDeleteEnabled(base.isDeleteEnabled()) .setWriteEnabled(base.isWriteEnabled()) .setListEnabled(base.isListEnabled()) .setReadEnabled(base.isReadEnabled()); 
artifactManifestProperties.add(ArtifactManifestPropertiesHelper.create(internal)); } return artifactManifestProperties; } public static List<ArtifactTagProperties> getTagProperties(List<TagAttributesBase> baseValues, String repositoryName) { Objects.requireNonNull(baseValues); List<ArtifactTagProperties> artifactTagProperties = new ArrayList<>(baseValues.size()); for (TagAttributesBase base : baseValues) { ArtifactTagPropertiesInternal internal = new ArtifactTagPropertiesInternal() .setRepositoryName(repositoryName) .setName(base.getName()) .setDigest(base.getDigest()) .setCreatedOn(base.getCreatedOn()) .setLastUpdatedOn(base.getLastUpdatedOn()) .setDeleteEnabled(base.isDeleteEnabled()) .setWriteEnabled(base.isWriteEnabled()) .setListEnabled(base.isListEnabled()) .setReadEnabled(base.isReadEnabled()); artifactTagProperties.add(ArtifactTagPropertiesHelper.create(internal)); } return artifactTagProperties; } public static void validateResponseHeaderDigest(String requestedDigest, HttpHeaders headers) { String responseHeaderDigest = headers.getValue(DOCKER_DIGEST_HEADER_NAME); if (!requestedDigest.equals(responseHeaderDigest)) { throw LOGGER.atError() .addKeyValue("requestedDigest", requestedDigest) .addKeyValue("responseDigest", responseHeaderDigest) .log(new ServiceResponseException("The digest in the response header does not match the expected digest.")); } } public static <H, T> String getLocation(ResponseBase<H, T> response) { String locationHeader = response.getHeaders().getValue(HttpHeaderName.LOCATION); if (locationHeader != null && locationHeader.startsWith("/")) { return locationHeader.substring(1); } return locationHeader; } public static long getBlobSize(HttpHeader contentRangeHeader) { Exception cause = null; if (contentRangeHeader != null) { int slashInd = contentRangeHeader.getValue().indexOf('/'); if (slashInd > 0) { try { long blobSize = Long.parseLong(contentRangeHeader.getValue().substring(slashInd + 1)); if (blobSize > 0) { return blobSize; } } 
catch (NumberFormatException ex) { cause = ex; } } } throw LOGGER.atError() .addKeyValue("contentRange", contentRangeHeader) .log(new ServiceResponseException("Invalid content-range header in response", cause)); } /** * Checks if string represents tag or digest. * * @param tagOrDigest string to check * @return true if digest, false otherwise. */ public static boolean isDigest(String tagOrDigest) { return tagOrDigest.length() == 71 && tagOrDigest.startsWith("sha256:"); } public static String formatFullyQualifiedReference(String endpoint, String repositoryName, String tagOrDigest) { try { URL endpointUrl = new URL(endpoint); return endpointUrl.getHost() + "/" + repositoryName + (isDigest(tagOrDigest) ? "@" : ":") + tagOrDigest; } catch (MalformedURLException ex) { throw LOGGER.logExceptionAsWarning(new IllegalArgumentException("'endpoint' must be a valid URL", ex)); } } }
class UtilsImpl { private static final ClientLogger LOGGER = new ClientLogger(UtilsImpl.class); private static final Map<String, String> PROPERTIES = CoreUtils.getProperties("azure-containers-containerregistry.properties"); private static final String CLIENT_NAME = PROPERTIES.getOrDefault("name", "UnknownName"); private static final String CLIENT_VERSION = PROPERTIES.getOrDefault("version", "UnknownVersion"); private static final ContainerRegistryAudience ACR_ACCESS_TOKEN_AUDIENCE = ContainerRegistryAudience.fromString("https: private static final int HTTP_STATUS_CODE_NOT_FOUND = 404; private static final int HTTP_STATUS_CODE_ACCEPTED = 202; public static final HttpHeaderName DOCKER_DIGEST_HEADER_NAME = HttpHeaderName.fromString("docker-content-digest"); public static final String SUPPORTED_MANIFEST_TYPES = "*/*" + "," + ManifestMediaType.OCI_MANIFEST + "," + ManifestMediaType.DOCKER_MANIFEST + ",application/vnd.oci.image.index.v1+json" + ",application/vnd.docker.distribution.manifest.list.v2+json" + ",application/vnd.cncf.oras.artifact.manifest.v1+json"; private static final String CONTAINER_REGISTRY_TRACING_NAMESPACE_VALUE = "Microsoft.ContainerRegistry"; public static final int CHUNK_SIZE = 4 * 1024 * 1024; public static final int MAX_MANIFEST_SIZE = 4 * 1024 * 1024; public static final String UPLOAD_BLOB_SPAN_NAME = "ContainerRegistryContentAsyncClient.uploadBlob"; public static final String DOWNLOAD_BLOB_SPAN_NAME = "ContainerRegistryContentAsyncClient.downloadBlob"; private UtilsImpl() { } /** * This method builds the httpPipeline for the builders. * @param clientOptions The client options * @param logOptions http log options. * @param configuration configuration settings. * @param retryPolicy retry policy * @param retryOptions retry options * @param credential credentials. * @param audience the audience. * @param perCallPolicies per call policies. * @param perRetryPolicies per retry policies. 
* @param httpClient http client * @param endpoint endpoint to be called * @param serviceVersion the service api version being targeted by the client. * @return returns the httpPipeline to be consumed by the builders. */ public static HttpPipeline buildClientPipeline( ClientOptions clientOptions, HttpLogOptions logOptions, Configuration configuration, RetryPolicy retryPolicy, RetryOptions retryOptions, TokenCredential credential, ContainerRegistryAudience audience, List<HttpPipelinePolicy> perCallPolicies, List<HttpPipelinePolicy> perRetryPolicies, HttpClient httpClient, String endpoint, ContainerRegistryServiceVersion serviceVersion, Tracer tracer) { if (credential == null) { LOGGER.verbose("Credentials are null, enabling anonymous access"); } if (audience == null) { LOGGER.info("Audience is not specified, defaulting to ACR access token scope."); audience = ACR_ACCESS_TOKEN_AUDIENCE; } if (serviceVersion == null) { serviceVersion = ContainerRegistryServiceVersion.getLatest(); } HttpPipeline credentialsPipeline = buildPipeline(clientOptions, logOptions, configuration, retryPolicy, retryOptions, null, null, perCallPolicies, perRetryPolicies, httpClient, tracer); return buildPipeline(clientOptions, logOptions, configuration, retryPolicy, retryOptions, buildCredentialsPolicy(credentialsPipeline, credential, audience, endpoint, serviceVersion), new RedirectPolicy(), perCallPolicies, perRetryPolicies, httpClient, tracer); } private static ContainerRegistryCredentialsPolicy buildCredentialsPolicy(HttpPipeline credentialPipeline, TokenCredential credential, ContainerRegistryAudience audience, String endpoint, ContainerRegistryServiceVersion serviceVersion) { AzureContainerRegistryImpl acrClient = new AzureContainerRegistryImpl( credentialPipeline, endpoint, serviceVersion.getVersion()); ContainerRegistryTokenService tokenService = new ContainerRegistryTokenService(credential, audience, acrClient); return new ContainerRegistryCredentialsPolicy(tokenService, audience + 
"/.default"); } private static HttpPipeline buildPipeline(ClientOptions clientOptions, HttpLogOptions logOptions, Configuration configuration, RetryPolicy retryPolicy, RetryOptions retryOptions, ContainerRegistryCredentialsPolicy credentialPolicy, RedirectPolicy redirectPolicy, List<HttpPipelinePolicy> perCallPolicies, List<HttpPipelinePolicy> perRetryPolicies, HttpClient httpClient, Tracer tracer) { ArrayList<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add( new UserAgentPolicy(CoreUtils.getApplicationId(clientOptions, logOptions), CLIENT_NAME, CLIENT_VERSION, configuration)); policies.add(new RequestIdPolicy()); policies.addAll(perCallPolicies); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(ClientBuilderUtil.validateAndGetRetryPolicy(retryPolicy, retryOptions)); policies.add(new CookiePolicy()); policies.add(new AddDatePolicy()); HttpPolicyProviders.addAfterRetryPolicies(policies); if (credentialPolicy != null) { policies.add(credentialPolicy); } if (redirectPolicy != null) { policies.add(redirectPolicy); } policies.addAll(perRetryPolicies); policies.add(new HttpLoggingPolicy(logOptions)); return new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .clientOptions(clientOptions) .tracer(tracer) .build(); } public static Tracer createTracer(ClientOptions clientOptions) { TracingOptions tracingOptions = clientOptions == null ? null : clientOptions.getTracingOptions(); return TracerProvider.getDefaultProvider() .createTracer(CLIENT_NAME, CLIENT_VERSION, CONTAINER_REGISTRY_TRACING_NAMESPACE_VALUE, tracingOptions); } /** * This method computes the digest for the buffer content. * Docker digest is a SHA256 hash of the docker image content and is deterministic based on the image build. * @param buffer The buffer containing the image bytes. * @return SHA-256 digest for the given buffer. 
*/ public static String computeDigest(ByteBuffer buffer) { MessageDigest md = createSha256(); md.update(buffer.asReadOnlyBuffer()); return "sha256:" + bytesToHexString(md.digest()); } public static MessageDigest createSha256() { try { return MessageDigest.getInstance("SHA-256"); } catch (NoSuchAlgorithmException e) { throw LOGGER.logExceptionAsError(new RuntimeException(e)); } } public static void validateDigest(MessageDigest messageDigest, String requestedDigest) { String sha256 = bytesToHexString(messageDigest.digest()); if (isDigest(requestedDigest) && !requestedDigest.endsWith(sha256)) { throw LOGGER.atError() .addKeyValue("requestedDigest", requestedDigest) .addKeyValue("actualDigest", () -> "sha256:" + sha256) .log(new ServiceResponseException("The digest in the response does not match the expected digest.")); } } private static long checkManifestSize(HttpHeaders headers) { String contentLengthString = headers.getValue(HttpHeaderName.CONTENT_LENGTH); if (CoreUtils.isNullOrEmpty(contentLengthString)) { throw LOGGER.logExceptionAsError(new ServiceResponseException("Response does not include `Content-Length` header")); } try { long contentLength = Long.parseLong(contentLengthString); if (contentLength > MAX_MANIFEST_SIZE) { throw LOGGER.atError() .addKeyValue("contentLength", contentLengthString) .log(new ServiceResponseException("Manifest size is bigger than 4MB")); } return contentLength; } catch (NumberFormatException | NullPointerException e) { throw LOGGER.atError() .addKeyValue("contentLength", contentLengthString) .log(new ServiceResponseException("Could not parse `Content-Length` header")); } } /** * Delete operation should be idempotent. * And so should result in a success in case the service response is 400 : Not found. * @param responseT The response object. * @param <T> The encapsulating value. * @return The transformed response object. 
*/ public static <T> Response<Void> deleteResponseToSuccess(Response<T> responseT) { if (responseT.getStatusCode() != HTTP_STATUS_CODE_NOT_FOUND) { return getAcceptedDeleteResponse(responseT, responseT.getStatusCode()); } return getAcceptedDeleteResponse(responseT, HTTP_STATUS_CODE_ACCEPTED); } private static <T> Response<Void> getAcceptedDeleteResponse(Response<T> responseT, int statusCode) { return new SimpleResponse<>( responseT.getRequest(), statusCode, responseT.getHeaders(), null); } /** * This method converts AcrErrors inside AcrErrorsException into {@link HttpResponseException} * with {@link ResponseError} */ public static HttpResponseException mapAcrErrorsException(AcrErrorsException acrException) { final HttpResponse errorHttpResponse = acrException.getResponse(); if (acrException.getValue() != null && !CoreUtils.isNullOrEmpty(acrException.getValue().getErrors())) { AcrErrorInfo first = acrException.getValue().getErrors().get(0); ResponseError error = new ResponseError(first.getCode(), first.getMessage()); switch (errorHttpResponse.getStatusCode()) { case 401: throw new ClientAuthenticationException(acrException.getMessage(), acrException.getResponse(), error); case 404: return new ResourceNotFoundException(acrException.getMessage(), acrException.getResponse(), error); case 409: return new ResourceExistsException(acrException.getMessage(), acrException.getResponse(), error); case 412: return new ResourceModifiedException(acrException.getMessage(), acrException.getResponse(), error); default: return new HttpResponseException(acrException.getMessage(), acrException.getResponse(), error); } } return acrException; } /** * This method parses the response to get the continuation token used to make the next pagination call. * The continuation token is returned by the service in the form of a header and not as a nextLink field. * @param listResponse response that is parsed. * @param <T> the model type that is being operated on. 
* @return paged response with the correct continuation token. */ public static <T> PagedResponse<T> getPagedResponseWithContinuationToken(PagedResponse<T> listResponse) { return getPagedResponseWithContinuationToken(listResponse, values -> values); } /** * This method parses the response to get the continuation token used to make the next pagination call. * The continuation token is returned by the service in the form of a header and not as a nextLink field. * * <p> * Per the Docker v2 HTTP API spec, the Link header is an RFC5988 * compliant rel='next' with URL to next result set, if available. * See: https: * * The URI reference can be obtained from link-value as follows: * Link = "Link" ":" * link-value = "<" URI-Reference ">" * (";" link-param ) * See: https: * </p> * @param listResponse response that is parsed. * @param mapperFunction the function that maps the rest api response into the public model exposed by the client. * @param <T> The model type returned by the rest client. * @param <R> The model type returned by the public client. * @return paged response with the correct continuation token. 
*/
public static <T, R> PagedResponse<T> getPagedResponseWithContinuationToken(PagedResponse<R> listResponse,
    Function<List<R>, List<T>> mapperFunction) {
    Objects.requireNonNull(mapperFunction);

    String continuationLink = getContinuationLink(listResponse.getHeaders());
    List<T> values = mapperFunction.apply(listResponse.getValue());

    return new PagedResponseBase<String, T>(
        listResponse.getRequest(),
        listResponse.getStatusCode(),
        listResponse.getHeaders(),
        values,
        continuationLink,
        null);
}

/**
 * Extracts the URI reference from an RFC5988 'Link' header value of the form
 * {@code <uri>; rel="next"}.
 *
 * @param headers Response headers.
 * @return The continuation link, or {@code null} when the header is absent or malformed.
 */
private static String getContinuationLink(HttpHeaders headers) {
    String continuationLinkHeader = headers.getValue(HttpHeaderName.LINK);
    if (!CoreUtils.isNullOrEmpty(continuationLinkHeader) && continuationLinkHeader.charAt(0) == '<') {
        int endIndex = continuationLinkHeader.indexOf(">;");
        if (endIndex < 2) {
            // Fixed: the original fell through to substring(1, endIndex) after logging, which
            // throws StringIndexOutOfBoundsException when ">;" is missing (endIndex == -1).
            // A malformed header now yields "no continuation" instead of crashing pagination.
            LOGGER.warning("unexpected 'Link' header value - '{}'", continuationLinkHeader);
            return null;
        }

        return continuationLinkHeader.substring(1, endIndex);
    }

    return null;
}

/**
 * Maps internal manifest attributes onto public {@link ArtifactManifestProperties} models.
 *
 * @param baseArtifacts Service-model manifests; may be {@code null}.
 * @param repositoryName Repository the manifests belong to.
 * @param registryLoginServer Login server host used to fully qualify the artifacts.
 * @return The mapped list, or {@code null} when {@code baseArtifacts} is {@code null}.
 */
public static List<ArtifactManifestProperties> mapManifestsProperties(List<ManifestAttributesBase> baseArtifacts,
    String repositoryName, String registryLoginServer) {
    if (baseArtifacts == null) {
        return null;
    }

    List<ArtifactManifestProperties> artifactManifestProperties = new ArrayList<>(baseArtifacts.size());
    for (ManifestAttributesBase base : baseArtifacts) {
        ArtifactManifestPropertiesInternal internal = new ArtifactManifestPropertiesInternal()
            .setRegistryLoginServer(registryLoginServer)
            .setRepositoryName(repositoryName)
            .setDigest(base.getDigest())
            .setSizeInBytes(base.getSizeInBytes())
            .setCreatedOn(base.getCreatedOn())
            .setLastUpdatedOn(base.getLastUpdatedOn())
            .setArchitecture(base.getArchitecture())
            .setOperatingSystem(base.getOperatingSystem())
            .setRelatedArtifacts(base.getRelatedArtifacts())
            .setTags(base.getTags())
            .setDeleteEnabled(base.isDeleteEnabled())
            .setWriteEnabled(base.isWriteEnabled())
            .setListEnabled(base.isListEnabled())
            .setReadEnabled(base.isReadEnabled());

        artifactManifestProperties.add(ArtifactManifestPropertiesHelper.create(internal));
    }
    return artifactManifestProperties;
}

/**
 * Maps internal tag attributes onto public {@link ArtifactTagProperties} models.
 *
 * @param baseValues Service-model tags; must not be {@code null}.
 * @param repositoryName Repository the tags belong to.
 * @return The mapped list.
 */
public static List<ArtifactTagProperties> getTagProperties(List<TagAttributesBase> baseValues,
    String repositoryName) {
    Objects.requireNonNull(baseValues);

    List<ArtifactTagProperties> artifactTagProperties = new ArrayList<>(baseValues.size());
    for (TagAttributesBase base : baseValues) {
        ArtifactTagPropertiesInternal internal = new ArtifactTagPropertiesInternal()
            .setRepositoryName(repositoryName)
            .setName(base.getName())
            .setDigest(base.getDigest())
            .setCreatedOn(base.getCreatedOn())
            .setLastUpdatedOn(base.getLastUpdatedOn())
            .setDeleteEnabled(base.isDeleteEnabled())
            .setWriteEnabled(base.isWriteEnabled())
            .setListEnabled(base.isListEnabled())
            .setReadEnabled(base.isReadEnabled());

        artifactTagProperties.add(ArtifactTagPropertiesHelper.create(internal));
    }
    return artifactTagProperties;
}

/**
 * Validates that the digest echoed by the service in the docker content digest header is the
 * digest the caller requested.
 *
 * @param requestedDigest The digest the caller asked for.
 * @param headers Response headers.
 * @throws ServiceResponseException When the response header digest does not match.
 */
public static void validateResponseHeaderDigest(String requestedDigest, HttpHeaders headers) {
    String responseHeaderDigest = headers.getValue(DOCKER_DIGEST_HEADER_NAME);
    if (!requestedDigest.equals(responseHeaderDigest)) {
        throw LOGGER.atError()
            .addKeyValue("requestedDigest", requestedDigest)
            .addKeyValue("responseDigest", responseHeaderDigest)
            .log(new ServiceResponseException("The digest in the response header does not match the expected digest."));
    }
}

/**
 * Returns the {@code Location} header with any leading '/' stripped so it can be used as a
 * relative path.
 *
 * @param response The service response.
 * @return The normalized location, or {@code null} when the header is absent.
 */
public static <H, T> String getLocation(ResponseBase<H, T> response) {
    String locationHeader = response.getHeaders().getValue(HttpHeaderName.LOCATION);
    if (locationHeader != null && locationHeader.startsWith("/")) {
        return locationHeader.substring(1);
    }
    return locationHeader;
}

/**
 * Extracts the total blob size from the {@code Content-Range} header.
 *
 * @param headers Response headers.
 * @return The blob size in bytes.
 * @throws ServiceResponseException When the header is missing or does not carry a positive size.
 */
public static long getBlobSize(HttpHeaders headers) {
    HttpHeader contentRangeHeader = headers.get(HttpHeaderName.CONTENT_RANGE);
    if (contentRangeHeader != null) {
        long size = extractSizeFromContentRange(contentRangeHeader.getValue());
        if (size > 0) {
            return size;
        }
    }

    throw LOGGER.atError()
        .addKeyValue("contentRange", contentRangeHeader)
        .log(new ServiceResponseException("Missing or invalid content-range header in response"));
}

/**
 * Checks if string represents tag or digest.
 *
 * @param tagOrDigest string to check
 * @return true if digest, false otherwise.
 */
public static boolean isDigest(String tagOrDigest) {
    // "sha256:" (7 chars) + 64 hex characters = 71.
    return tagOrDigest.length() == 71 && tagOrDigest.startsWith("sha256:");
}

/**
 * Builds the fully-qualified artifact reference, e.g. {@code host/repo:tag} or
 * {@code host/repo@sha256:...}.
 *
 * @param endpoint Registry endpoint URL.
 * @param repositoryName Repository name.
 * @param tagOrDigest Tag or digest identifying the artifact.
 * @return The fully-qualified reference.
 * @throws IllegalArgumentException When {@code endpoint} is not a valid URL.
 */
public static String formatFullyQualifiedReference(String endpoint, String repositoryName, String tagOrDigest) {
    try {
        URL endpointUrl = new URL(endpoint);
        return endpointUrl.getHost() + "/" + repositoryName + (isDigest(tagOrDigest) ? "@" : ":") + tagOrDigest;
    } catch (MalformedURLException ex) {
        throw LOGGER.logExceptionAsWarning(new IllegalArgumentException("'endpoint' must be a valid URL", ex));
    }
}
}
Call the convenience method `getCompletions(deploymentId, completionsOptions)` instead, so the code that converts the request options to BinaryData and deserializes the response BinaryData into Completions is not repeated.
/**
 * Gets completions for the provided input prompt. Completions support a wide variety of tasks and generate text
 * that continues from or "completes" provided prompt data.
 *
 * @param deploymentId deployment id of the deployed model.
 * @param prompt The prompt to generate completion text from.
 * @return completions for the provided input prompt on successful completion of {@link Mono}.
 */
public Mono<Completions> getCompletions(String deploymentId, String prompt) {
    // Delegate to the CompletionsOptions overload instead of re-implementing the
    // BinaryData request conversion and Completions deserialization here.
    return getCompletions(deploymentId, CompletionsUtils.DefaultCompletionsOptions(prompt));
}
return getCompletionsWithResponse(deploymentId, BinaryData.fromObject(completionsOptions), requestOptions)
/**
 * Gets completions for the provided input prompt by delegating to the
 * {@code CompletionsOptions}-based overload with default options.
 *
 * @param deploymentId deployment id of the deployed model.
 * @param prompt The prompt to generate completion text from.
 * @return completions for the provided input prompt on successful completion of {@link Mono}.
 */
public Mono<Completions> getCompletions(String deploymentId, String prompt) {
    CompletionsOptions defaultOptions = CompletionsUtils.defaultCompletionsOptions(prompt);
    return getCompletions(deploymentId, defaultOptions);
}
class OpenAIAsyncClient { @Generated private final OpenAIClientImpl serviceClient; /** * Initializes an instance of OpenAIAsyncClient class. * * @param serviceClient the service client implementation. */ @Generated OpenAIAsyncClient(OpenAIClientImpl serviceClient) { this.serviceClient = serviceClient; } /** * Return the embeddings for a given prompt. * * <p><strong>Request Body Schema</strong> * * <pre>{@code * { * user: String (Optional) * model: String (Optional) * input: InputModelBase (Required) * } * }</pre> * * <p><strong>Response Body Schema</strong> * * <pre>{@code * { * data (Required): [ * (Required){ * embedding (Required): [ * double (Required) * ] * index: int (Required) * } * ] * usage (Required): { * prompt_tokens: int (Required) * total_tokens: int (Required) * } * } * }</pre> * * @param deploymentId deployment id of the deployed model. * @param embeddingsOptions The configuration information for an embeddings request. Embeddings measure the * relatedness of text strings and are commonly used for search, clustering, recommendations, and other similar * scenarios. * @param requestOptions The options to configure the HTTP request before HTTP client sends it. * @throws HttpResponseException thrown if the request is rejected by server. * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. * @return representation of the response data from an embeddings request. Embeddings measure the relatedness of * text strings and are commonly used for search, clustering, recommendations, and other similar scenarios along * with {@link Response} on successful completion of {@link Mono}. 
*/ @Generated @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<BinaryData>> getEmbeddingsWithResponse( String deploymentId, BinaryData embeddingsOptions, RequestOptions requestOptions) { return this.serviceClient.getEmbeddingsWithResponseAsync(deploymentId, embeddingsOptions, requestOptions); } /** * Gets completions for the provided input prompts. Completions support a wide variety of tasks and generate text * that continues from or "completes" provided prompt data. * * <p><strong>Request Body Schema</strong> * * <pre>{@code * { * prompt (Required): [ * String (Required) * ] * max_tokens: Integer (Optional) * temperature: Double (Optional) * top_p: Double (Optional) * logit_bias (Optional): { * String: int (Optional) * } * user: String (Optional) * n: Integer (Optional) * logprobs: Integer (Optional) * echo: Boolean (Optional) * stop (Optional): [ * String (Optional) * ] * presence_penalty: Double (Optional) * frequency_penalty: Double (Optional) * best_of: Integer (Optional) * stream: Boolean (Optional) * model: String (Optional) * } * }</pre> * * <p><strong>Response Body Schema</strong> * * <pre>{@code * { * id: String (Required) * created: int (Required) * choices (Required): [ * (Required){ * text: String (Required) * index: int (Required) * logprobs (Required): { * tokens (Required): [ * String (Required) * ] * token_logprobs (Required): [ * double (Required) * ] * top_logprobs (Required): [ * (Required){ * String: double (Required) * } * ] * text_offset (Required): [ * int (Required) * ] * } * finish_reason: String(stopped/tokenLimitReached/contentFiltered) (Required) * } * ] * usage (Required): { * completion_tokens: int (Required) * prompt_tokens: int (Required) * total_tokens: int (Required) * } * } * }</pre> * * @param deploymentId deployment id of the deployed model. * @param completionsOptions The configuration information for a completions request. 
Completions support a wide * variety of tasks and generate text that continues from or "completes" provided prompt data. * @param requestOptions The options to configure the HTTP request before HTTP client sends it. * @throws HttpResponseException thrown if the request is rejected by server. * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. * @return completions for the provided input prompts. Completions support a wide variety of tasks and generate text * that continues from or "completes" provided prompt data along with {@link Response} on successful completion * of {@link Mono}. */ @Generated @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<BinaryData>> getCompletionsWithResponse( String deploymentId, BinaryData completionsOptions, RequestOptions requestOptions) { return this.serviceClient.getCompletionsWithResponseAsync(deploymentId, completionsOptions, requestOptions); } /** * Gets completions for the provided input prompts. Completions support a wide variety of tasks and generate text * that continues from or "completes" provided prompt data. 
* * <p><strong>Request Body Schema</strong> * * <pre>{@code * { * prompt (Required): [ * String (Required) * ] * max_tokens: Integer (Optional) * temperature: Double (Optional) * top_p: Double (Optional) * logit_bias (Optional): { * String: int (Optional) * } * user: String (Optional) * n: Integer (Optional) * logprobs: Integer (Optional) * echo: Boolean (Optional) * stop (Optional): [ * String (Optional) * ] * presence_penalty: Double (Optional) * frequency_penalty: Double (Optional) * best_of: Integer (Optional) * stream: Boolean (Optional) * model: String (Optional) * } * }</pre> * * <p><strong>Response Body Schema</strong> * * <pre>{@code * { * id: String (Required) * created: int (Required) * choices (Required): [ * (Required){ * text: String (Required) * index: int (Required) * logprobs (Required): { * tokens (Required): [ * String (Required) * ] * token_logprobs (Required): [ * double (Required) * ] * top_logprobs (Required): [ * (Required){ * String: double (Required) * } * ] * text_offset (Required): [ * int (Required) * ] * } * finish_reason: String(stopped/tokenLimitReached/contentFiltered) (Required) * } * ] * usage (Required): { * completion_tokens: int (Required) * prompt_tokens: int (Required) * total_tokens: int (Required) * } * } * }</pre> * * @param deploymentId deployment id of the deployed model. * @param prompt The prompts to generate values from. * @param requestOptions The options to configure the HTTP request before HTTP client sends it. * @throws HttpResponseException thrown if the request is rejected by server. * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. * @return completions for the provided input prompts. 
Completions support a wide variety of tasks and generate text * that continues from or "completes" provided prompt data along with {@link Response} on successful completion * of {@link Mono}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<BinaryData>> getCompletionsWithResponse( String deploymentId, String prompt, RequestOptions requestOptions) { CompletionsOptions completionsOptions = CompletionsUtils.DefaultCompletionsOptions(prompt); BinaryData completionsOptionsRequest = BinaryData.fromObject(completionsOptions); return this.serviceClient.getCompletionsWithResponseAsync(deploymentId, completionsOptionsRequest, requestOptions); } /** * Gets chat completions for the provided chat messages. Completions support a wide variety of tasks and generate * text that continues from or "completes" provided prompt data. * * <p><strong>Request Body Schema</strong> * * <pre>{@code * { * messages (Required): [ * (Required){ * role: String(system/assistant/user) (Required) * content: String (Optional) * } * ] * max_tokens: Integer (Optional) * temperature: Double (Optional) * top_p: Double (Optional) * logit_bias (Optional): { * String: int (Optional) * } * user: String (Optional) * n: Integer (Optional) * stop (Optional): [ * String (Optional) * ] * presence_penalty: Double (Optional) * frequency_penalty: Double (Optional) * stream: Boolean (Optional) * model: String (Optional) * } * }</pre> * * <p><strong>Response Body Schema</strong> * * <pre>{@code * { * id: String (Required) * created: int (Required) * choices (Required): [ * (Required){ * message (Optional): { * role: String(system/assistant/user) (Required) * content: String (Optional) * } * index: int (Required) * finish_reason: String(stopped/tokenLimitReached/contentFiltered) (Required) * delta (Optional): (recursive schema, see delta above) * } * ] * usage (Required): { * completion_tokens: int (Required) * prompt_tokens: int (Required) * total_tokens: int (Required) * } * } * }</pre> * * @param 
deploymentId deployment id of the deployed model. * @param chatCompletionsOptions The configuration information for a chat completions request. Completions support a * wide variety of tasks and generate text that continues from or "completes" provided prompt data. * @param requestOptions The options to configure the HTTP request before HTTP client sends it. * @throws HttpResponseException thrown if the request is rejected by server. * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. * @return chat completions for the provided chat messages. Completions support a wide variety of tasks and generate * text that continues from or "completes" provided prompt data along with {@link Response} on successful * completion of {@link Mono}. */ @Generated @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<BinaryData>> getChatCompletionsWithResponse( String deploymentId, BinaryData chatCompletionsOptions, RequestOptions requestOptions) { return this.serviceClient.getChatCompletionsWithResponseAsync( deploymentId, chatCompletionsOptions, requestOptions); } /** * Return the embeddings for a given prompt. * * @param deploymentId deployment id of the deployed model. * @param embeddingsOptions The configuration information for an embeddings request. Embeddings measure the * relatedness of text strings and are commonly used for search, clustering, recommendations, and other similar * scenarios. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. 
* @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return representation of the response data from an embeddings request. Embeddings measure the relatedness of * text strings and are commonly used for search, clustering, recommendations, and other similar scenarios on * successful completion of {@link Mono}. */ @Generated @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Embeddings> getEmbeddings(String deploymentId, EmbeddingsOptions embeddingsOptions) { RequestOptions requestOptions = new RequestOptions(); return getEmbeddingsWithResponse(deploymentId, BinaryData.fromObject(embeddingsOptions), requestOptions) .flatMap(FluxUtil::toMono) .map(protocolMethodData -> protocolMethodData.toObject(Embeddings.class)); } /** * Gets completions for the provided input prompts. Completions support a wide variety of tasks and generate text * that continues from or "completes" provided prompt data. * * @param deploymentId deployment id of the deployed model. * @param completionsOptions The configuration information for a completions request. Completions support a wide * variety of tasks and generate text that continues from or "completes" provided prompt data. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. 
* @return completions for the provided input prompts. Completions support a wide variety of tasks and generate text * that continues from or "completes" provided prompt data on successful completion of {@link Mono}. */ @Generated @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Completions> getCompletions(String deploymentId, CompletionsOptions completionsOptions) { RequestOptions requestOptions = new RequestOptions(); return getCompletionsWithResponse(deploymentId, BinaryData.fromObject(completionsOptions), requestOptions) .flatMap(FluxUtil::toMono) .map(protocolMethodData -> protocolMethodData.toObject(Completions.class)); } /** * Gets completions for the provided input prompts. Completions support a wide variety of tasks and generate text * that continues from or "completes" provided prompt data. * * @param deploymentId deployment id of the deployed model. * @param prompt The prompts to generate values from. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return completions for the provided input prompts. Completions support a wide variety of tasks and generate text * that continues from or "completes" provided prompt data on successful completion of {@link Mono}. */ @ServiceMethod(returns = ReturnType.SINGLE) /** * Gets chat completions for the provided chat messages. Completions support a wide variety of tasks and generate * text that continues from or "completes" provided prompt data. * * @param deploymentId deployment id of the deployed model. 
* @param chatCompletionsOptions The configuration information for a chat completions request. Completions support a * wide variety of tasks and generate text that continues from or "completes" provided prompt data. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return chat completions for the provided chat messages. Completions support a wide variety of tasks and generate * text that continues from or "completes" provided prompt data on successful completion of {@link Mono}. */ @Generated @ServiceMethod(returns = ReturnType.SINGLE) public Mono<ChatCompletions> getChatCompletions( String deploymentId, ChatCompletionsOptions chatCompletionsOptions) { RequestOptions requestOptions = new RequestOptions(); return getChatCompletionsWithResponse( deploymentId, BinaryData.fromObject(chatCompletionsOptions), requestOptions) .flatMap(FluxUtil::toMono) .map(protocolMethodData -> protocolMethodData.toObject(ChatCompletions.class)); } }
class OpenAIAsyncClient { @Generated private final OpenAIClientImpl serviceClient; /** * Initializes an instance of OpenAIAsyncClient class. * * @param serviceClient the service client implementation. */ @Generated OpenAIAsyncClient(OpenAIClientImpl serviceClient) { this.serviceClient = serviceClient; } /** * Return the embeddings for a given prompt. * * <p><strong>Request Body Schema</strong> * * <pre>{@code * { * user: String (Optional) * model: String (Optional) * input: InputModelBase (Required) * } * }</pre> * * <p><strong>Response Body Schema</strong> * * <pre>{@code * { * data (Required): [ * (Required){ * embedding (Required): [ * double (Required) * ] * index: int (Required) * } * ] * usage (Required): { * prompt_tokens: int (Required) * total_tokens: int (Required) * } * } * }</pre> * * @param deploymentId deployment id of the deployed model. * @param embeddingsOptions The configuration information for an embeddings request. Embeddings measure the * relatedness of text strings and are commonly used for search, clustering, recommendations, and other similar * scenarios. * @param requestOptions The options to configure the HTTP request before HTTP client sends it. * @throws HttpResponseException thrown if the request is rejected by server. * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. * @return representation of the response data from an embeddings request. Embeddings measure the relatedness of * text strings and are commonly used for search, clustering, recommendations, and other similar scenarios along * with {@link Response} on successful completion of {@link Mono}. 
*/ @Generated @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<BinaryData>> getEmbeddingsWithResponse( String deploymentId, BinaryData embeddingsOptions, RequestOptions requestOptions) { return this.serviceClient.getEmbeddingsWithResponseAsync(deploymentId, embeddingsOptions, requestOptions); } /** * Gets completions for the provided input prompts. Completions support a wide variety of tasks and generate text * that continues from or "completes" provided prompt data. * * <p><strong>Request Body Schema</strong> * * <pre>{@code * { * prompt (Required): [ * String (Required) * ] * max_tokens: Integer (Optional) * temperature: Double (Optional) * top_p: Double (Optional) * logit_bias (Optional): { * String: int (Optional) * } * user: String (Optional) * n: Integer (Optional) * logprobs: Integer (Optional) * echo: Boolean (Optional) * stop (Optional): [ * String (Optional) * ] * presence_penalty: Double (Optional) * frequency_penalty: Double (Optional) * best_of: Integer (Optional) * stream: Boolean (Optional) * model: String (Optional) * } * }</pre> * * <p><strong>Response Body Schema</strong> * * <pre>{@code * { * id: String (Required) * created: int (Required) * choices (Required): [ * (Required){ * text: String (Required) * index: int (Required) * logprobs (Required): { * tokens (Required): [ * String (Required) * ] * token_logprobs (Required): [ * double (Required) * ] * top_logprobs (Required): [ * (Required){ * String: double (Required) * } * ] * text_offset (Required): [ * int (Required) * ] * } * finish_reason: String(stopped/tokenLimitReached/contentFiltered) (Required) * } * ] * usage (Required): { * completion_tokens: int (Required) * prompt_tokens: int (Required) * total_tokens: int (Required) * } * } * }</pre> * * @param deploymentId deployment id of the deployed model. * @param completionsOptions The configuration information for a completions request. 
Completions support a wide * variety of tasks and generate text that continues from or "completes" provided prompt data. * @param requestOptions The options to configure the HTTP request before HTTP client sends it. * @throws HttpResponseException thrown if the request is rejected by server. * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. * @return completions for the provided input prompts. Completions support a wide variety of tasks and generate text * that continues from or "completes" provided prompt data along with {@link Response} on successful completion * of {@link Mono}. */ @Generated @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<BinaryData>> getCompletionsWithResponse( String deploymentId, BinaryData completionsOptions, RequestOptions requestOptions) { return this.serviceClient.getCompletionsWithResponseAsync(deploymentId, completionsOptions, requestOptions); } /** * Gets chat completions for the provided chat messages. Completions support a wide variety of tasks and generate * text that continues from or "completes" provided prompt data. 
* * <p><strong>Request Body Schema</strong> * * <pre>{@code * { * messages (Required): [ * (Required){ * role: String(system/assistant/user) (Required) * content: String (Optional) * } * ] * max_tokens: Integer (Optional) * temperature: Double (Optional) * top_p: Double (Optional) * logit_bias (Optional): { * String: int (Optional) * } * user: String (Optional) * n: Integer (Optional) * stop (Optional): [ * String (Optional) * ] * presence_penalty: Double (Optional) * frequency_penalty: Double (Optional) * stream: Boolean (Optional) * model: String (Optional) * } * }</pre> * * <p><strong>Response Body Schema</strong> * * <pre>{@code * { * id: String (Required) * created: int (Required) * choices (Required): [ * (Required){ * message (Optional): { * role: String(system/assistant/user) (Required) * content: String (Optional) * } * index: int (Required) * finish_reason: String(stopped/tokenLimitReached/contentFiltered) (Required) * delta (Optional): (recursive schema, see delta above) * } * ] * usage (Required): { * completion_tokens: int (Required) * prompt_tokens: int (Required) * total_tokens: int (Required) * } * } * }</pre> * * @param deploymentId deployment id of the deployed model. * @param chatCompletionsOptions The configuration information for a chat completions request. Completions support a * wide variety of tasks and generate text that continues from or "completes" provided prompt data. * @param requestOptions The options to configure the HTTP request before HTTP client sends it. * @throws HttpResponseException thrown if the request is rejected by server. * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. * @return chat completions for the provided chat messages. 
Completions support a wide variety of tasks and generate * text that continues from or "completes" provided prompt data along with {@link Response} on successful * completion of {@link Mono}. */ @Generated @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<BinaryData>> getChatCompletionsWithResponse( String deploymentId, BinaryData chatCompletionsOptions, RequestOptions requestOptions) { return this.serviceClient.getChatCompletionsWithResponseAsync( deploymentId, chatCompletionsOptions, requestOptions); } /** * Return the embeddings for a given prompt. * * @param deploymentId deployment id of the deployed model. * @param embeddingsOptions The configuration information for an embeddings request. Embeddings measure the * relatedness of text strings and are commonly used for search, clustering, recommendations, and other similar * scenarios. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return representation of the response data from an embeddings request. Embeddings measure the relatedness of * text strings and are commonly used for search, clustering, recommendations, and other similar scenarios on * successful completion of {@link Mono}. 
*/ @Generated @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Embeddings> getEmbeddings(String deploymentId, EmbeddingsOptions embeddingsOptions) { RequestOptions requestOptions = new RequestOptions(); return getEmbeddingsWithResponse(deploymentId, BinaryData.fromObject(embeddingsOptions), requestOptions) .flatMap(FluxUtil::toMono) .map(protocolMethodData -> protocolMethodData.toObject(Embeddings.class)); } /** * Gets completions for the provided input prompts. Completions support a wide variety of tasks and generate text * that continues from or "completes" provided prompt data. * * @param deploymentId deployment id of the deployed model. * @param completionsOptions The configuration information for a completions request. Completions support a wide * variety of tasks and generate text that continues from or "completes" provided prompt data. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return completions for the provided input prompts. Completions support a wide variety of tasks and generate text * that continues from or "completes" provided prompt data on successful completion of {@link Mono}. 
*/ @Generated @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Completions> getCompletions(String deploymentId, CompletionsOptions completionsOptions) { RequestOptions requestOptions = new RequestOptions(); return getCompletionsWithResponse(deploymentId, BinaryData.fromObject(completionsOptions), requestOptions) .flatMap(FluxUtil::toMono) .map(protocolMethodData -> protocolMethodData.toObject(Completions.class)); } /** * Gets completions for the provided input prompt. Completions support a wide variety of tasks and generate text * that continues from or "completes" provided prompt data. * * @param deploymentId deployment id of the deployed model. * @param prompt The prompt to generate completion text from. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return completions for the provided input prompts. Completions support a wide variety of tasks and generate text * that continues from or "completes" provided prompt data on successful completion of {@link Mono}. */ @ServiceMethod(returns = ReturnType.SINGLE) /** * Gets completions as a stream for the provided input prompts. Completions support a wide variety of tasks and * generate text that continues from or "completes" provided prompt data. * * @param deploymentId deployment id of the deployed model. * @param completionsOptions The configuration information for a completions request. Completions support a wide * variety of tasks and generate text that continues from or "completes" provided prompt data. 
* @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return a {@link Flux} of completions for the provided input prompts. Completions support a wide variety of tasks * and generate text that continues from or "completes" provided prompt data. */ @ServiceMethod(returns = ReturnType.COLLECTION) public Flux<Completions> getCompletionsStream(String deploymentId, CompletionsOptions completionsOptions) { RequestOptions requestOptions = new RequestOptions(); BinaryData requestBody = BinaryData.fromObject(completionsOptions); Flux<ByteBuffer> responseStream = getCompletionsWithResponse(deploymentId, requestBody, requestOptions) .flatMapMany(response -> response.getValue().toFluxByteBuffer()); OpenAIServerSentEvents<Completions> completionsStream = new OpenAIServerSentEvents<>(responseStream, Completions.class); return completionsStream.getEvents(); } /** * Gets chat completions for the provided chat messages. Completions support a wide variety of tasks and generate * text that continues from or "completes" provided prompt data. * * @param deploymentId deployment id of the deployed model. * @param chatCompletionsOptions The configuration information for a chat completions request. Completions support a * wide variety of tasks and generate text that continues from or "completes" provided prompt data. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. 
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return chat completions for the provided chat messages. Completions support a wide variety of tasks and generate * text that continues from or "completes" provided prompt data on successful completion of {@link Mono}. */ @Generated @ServiceMethod(returns = ReturnType.SINGLE) public Mono<ChatCompletions> getChatCompletions( String deploymentId, ChatCompletionsOptions chatCompletionsOptions) { RequestOptions requestOptions = new RequestOptions(); return getChatCompletionsWithResponse( deploymentId, BinaryData.fromObject(chatCompletionsOptions), requestOptions) .flatMap(FluxUtil::toMono) .map(protocolMethodData -> protocolMethodData.toObject(ChatCompletions.class)); } }
Same here - call the convenience method.
public Completions getCompletions(String deploymentId, String prompt) { RequestOptions requestOptions = new RequestOptions(); CompletionsOptions completionsOptions = CompletionsUtils.DefaultCompletionsOptions(prompt); return getCompletionsWithResponse(deploymentId, BinaryData.fromObject(completionsOptions), requestOptions) .getValue() .toObject(Completions.class); }
return getCompletionsWithResponse(deploymentId, BinaryData.fromObject(completionsOptions), requestOptions)
public Completions getCompletions(String deploymentId, String prompt) { return getCompletions(deploymentId, CompletionsUtils.defaultCompletionsOptions(prompt)); }
class OpenAIClient { @Generated private final OpenAIAsyncClient client; /** * Initializes an instance of OpenAIClient class. * * @param client the async client. */ @Generated OpenAIClient(OpenAIAsyncClient client) { this.client = client; } /** * Return the embeddings for a given prompt. * * <p><strong>Request Body Schema</strong> * * <pre>{@code * { * user: String (Optional) * model: String (Optional) * input: InputModelBase (Required) * } * }</pre> * * <p><strong>Response Body Schema</strong> * * <pre>{@code * { * data (Required): [ * (Required){ * embedding (Required): [ * double (Required) * ] * index: int (Required) * } * ] * usage (Required): { * prompt_tokens: int (Required) * total_tokens: int (Required) * } * } * }</pre> * * @param deploymentId deployment id of the deployed model. * @param embeddingsOptions The configuration information for an embeddings request. Embeddings measure the * relatedness of text strings and are commonly used for search, clustering, recommendations, and other similar * scenarios. * @param requestOptions The options to configure the HTTP request before HTTP client sends it. * @throws HttpResponseException thrown if the request is rejected by server. * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. * @return representation of the response data from an embeddings request. Embeddings measure the relatedness of * text strings and are commonly used for search, clustering, recommendations, and other similar scenarios along * with {@link Response}. 
*/ @Generated @ServiceMethod(returns = ReturnType.SINGLE) public Response<BinaryData> getEmbeddingsWithResponse( String deploymentId, BinaryData embeddingsOptions, RequestOptions requestOptions) { return this.client.getEmbeddingsWithResponse(deploymentId, embeddingsOptions, requestOptions).block(); } /** * Gets completions for the provided input prompts. Completions support a wide variety of tasks and generate text * that continues from or "completes" provided prompt data. * * <p><strong>Request Body Schema</strong> * * <pre>{@code * { * prompt (Required): [ * String (Required) * ] * max_tokens: Integer (Optional) * temperature: Double (Optional) * top_p: Double (Optional) * logit_bias (Optional): { * String: int (Optional) * } * user: String (Optional) * n: Integer (Optional) * logprobs: Integer (Optional) * echo: Boolean (Optional) * stop (Optional): [ * String (Optional) * ] * presence_penalty: Double (Optional) * frequency_penalty: Double (Optional) * best_of: Integer (Optional) * stream: Boolean (Optional) * model: String (Optional) * } * }</pre> * * <p><strong>Response Body Schema</strong> * * <pre>{@code * { * id: String (Required) * created: int (Required) * choices (Required): [ * (Required){ * text: String (Required) * index: int (Required) * logprobs (Required): { * tokens (Required): [ * String (Required) * ] * token_logprobs (Required): [ * double (Required) * ] * top_logprobs (Required): [ * (Required){ * String: double (Required) * } * ] * text_offset (Required): [ * int (Required) * ] * } * finish_reason: String(stopped/tokenLimitReached/contentFiltered) (Required) * } * ] * usage (Required): { * completion_tokens: int (Required) * prompt_tokens: int (Required) * total_tokens: int (Required) * } * } * }</pre> * * @param deploymentId deployment id of the deployed model. * @param completionsOptions The configuration information for a completions request. 
Completions support a wide * variety of tasks and generate text that continues from or "completes" provided prompt data. * @param requestOptions The options to configure the HTTP request before HTTP client sends it. * @throws HttpResponseException thrown if the request is rejected by server. * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. * @return completions for the provided input prompts. Completions support a wide variety of tasks and generate text * that continues from or "completes" provided prompt data along with {@link Response}. */ @Generated @ServiceMethod(returns = ReturnType.SINGLE) public Response<BinaryData> getCompletionsWithResponse( String deploymentId, BinaryData completionsOptions, RequestOptions requestOptions) { return this.client.getCompletionsWithResponse(deploymentId, completionsOptions, requestOptions).block(); } /** * Gets completions for the provided input prompts. Completions support a wide variety of tasks and generate text * that continues from or "completes" provided prompt data. 
* * <p><strong>Request Body Schema</strong> * * <pre>{@code * { * prompt (Required): [ * String (Required) * ] * max_tokens: Integer (Optional) * temperature: Double (Optional) * top_p: Double (Optional) * logit_bias (Optional): { * String: int (Optional) * } * user: String (Optional) * n: Integer (Optional) * logprobs: Integer (Optional) * echo: Boolean (Optional) * stop (Optional): [ * String (Optional) * ] * presence_penalty: Double (Optional) * frequency_penalty: Double (Optional) * best_of: Integer (Optional) * stream: Boolean (Optional) * model: String (Optional) * } * }</pre> * * <p><strong>Response Body Schema</strong> * * <pre>{@code * { * id: String (Required) * created: int (Required) * choices (Required): [ * (Required){ * text: String (Required) * index: int (Required) * logprobs (Required): { * tokens (Required): [ * String (Required) * ] * token_logprobs (Required): [ * double (Required) * ] * top_logprobs (Required): [ * (Required){ * String: double (Required) * } * ] * text_offset (Required): [ * int (Required) * ] * } * finish_reason: String(stopped/tokenLimitReached/contentFiltered) (Required) * } * ] * usage (Required): { * completion_tokens: int (Required) * prompt_tokens: int (Required) * total_tokens: int (Required) * } * } * }</pre> * * @param deploymentId deployment id of the deployed model. * @param prompt The prompts to generate values from. * @param requestOptions The options to configure the HTTP request before HTTP client sends it. * @throws HttpResponseException thrown if the request is rejected by server. * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. * @return completions for the provided input prompts. 
Completions support a wide variety of tasks and generate text * that continues from or "completes" provided prompt data along with {@link Response}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<BinaryData> getCompletionsWithResponse( String deploymentId, String prompt, RequestOptions requestOptions) { CompletionsOptions completionsOptions = CompletionsUtils.DefaultCompletionsOptions(prompt); BinaryData completionsOptionsRequest = BinaryData.fromObject(completionsOptions); return this.client.getCompletionsWithResponse(deploymentId, completionsOptionsRequest, requestOptions).block(); } /** * Gets chat completions for the provided chat messages. Completions support a wide variety of tasks and generate * text that continues from or "completes" provided prompt data. * * <p><strong>Request Body Schema</strong> * * <pre>{@code * { * messages (Required): [ * (Required){ * role: String(system/assistant/user) (Required) * content: String (Optional) * } * ] * max_tokens: Integer (Optional) * temperature: Double (Optional) * top_p: Double (Optional) * logit_bias (Optional): { * String: int (Optional) * } * user: String (Optional) * n: Integer (Optional) * stop (Optional): [ * String (Optional) * ] * presence_penalty: Double (Optional) * frequency_penalty: Double (Optional) * stream: Boolean (Optional) * model: String (Optional) * } * }</pre> * * <p><strong>Response Body Schema</strong> * * <pre>{@code * { * id: String (Required) * created: int (Required) * choices (Required): [ * (Required){ * message (Optional): { * role: String(system/assistant/user) (Required) * content: String (Optional) * } * index: int (Required) * finish_reason: String(stopped/tokenLimitReached/contentFiltered) (Required) * delta (Optional): (recursive schema, see delta above) * } * ] * usage (Required): { * completion_tokens: int (Required) * prompt_tokens: int (Required) * total_tokens: int (Required) * } * } * }</pre> * * @param deploymentId deployment id of the deployed model. 
* @param chatCompletionsOptions The configuration information for a chat completions request. Completions support a * wide variety of tasks and generate text that continues from or "completes" provided prompt data. * @param requestOptions The options to configure the HTTP request before HTTP client sends it. * @throws HttpResponseException thrown if the request is rejected by server. * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. * @return chat completions for the provided chat messages. Completions support a wide variety of tasks and generate * text that continues from or "completes" provided prompt data along with {@link Response}. */ @Generated @ServiceMethod(returns = ReturnType.SINGLE) public Response<BinaryData> getChatCompletionsWithResponse( String deploymentId, BinaryData chatCompletionsOptions, RequestOptions requestOptions) { return this.client.getChatCompletionsWithResponse(deploymentId, chatCompletionsOptions, requestOptions).block(); } /** * Return the embeddings for a given prompt. * * @param deploymentId deployment id of the deployed model. * @param embeddingsOptions The configuration information for an embeddings request. Embeddings measure the * relatedness of text strings and are commonly used for search, clustering, recommendations, and other similar * scenarios. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. 
* @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return representation of the response data from an embeddings request. Embeddings measure the relatedness of * text strings and are commonly used for search, clustering, recommendations, and other similar scenarios. */ @Generated @ServiceMethod(returns = ReturnType.SINGLE) public Embeddings getEmbeddings(String deploymentId, EmbeddingsOptions embeddingsOptions) { RequestOptions requestOptions = new RequestOptions(); return getEmbeddingsWithResponse(deploymentId, BinaryData.fromObject(embeddingsOptions), requestOptions) .getValue() .toObject(Embeddings.class); } /** * Gets completions for the provided input prompts. Completions support a wide variety of tasks and generate text * that continues from or "completes" provided prompt data. * * @param deploymentId deployment id of the deployed model. * @param completionsOptions The configuration information for a completions request. Completions support a wide * variety of tasks and generate text that continues from or "completes" provided prompt data. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return completions for the provided input prompts. Completions support a wide variety of tasks and generate text * that continues from or "completes" provided prompt data. 
*/ @Generated @ServiceMethod(returns = ReturnType.SINGLE) public Completions getCompletions(String deploymentId, CompletionsOptions completionsOptions) { RequestOptions requestOptions = new RequestOptions(); return getCompletionsWithResponse(deploymentId, BinaryData.fromObject(completionsOptions), requestOptions) .getValue() .toObject(Completions.class); } /** * Gets completions for the provided input prompts. Completions support a wide variety of tasks and generate text * that continues from or "completes" provided prompt data. * * @param deploymentId deployment id of the deployed model. * @param prompt The prompts to generate values from. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return completions for the provided input prompts. Completions support a wide variety of tasks and generate text * that continues from or "completes" provided prompt data. */ @ServiceMethod(returns = ReturnType.SINGLE) /** * Gets chat completions for the provided chat messages. Completions support a wide variety of tasks and generate * text that continues from or "completes" provided prompt data. * * @param deploymentId deployment id of the deployed model. * @param chatCompletionsOptions The configuration information for a chat completions request. Completions support a * wide variety of tasks and generate text that continues from or "completes" provided prompt data. * @throws IllegalArgumentException thrown if parameters fail the validation. 
* @throws HttpResponseException thrown if the request is rejected by server. * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return chat completions for the provided chat messages. Completions support a wide variety of tasks and generate * text that continues from or "completes" provided prompt data. */ @Generated @ServiceMethod(returns = ReturnType.SINGLE) public ChatCompletions getChatCompletions(String deploymentId, ChatCompletionsOptions chatCompletionsOptions) { RequestOptions requestOptions = new RequestOptions(); return getChatCompletionsWithResponse( deploymentId, BinaryData.fromObject(chatCompletionsOptions), requestOptions) .getValue() .toObject(ChatCompletions.class); } }
class OpenAIClient { @Generated private final OpenAIAsyncClient client; /** * Initializes an instance of OpenAIClient class. * * @param client the async client. */ @Generated OpenAIClient(OpenAIAsyncClient client) { this.client = client; } /** * Return the embeddings for a given prompt. * * <p><strong>Request Body Schema</strong> * * <pre>{@code * { * user: String (Optional) * model: String (Optional) * input: InputModelBase (Required) * } * }</pre> * * <p><strong>Response Body Schema</strong> * * <pre>{@code * { * data (Required): [ * (Required){ * embedding (Required): [ * double (Required) * ] * index: int (Required) * } * ] * usage (Required): { * prompt_tokens: int (Required) * total_tokens: int (Required) * } * } * }</pre> * * @param deploymentId deployment id of the deployed model. * @param embeddingsOptions The configuration information for an embeddings request. Embeddings measure the * relatedness of text strings and are commonly used for search, clustering, recommendations, and other similar * scenarios. * @param requestOptions The options to configure the HTTP request before HTTP client sends it. * @throws HttpResponseException thrown if the request is rejected by server. * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. * @return representation of the response data from an embeddings request. Embeddings measure the relatedness of * text strings and are commonly used for search, clustering, recommendations, and other similar scenarios along * with {@link Response}. 
*/ @Generated @ServiceMethod(returns = ReturnType.SINGLE) public Response<BinaryData> getEmbeddingsWithResponse( String deploymentId, BinaryData embeddingsOptions, RequestOptions requestOptions) { return this.client.getEmbeddingsWithResponse(deploymentId, embeddingsOptions, requestOptions).block(); } /** * Gets completions for the provided input prompts. Completions support a wide variety of tasks and generate text * that continues from or "completes" provided prompt data. * * <p><strong>Request Body Schema</strong> * * <pre>{@code * { * prompt (Required): [ * String (Required) * ] * max_tokens: Integer (Optional) * temperature: Double (Optional) * top_p: Double (Optional) * logit_bias (Optional): { * String: int (Optional) * } * user: String (Optional) * n: Integer (Optional) * logprobs: Integer (Optional) * echo: Boolean (Optional) * stop (Optional): [ * String (Optional) * ] * presence_penalty: Double (Optional) * frequency_penalty: Double (Optional) * best_of: Integer (Optional) * stream: Boolean (Optional) * model: String (Optional) * } * }</pre> * * <p><strong>Response Body Schema</strong> * * <pre>{@code * { * id: String (Required) * created: int (Required) * choices (Required): [ * (Required){ * text: String (Required) * index: int (Required) * logprobs (Required): { * tokens (Required): [ * String (Required) * ] * token_logprobs (Required): [ * double (Required) * ] * top_logprobs (Required): [ * (Required){ * String: double (Required) * } * ] * text_offset (Required): [ * int (Required) * ] * } * finish_reason: String(stopped/tokenLimitReached/contentFiltered) (Required) * } * ] * usage (Required): { * completion_tokens: int (Required) * prompt_tokens: int (Required) * total_tokens: int (Required) * } * } * }</pre> * * @param deploymentId deployment id of the deployed model. * @param completionsOptions The configuration information for a completions request. 
Completions support a wide * variety of tasks and generate text that continues from or "completes" provided prompt data. * @param requestOptions The options to configure the HTTP request before HTTP client sends it. * @throws HttpResponseException thrown if the request is rejected by server. * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. * @return completions for the provided input prompts. Completions support a wide variety of tasks and generate text * that continues from or "completes" provided prompt data along with {@link Response}. */ @Generated @ServiceMethod(returns = ReturnType.SINGLE) public Response<BinaryData> getCompletionsWithResponse( String deploymentId, BinaryData completionsOptions, RequestOptions requestOptions) { return this.client.getCompletionsWithResponse(deploymentId, completionsOptions, requestOptions).block(); } /** * Gets chat completions for the provided chat messages. Completions support a wide variety of tasks and generate * text that continues from or "completes" provided prompt data. 
* * <p><strong>Request Body Schema</strong> * * <pre>{@code * { * messages (Required): [ * (Required){ * role: String(system/assistant/user) (Required) * content: String (Optional) * } * ] * max_tokens: Integer (Optional) * temperature: Double (Optional) * top_p: Double (Optional) * logit_bias (Optional): { * String: int (Optional) * } * user: String (Optional) * n: Integer (Optional) * stop (Optional): [ * String (Optional) * ] * presence_penalty: Double (Optional) * frequency_penalty: Double (Optional) * stream: Boolean (Optional) * model: String (Optional) * } * }</pre> * * <p><strong>Response Body Schema</strong> * * <pre>{@code * { * id: String (Required) * created: int (Required) * choices (Required): [ * (Required){ * message (Optional): { * role: String(system/assistant/user) (Required) * content: String (Optional) * } * index: int (Required) * finish_reason: String(stopped/tokenLimitReached/contentFiltered) (Required) * delta (Optional): (recursive schema, see delta above) * } * ] * usage (Required): { * completion_tokens: int (Required) * prompt_tokens: int (Required) * total_tokens: int (Required) * } * } * }</pre> * * @param deploymentId deployment id of the deployed model. * @param chatCompletionsOptions The configuration information for a chat completions request. Completions support a * wide variety of tasks and generate text that continues from or "completes" provided prompt data. * @param requestOptions The options to configure the HTTP request before HTTP client sends it. * @throws HttpResponseException thrown if the request is rejected by server. * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. * @return chat completions for the provided chat messages. 
Completions support a wide variety of tasks and generate * text that continues from or "completes" provided prompt data along with {@link Response}. */ @Generated @ServiceMethod(returns = ReturnType.SINGLE) public Response<BinaryData> getChatCompletionsWithResponse( String deploymentId, BinaryData chatCompletionsOptions, RequestOptions requestOptions) { return this.client.getChatCompletionsWithResponse(deploymentId, chatCompletionsOptions, requestOptions).block(); } /** * Return the embeddings for a given prompt. * * @param deploymentId deployment id of the deployed model. * @param embeddingsOptions The configuration information for an embeddings request. Embeddings measure the * relatedness of text strings and are commonly used for search, clustering, recommendations, and other similar * scenarios. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return representation of the response data from an embeddings request. Embeddings measure the relatedness of * text strings and are commonly used for search, clustering, recommendations, and other similar scenarios. */ @Generated @ServiceMethod(returns = ReturnType.SINGLE) public Embeddings getEmbeddings(String deploymentId, EmbeddingsOptions embeddingsOptions) { RequestOptions requestOptions = new RequestOptions(); return getEmbeddingsWithResponse(deploymentId, BinaryData.fromObject(embeddingsOptions), requestOptions) .getValue() .toObject(Embeddings.class); } /** * Gets completions for the provided input prompts. 
Completions support a wide variety of tasks and generate text * that continues from or "completes" provided prompt data. * * @param deploymentId deployment id of the deployed model. * @param completionsOptions The configuration information for a completions request. Completions support a wide * variety of tasks and generate text that continues from or "completes" provided prompt data. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return completions for the provided input prompts. Completions support a wide variety of tasks and generate text * that continues from or "completes" provided prompt data. */ @Generated @ServiceMethod(returns = ReturnType.SINGLE) public Completions getCompletions(String deploymentId, CompletionsOptions completionsOptions) { RequestOptions requestOptions = new RequestOptions(); return getCompletionsWithResponse(deploymentId, BinaryData.fromObject(completionsOptions), requestOptions) .getValue() .toObject(Completions.class); } /** * Gets completions for the provided input prompt. Completions support a wide variety of tasks and generate text * that continues from or "completes" provided prompt data. * * @param deploymentId deployment id of the deployed model. * @param prompt The prompt to generate completion text from. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. 
* @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return completions for the provided input prompts. Completions support a wide variety of tasks and generate text * that continues from or "completes" provided prompt data. */ @ServiceMethod(returns = ReturnType.SINGLE) /** * Gets completions as a stream for the provided input prompts. Completions support a wide variety of tasks and * generate text that continues from or "completes" provided prompt data. * * @param deploymentId deployment id of the deployed model. * @param completionsOptions The configuration information for a completions request. Completions support a wide * variety of tasks and generate text that continues from or "completes" provided prompt data. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return an {@link IterableStream} of completions for the provided input prompts. Completions support a wide * variety of tasks and generate text that continues from or "completes" provided prompt data. 
*/ @ServiceMethod(returns = ReturnType.COLLECTION) public IterableStream<Completions> getCompletionsStream( String deploymentId, CompletionsOptions completionsOptions) { RequestOptions requestOptions = new RequestOptions(); Flux<ByteBuffer> responseStream = getCompletionsWithResponse(deploymentId, BinaryData.fromObject(completionsOptions), requestOptions) .getValue() .toFluxByteBuffer(); OpenAIServerSentEvents<Completions> completionsStream = new OpenAIServerSentEvents<>(responseStream, Completions.class); return new IterableStream<>(completionsStream.getEvents()); } /** * Gets chat completions for the provided chat messages. Completions support a wide variety of tasks and generate * text that continues from or "completes" provided prompt data. * * @param deploymentId deployment id of the deployed model. * @param chatCompletionsOptions The configuration information for a chat completions request. Completions support a * wide variety of tasks and generate text that continues from or "completes" provided prompt data. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return chat completions for the provided chat messages. Completions support a wide variety of tasks and generate * text that continues from or "completes" provided prompt data. 
*/ @Generated @ServiceMethod(returns = ReturnType.SINGLE) public ChatCompletions getChatCompletions(String deploymentId, ChatCompletionsOptions chatCompletionsOptions) { RequestOptions requestOptions = new RequestOptions(); return getChatCompletionsWithResponse( deploymentId, BinaryData.fromObject(chatCompletionsOptions), requestOptions) .getValue() .toObject(ChatCompletions.class); } }
```suggestion String azureOpenAIKey = "{azure-open-ai-key}"; ```
public static void main(String[] args) {
    // Replace these placeholders with your Azure OpenAI resource key, endpoint,
    // and the id of the deployed model to query.
    // Renamed azureOpenaiKey -> azureOpenAIKey for consistent "OpenAI" casing.
    String azureOpenAIKey = "{azure-open-ai-key}";
    String endpoint = "{azure-open-ai-endpoint}";
    String deploymentOrModelId = "{azure-open-ai-deployment-model-id}";

    // Build a synchronous client authenticated with the resource key.
    OpenAIClient client = new OpenAIClientBuilder()
        .endpoint(endpoint)
        .credential(new AzureKeyCredential(azureOpenAIKey))
        .buildClient();

    String prompt = "Tell me 3 jokes about trains";

    Completions completions = client.getCompletions(deploymentOrModelId, prompt);

    // Each choice is one generated completion for the prompt.
    for (Choice choice : completions.getChoices()) {
        System.out.printf("%s.%n", choice.getText());
    }
}
// Use consistent "OpenAI" casing in the variable name, matching the other samples.
String azureOpenAIKey = "{azure-open-ai-key}";
public static void main(String[] args) {
    // Credentials and deployment configuration for the Azure OpenAI resource.
    String azureOpenAIKey = "{azure-open-ai-key}";
    String endpoint = "{azure-open-ai-endpoint}";
    String deploymentOrModelId = "{azure-open-ai-deployment-model-id}";

    OpenAIClient openAIClient = new OpenAIClientBuilder()
        .endpoint(endpoint)
        .credential(new AzureKeyCredential(azureOpenAIKey))
        .buildClient();

    String prompt = "Tell me 3 jokes about trains";

    // Request completions for the prompt and print every generated choice.
    Completions result = openAIClient.getCompletions(deploymentOrModelId, prompt);
    for (Choice choice : result.getChoices()) {
        System.out.printf("%s.%n", choice.getText());
    }
}
/**
 * Sample demonstrating how to fetch and print the text choices generated for a
 * user-provided prompt using the synchronous OpenAI client.
 */
class GetCompletionsFromPrompt {
    /**
     * The sample will return the text choices that are generated based on the prompt provided by the user.
     *
     * @param args Unused. Arguments to the program.
     */
}
/**
 * Sample demonstrating how to fetch and print the text choices generated for a
 * user-provided prompt using the synchronous OpenAI client.
 */
class GetCompletionsFromPrompt {
    /**
     * The sample will return the text choices that are generated based on the prompt provided by the user.
     *
     * @param args Unused. Arguments to the program.
     */
}
This is the handler for completing the publisher successfully. So, this should just print a message saying the client method completed successfully.
public static void main(String[] args) throws InterruptedException {
    // Replace these placeholders with your Azure OpenAI resource key, endpoint,
    // and deployed model id. Renamed azureOpenaiKey -> azureOpenAIKey for casing.
    String azureOpenAIKey = "{azure-open-ai-key}";
    String endpoint = "{azure-open-ai-endpoint}";
    String deploymentOrModelId = "{azure-open-ai-deployment-model-id}";

    OpenAIAsyncClient client = new OpenAIClientBuilder()
        .endpoint(endpoint)
        .credential(new AzureKeyCredential(azureOpenAIKey))
        .buildAsyncClient();

    String prompt = "Tell me 3 facts about pineapples";

    // Subscribe without blocking: the three handlers receive the completions,
    // any error, and the publisher-completed signal respectively. The previous
    // Sinks/.timeout()/.block() approach blocked the caller, which obscured the
    // asynchronous usage this sample is meant to demonstrate.
    client.getCompletions(deploymentOrModelId, prompt)
        .subscribe(
            completions -> {
                for (Choice choice : completions.getChoices()) {
                    System.out.printf("%s.%n", choice.getText());
                }
            },
            error -> System.err.println("There was an error getting completions." + error),
            () -> System.out.println("Completed called getCompletions.")
        );

    // The call above returns immediately; pause the main thread so this sample
    // process stays alive long enough for the asynchronous request to finish.
    Thread.sleep(10_000);
}
// Completion handler: the publisher finished successfully, so just report it.
() -> System.out.println("Completed called getCompletions.")
public static void main(String[] args) throws InterruptedException {
    // Configuration for the Azure OpenAI resource and deployed model.
    String azureOpenAIKey = "{azure-open-ai-key}";
    String endpoint = "{azure-open-ai-endpoint}";
    String deploymentOrModelId = "{azure-open-ai-deployment-model-id}";

    OpenAIAsyncClient asyncClient = new OpenAIClientBuilder()
        .endpoint(endpoint)
        .credential(new AzureKeyCredential(azureOpenAIKey))
        .buildAsyncClient();

    String prompt = "Tell me 3 facts about pineapples";

    // Non-blocking subscription: handlers cover value, error, and completion.
    asyncClient.getCompletions(deploymentOrModelId, prompt)
        .subscribe(
            completions -> completions.getChoices()
                .forEach(choice -> System.out.printf("%s.%n", choice.getText())),
            error -> System.err.println("There was an error getting completions." + error),
            () -> System.out.println("Completed called getCompletions.")
        );

    // Keep the process alive so the asynchronous request can complete.
    TimeUnit.SECONDS.sleep(10);
}
/**
 * Sample demonstrating how to asynchronously fetch and print the text choices
 * generated for a user-provided prompt.
 */
class GetCompletionsFromPromptAsync {
    /**
     * The sample will return the text choices that are generated based on the prompt provided by the user.
     *
     * @param args Unused. Arguments to the program.
     */
}
/**
 * Sample demonstrating how to asynchronously fetch and print the text choices
 * generated for a user-provided prompt.
 */
class GetCompletionsFromPromptAsync {
    /**
     * The sample will return the text choices that are generated based on the prompt provided by the user.
     *
     * @param args Unused. Arguments to the program.
     */
}
In async samples, we don't generally use `block()`.
public static void main(String[] args) throws InterruptedException {
    // Replace these placeholders with your Azure OpenAI resource key, endpoint,
    // and deployed model id. Renamed azureOpenaiKey -> azureOpenAIKey for casing.
    String azureOpenAIKey = "{azure-open-ai-key}";
    String endpoint = "{azure-open-ai-endpoint}";
    String deploymentOrModelId = "{azure-open-ai-deployment-model-id}";

    OpenAIAsyncClient client = new OpenAIClientBuilder()
        .endpoint(endpoint)
        .credential(new AzureKeyCredential(azureOpenAIKey))
        .buildAsyncClient();

    String prompt = "Tell me 3 facts about pineapples";

    // Subscribe without blocking: the three handlers receive the completions,
    // any error, and the publisher-completed signal respectively. The previous
    // Sinks/.timeout()/.block() approach blocked the caller, which obscured the
    // asynchronous usage this sample is meant to demonstrate.
    client.getCompletions(deploymentOrModelId, prompt)
        .subscribe(
            completions -> {
                for (Choice choice : completions.getChoices()) {
                    System.out.printf("%s.%n", choice.getText());
                }
            },
            error -> System.err.println("There was an error getting completions." + error),
            () -> System.out.println("Completed called getCompletions.")
        );

    // The call above returns immediately; pause the main thread so this sample
    // process stays alive long enough for the asynchronous request to finish.
    Thread.sleep(10_000);
}
// Don't block on a sink in an async sample; simply keep the process alive
// long enough for the asynchronous request to complete.
Thread.sleep(10_000);
public static void main(String[] args) throws InterruptedException {
    // Configuration for the Azure OpenAI resource and deployed model.
    String azureOpenAIKey = "{azure-open-ai-key}";
    String endpoint = "{azure-open-ai-endpoint}";
    String deploymentOrModelId = "{azure-open-ai-deployment-model-id}";

    OpenAIAsyncClient asyncClient = new OpenAIClientBuilder()
        .endpoint(endpoint)
        .credential(new AzureKeyCredential(azureOpenAIKey))
        .buildAsyncClient();

    String prompt = "Tell me 3 facts about pineapples";

    // Non-blocking subscription: handlers cover value, error, and completion.
    asyncClient.getCompletions(deploymentOrModelId, prompt)
        .subscribe(
            completions -> completions.getChoices()
                .forEach(choice -> System.out.printf("%s.%n", choice.getText())),
            error -> System.err.println("There was an error getting completions." + error),
            () -> System.out.println("Completed called getCompletions.")
        );

    // Keep the process alive so the asynchronous request can complete.
    TimeUnit.SECONDS.sleep(10);
}
/**
 * Sample demonstrating how to asynchronously fetch and print the text choices
 * generated for a user-provided prompt.
 */
class GetCompletionsFromPromptAsync {
    /**
     * The sample will return the text choices that are generated based on the prompt provided by the user.
     *
     * @param args Unused. Arguments to the program.
     */
}
/**
 * Sample demonstrating how to asynchronously fetch and print the text choices
 * generated for a user-provided prompt.
 */
class GetCompletionsFromPromptAsync {
    /**
     * The sample will return the text choices that are generated based on the prompt provided by the user.
     *
     * @param args Unused. Arguments to the program.
     */
}
I did this thinking that it would be equivalent to `sleep` like in other samples, with the added advantage of not blocking for the full sleep duration. This call would normally resolve in less than a second. That said, if `sleep` is preferable for the sake of consistency, I'd be happy to change it to that.
public static void main(String[] args) throws InterruptedException {
    // Replace these placeholders with your Azure OpenAI resource key, endpoint,
    // and deployed model id. Renamed azureOpenaiKey -> azureOpenAIKey for casing.
    String azureOpenAIKey = "{azure-open-ai-key}";
    String endpoint = "{azure-open-ai-endpoint}";
    String deploymentOrModelId = "{azure-open-ai-deployment-model-id}";

    OpenAIAsyncClient client = new OpenAIClientBuilder()
        .endpoint(endpoint)
        .credential(new AzureKeyCredential(azureOpenAIKey))
        .buildAsyncClient();

    String prompt = "Tell me 3 facts about pineapples";

    // Subscribe without blocking: the three handlers receive the completions,
    // any error, and the publisher-completed signal respectively. The previous
    // Sinks/.timeout()/.block() approach blocked the caller, which obscured the
    // asynchronous usage this sample is meant to demonstrate.
    client.getCompletions(deploymentOrModelId, prompt)
        .subscribe(
            completions -> {
                for (Choice choice : completions.getChoices()) {
                    System.out.printf("%s.%n", choice.getText());
                }
            },
            error -> System.err.println("There was an error getting completions." + error),
            () -> System.out.println("Completed called getCompletions.")
        );

    // The call above returns immediately; pause the main thread so this sample
    // process stays alive long enough for the asynchronous request to finish.
    Thread.sleep(10_000);
}
// Don't block on a sink in an async sample; simply keep the process alive
// long enough for the asynchronous request to complete.
Thread.sleep(10_000);
public static void main(String[] args) throws InterruptedException {
    // Configuration for the Azure OpenAI resource and deployed model.
    String azureOpenAIKey = "{azure-open-ai-key}";
    String endpoint = "{azure-open-ai-endpoint}";
    String deploymentOrModelId = "{azure-open-ai-deployment-model-id}";

    OpenAIAsyncClient asyncClient = new OpenAIClientBuilder()
        .endpoint(endpoint)
        .credential(new AzureKeyCredential(azureOpenAIKey))
        .buildAsyncClient();

    String prompt = "Tell me 3 facts about pineapples";

    // Non-blocking subscription: handlers cover value, error, and completion.
    asyncClient.getCompletions(deploymentOrModelId, prompt)
        .subscribe(
            completions -> completions.getChoices()
                .forEach(choice -> System.out.printf("%s.%n", choice.getText())),
            error -> System.err.println("There was an error getting completions." + error),
            () -> System.out.println("Completed called getCompletions.")
        );

    // Keep the process alive so the asynchronous request can complete.
    TimeUnit.SECONDS.sleep(10);
}
/**
 * Sample demonstrating how to asynchronously fetch and print the text choices
 * generated for a user-provided prompt.
 */
class GetCompletionsFromPromptAsync {
    /**
     * The sample will return the text choices that are generated based on the prompt provided by the user.
     *
     * @param args Unused. Arguments to the program.
     */
}
/**
 * Sample demonstrating how to asynchronously fetch and print the text choices
 * generated for a user-provided prompt.
 */
class GetCompletionsFromPromptAsync {
    /**
     * The sample will return the text choices that are generated based on the prompt provided by the user.
     *
     * @param args Unused. Arguments to the program.
     */
}
For an async example, we want to show customers how to use it asynchronously without blocking. Adding Mono `.timeout()` and `.block()` will add noise and cause confusion.
public static void main(String[] args) throws InterruptedException {
    // Replace these placeholders with your Azure OpenAI resource key, endpoint,
    // and deployed model id. Renamed azureOpenaiKey -> azureOpenAIKey for casing.
    String azureOpenAIKey = "{azure-open-ai-key}";
    String endpoint = "{azure-open-ai-endpoint}";
    String deploymentOrModelId = "{azure-open-ai-deployment-model-id}";

    OpenAIAsyncClient client = new OpenAIClientBuilder()
        .endpoint(endpoint)
        .credential(new AzureKeyCredential(azureOpenAIKey))
        .buildAsyncClient();

    String prompt = "Tell me 3 facts about pineapples";

    // Subscribe without blocking: the three handlers receive the completions,
    // any error, and the publisher-completed signal respectively. The previous
    // Sinks/.timeout()/.block() approach blocked the caller, which obscured the
    // asynchronous usage this sample is meant to demonstrate.
    client.getCompletions(deploymentOrModelId, prompt)
        .subscribe(
            completions -> {
                for (Choice choice : completions.getChoices()) {
                    System.out.printf("%s.%n", choice.getText());
                }
            },
            error -> System.err.println("There was an error getting completions." + error),
            () -> System.out.println("Completed called getCompletions.")
        );

    // The call above returns immediately; pause the main thread so this sample
    // process stays alive long enough for the asynchronous request to finish.
    Thread.sleep(10_000);
}
// Don't block on a sink in an async sample; simply keep the process alive
// long enough for the asynchronous request to complete.
Thread.sleep(10_000);
public static void main(String[] args) throws InterruptedException {
    // Configuration for the Azure OpenAI resource and deployed model.
    String azureOpenAIKey = "{azure-open-ai-key}";
    String endpoint = "{azure-open-ai-endpoint}";
    String deploymentOrModelId = "{azure-open-ai-deployment-model-id}";

    OpenAIAsyncClient asyncClient = new OpenAIClientBuilder()
        .endpoint(endpoint)
        .credential(new AzureKeyCredential(azureOpenAIKey))
        .buildAsyncClient();

    String prompt = "Tell me 3 facts about pineapples";

    // Non-blocking subscription: handlers cover value, error, and completion.
    asyncClient.getCompletions(deploymentOrModelId, prompt)
        .subscribe(
            completions -> completions.getChoices()
                .forEach(choice -> System.out.printf("%s.%n", choice.getText())),
            error -> System.err.println("There was an error getting completions." + error),
            () -> System.out.println("Completed called getCompletions.")
        );

    // Keep the process alive so the asynchronous request can complete.
    TimeUnit.SECONDS.sleep(10);
}
/**
 * Sample demonstrating how to asynchronously fetch and print the text choices
 * generated for a user-provided prompt.
 */
class GetCompletionsFromPromptAsync {
    /**
     * The sample will return the text choices that are generated based on the prompt provided by the user.
     *
     * @param args Unused. Arguments to the program.
     */
}
/**
 * Sample demonstrating how to asynchronously fetch and print the text choices
 * generated for a user-provided prompt.
 */
class GetCompletionsFromPromptAsync {
    /**
     * The sample will return the text choices that are generated based on the prompt provided by the user.
     *
     * @param args Unused. Arguments to the program.
     */
}
Gotcha! My point was that `sleep()` is just as blocking as calling `block()` but I totally see your point of having confusing language in the sample. I will adjust it accordingly.
public static void main(String[] args) throws InterruptedException {
    // Replace these placeholders with your Azure OpenAI resource key, endpoint,
    // and deployed model id. Renamed azureOpenaiKey -> azureOpenAIKey for casing.
    String azureOpenAIKey = "{azure-open-ai-key}";
    String endpoint = "{azure-open-ai-endpoint}";
    String deploymentOrModelId = "{azure-open-ai-deployment-model-id}";

    OpenAIAsyncClient client = new OpenAIClientBuilder()
        .endpoint(endpoint)
        .credential(new AzureKeyCredential(azureOpenAIKey))
        .buildAsyncClient();

    String prompt = "Tell me 3 facts about pineapples";

    // Subscribe without blocking: the three handlers receive the completions,
    // any error, and the publisher-completed signal respectively. The previous
    // Sinks/.timeout()/.block() approach blocked the caller, which obscured the
    // asynchronous usage this sample is meant to demonstrate.
    client.getCompletions(deploymentOrModelId, prompt)
        .subscribe(
            completions -> {
                for (Choice choice : completions.getChoices()) {
                    System.out.printf("%s.%n", choice.getText());
                }
            },
            error -> System.err.println("There was an error getting completions." + error),
            () -> System.out.println("Completed called getCompletions.")
        );

    // The call above returns immediately; pause the main thread so this sample
    // process stays alive long enough for the asynchronous request to finish.
    Thread.sleep(10_000);
}
// Don't block on a sink in an async sample; simply keep the process alive
// long enough for the asynchronous request to complete.
Thread.sleep(10_000);
public static void main(String[] args) throws InterruptedException {
    // Configuration for the Azure OpenAI resource and deployed model.
    String azureOpenAIKey = "{azure-open-ai-key}";
    String endpoint = "{azure-open-ai-endpoint}";
    String deploymentOrModelId = "{azure-open-ai-deployment-model-id}";

    OpenAIAsyncClient asyncClient = new OpenAIClientBuilder()
        .endpoint(endpoint)
        .credential(new AzureKeyCredential(azureOpenAIKey))
        .buildAsyncClient();

    String prompt = "Tell me 3 facts about pineapples";

    // Non-blocking subscription: handlers cover value, error, and completion.
    asyncClient.getCompletions(deploymentOrModelId, prompt)
        .subscribe(
            completions -> completions.getChoices()
                .forEach(choice -> System.out.printf("%s.%n", choice.getText())),
            error -> System.err.println("There was an error getting completions." + error),
            () -> System.out.println("Completed called getCompletions.")
        );

    // Keep the process alive so the asynchronous request can complete.
    TimeUnit.SECONDS.sleep(10);
}
/**
 * Sample demonstrating how to asynchronously fetch and print the text choices
 * generated for a user-provided prompt.
 */
class GetCompletionsFromPromptAsync {
    /**
     * The sample will return the text choices that are generated based on the prompt provided by the user.
     *
     * @param args Unused. Arguments to the program.
     */
}
/**
 * Sample demonstrating how to asynchronously fetch and print the text choices
 * generated for a user-provided prompt.
 */
class GetCompletionsFromPromptAsync {
    /**
     * The sample will return the text choices that are generated based on the prompt provided by the user.
     *
     * @param args Unused. Arguments to the program.
     */
}
```suggestion // Closing the JsonWriter may throw an IllegalStateException if the current writing state isn't a valid final JSON state. ```
// Test teardown: closes the JsonWriter created for the test, if any.
public void afterEach() throws IOException {
    if (writer != null) {
        try {
            writer.close();
        } catch (IllegalStateException ignored) {
            // Closing the JsonWriter may throw an IllegalStateException if the
            // current writing state isn't a valid final JSON state; ignore it so
            // teardown never masks the actual test result.
        }
    }
}
// Test teardown: closes the JsonWriter created for the test, if any.
public void afterEach() throws IOException {
    if (writer != null) {
        try {
            writer.close();
        } catch (IllegalStateException ignored) {
            // Closing the JsonWriter may throw an IllegalStateException if the
            // current writing state isn't a valid final JSON state; ignore it so
            // teardown never masks the actual test result.
        }
    }
}
/** Contract tests exercising the Gson-backed {@code JsonWriter} implementation. */
class GsonJsonWriterContractTests extends JsonWriterContractTests {
    // Backing stream the writer serializes into; read back by getJsonWriterContents().
    private ByteArrayOutputStream outputStream;
    private JsonWriter writer;

    @BeforeEach
    public void beforeEach() throws IOException {
        this.outputStream = new ByteArrayOutputStream();
        this.writer = AzureJsonUtils.createWriter(outputStream, null);
    }

    // NOTE(review): this @AfterEach annotation has no method body attached in this
    // snippet — confirm the afterEach() teardown wasn't accidentally dropped.
    @AfterEach

    @Override
    public JsonWriter getJsonWriter() {
        return writer;
    }

    @Override
    public String getJsonWriterContents() {
        try {
            // Flush buffered output first so the returned string reflects all writes.
            writer.flush();
            return outputStream.toString(StandardCharsets.UTF_8.name());
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }
}
/** Contract tests exercising the Gson-backed {@code JsonWriter} implementation. */
class GsonJsonWriterContractTests extends JsonWriterContractTests {
    // Backing stream the writer serializes into; read back by getJsonWriterContents().
    private ByteArrayOutputStream outputStream;
    private JsonWriter writer;

    @BeforeEach
    public void beforeEach() throws IOException {
        this.outputStream = new ByteArrayOutputStream();
        this.writer = AzureJsonUtils.createWriter(outputStream, null);
    }

    // NOTE(review): this @AfterEach annotation has no method body attached in this
    // snippet — confirm the afterEach() teardown wasn't accidentally dropped.
    @AfterEach

    @Override
    public JsonWriter getJsonWriter() {
        return writer;
    }

    @Override
    public String getJsonWriterContents() {
        try {
            // Flush buffered output first so the returned string reflects all writes.
            writer.flush();
            return outputStream.toString(StandardCharsets.UTF_8.name());
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }
}
Since this was a copy and paste this exists elsewhere, I'll fix those too
// Test teardown: closes the JsonWriter created for the test, if any.
public void afterEach() throws IOException {
    if (writer != null) {
        try {
            writer.close();
        } catch (IllegalStateException ignored) {
            // Closing the JsonWriter may throw an IllegalStateException if the
            // current writing state isn't a valid final JSON state; ignore it so
            // teardown never masks the actual test result.
        }
    }
}
// Test teardown: closes the JsonWriter created for the test, if any.
public void afterEach() throws IOException {
    if (writer != null) {
        try {
            writer.close();
        } catch (IllegalStateException ignored) {
            // Closing the JsonWriter may throw an IllegalStateException if the
            // current writing state isn't a valid final JSON state; ignore it so
            // teardown never masks the actual test result.
        }
    }
}
/** Contract tests exercising the Gson-backed {@code JsonWriter} implementation. */
class GsonJsonWriterContractTests extends JsonWriterContractTests {
    // Backing stream the writer serializes into; read back by getJsonWriterContents().
    private ByteArrayOutputStream outputStream;
    private JsonWriter writer;

    @BeforeEach
    public void beforeEach() throws IOException {
        this.outputStream = new ByteArrayOutputStream();
        this.writer = AzureJsonUtils.createWriter(outputStream, null);
    }

    // NOTE(review): this @AfterEach annotation has no method body attached in this
    // snippet — confirm the afterEach() teardown wasn't accidentally dropped.
    @AfterEach

    @Override
    public JsonWriter getJsonWriter() {
        return writer;
    }

    @Override
    public String getJsonWriterContents() {
        try {
            // Flush buffered output first so the returned string reflects all writes.
            writer.flush();
            return outputStream.toString(StandardCharsets.UTF_8.name());
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }
}
/** Contract tests exercising the Gson-backed {@code JsonWriter} implementation. */
class GsonJsonWriterContractTests extends JsonWriterContractTests {
    // Backing stream the writer serializes into; read back by getJsonWriterContents().
    private ByteArrayOutputStream outputStream;
    private JsonWriter writer;

    @BeforeEach
    public void beforeEach() throws IOException {
        this.outputStream = new ByteArrayOutputStream();
        this.writer = AzureJsonUtils.createWriter(outputStream, null);
    }

    // NOTE(review): this @AfterEach annotation has no method body attached in this
    // snippet — confirm the afterEach() teardown wasn't accidentally dropped.
    @AfterEach

    @Override
    public JsonWriter getJsonWriter() {
        return writer;
    }

    @Override
    public String getJsonWriterContents() {
        try {
            // Flush buffered output first so the returned string reflects all writes.
            writer.flush();
            return outputStream.toString(StandardCharsets.UTF_8.name());
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }
}
nit: it would be nice to have a loop here that prints an individual participant. Something like this: for (RoomParticipant participant : participants) { System.out.println(participant.getCommunicationIdentifier().getRawId() + " (" + participant.getRole() + ")"); }
/** Lists every participant of a room and prints their raw id and role. */
public void listRoomParticipantsWithRoomId() {
    RoomsClient roomsClient = createRoomsClientWithConnectionString();

    try {
        PagedIterable<RoomParticipant> allParticipants = roomsClient.listParticipants("<Room Id>");
        // Print each participant so the sample actually shows the listed data
        // (previously the result was fetched but never used).
        for (RoomParticipant participant : allParticipants) {
            System.out.println(participant.getCommunicationIdentifier().getRawId() + " (" + participant.getRole() + ")");
        }
    } catch (RuntimeException ex) {
        System.out.println(ex);
    }
}
// Lazily-paged view over all participants currently in the room.
PagedIterable<RoomParticipant> allParticipants = roomsClient.listParticipants("<Room Id>");
/** Lists every participant of a room and prints their raw id and role. */
public void listRoomParticipantsWithRoomId() {
    RoomsClient roomsClient = createRoomsClientWithConnectionString();

    try {
        // Print "<rawId> (<role>)" for each participant in the room.
        PagedIterable<RoomParticipant> allParticipants = roomsClient.listParticipants("<Room Id>");
        allParticipants.forEach(participant ->
            System.out.println(participant.getCommunicationIdentifier().getRawId() + " (" + participant.getRole() + ")"));
    } catch (RuntimeException ex) {
        System.out.println(ex);
    }
}
class ReadmeSamples { RoomParticipant participant1; RoomParticipant participant2; public RoomsClient createRoomsClientUsingAzureKeyCredential() { String endpoint = "https: AzureKeyCredential azureKeyCredential = new AzureKeyCredential("<access-key>"); RoomsClient roomsClient = new RoomsClientBuilder().endpoint(endpoint).credential(azureKeyCredential) .buildClient(); return roomsClient; } public RoomsAsyncClient createRoomsAsyncClientUsingAzureKeyCredential() { String endpoint = "https: AzureKeyCredential azureKeyCredential = new AzureKeyCredential("<access-key>"); RoomsAsyncClient roomsClient = new RoomsClientBuilder().endpoint(endpoint).credential(azureKeyCredential) .buildAsyncClient(); return roomsClient; } public RoomsClient createRoomsClientWithConnectionString() { String connectionString = "https: RoomsClient roomsClient = new RoomsClientBuilder().connectionString(connectionString).buildClient(); return roomsClient; } public RoomsClient createRoomsClientWithAAD() { String endpoint = "https: RoomsClient roomsClient = new RoomsClientBuilder().endpoint(endpoint) .credential(new DefaultAzureCredentialBuilder().build()).buildClient(); return roomsClient; } public RoomsClient createSyncClientUsingTokenCredential() { TokenCredential tokenCredential = new DefaultAzureCredentialBuilder().build(); String endpoint = "https: RoomsClient roomsClient = new RoomsClientBuilder().endpoint(endpoint).credential(tokenCredential).buildClient(); return roomsClient; } public void createRoomWithValidInput() { RoomsClient roomsClient = createRoomsClientWithConnectionString(); OffsetDateTime validFrom = OffsetDateTime.now(); OffsetDateTime validUntil = validFrom.plusDays(30); List<RoomParticipant> participants = new ArrayList<>(); participant1 = new RoomParticipant(new CommunicationUserIdentifier("<ACS User MRI identity 1>")).setRole(ParticipantRole.ATTENDEE); participant2 = new RoomParticipant(new CommunicationUserIdentifier("<ACS User MRI identity 
2>")).setRole(ParticipantRole.CONSUMER); participants.add(participant1); participants.add(participant2); CreateRoomOptions roomOptions = new CreateRoomOptions() .setValidFrom(validFrom) .setValidUntil(validUntil) .setParticipants(participants); CommunicationRoom roomResult = roomsClient.createRoom(roomOptions); System.out.println("Room Id: " + roomResult.getRoomId()); } public void updateRoomWithRoomId() { RoomsClient roomsClient = createRoomsClientWithConnectionString(); OffsetDateTime validFrom = OffsetDateTime.now(); OffsetDateTime validUntil = validFrom.plusDays(30); UpdateRoomOptions updateRoomOptions = new UpdateRoomOptions() .setValidFrom(validFrom) .setValidUntil(validUntil); try { CommunicationRoom roomResult = roomsClient.updateRoom("<Room Id>", updateRoomOptions); System.out.println("Room Id: " + roomResult.getRoomId()); } catch (RuntimeException ex) { System.out.println(ex); } } public void getRoomWithRoomId() { RoomsClient roomsClient = createRoomsClientWithConnectionString(); try { CommunicationRoom roomResult = roomsClient.getRoom("<Room Id>"); System.out.println("Room Id: " + roomResult.getRoomId()); } catch (RuntimeException ex) { System.out.println(ex); } } public void deleteRoomWithRoomId() { RoomsClient roomsClient = createRoomsClientWithConnectionString(); try { roomsClient.deleteRoom("<Room Id>"); } catch (RuntimeException ex) { System.out.println(ex); } } public void addOrUpdateRoomParticipantsWithRoomId() { RoomsClient roomsClient = createRoomsClientWithConnectionString(); List<RoomParticipant> participantsToaddOrUpdate = new ArrayList<>(); RoomParticipant participantToAdd = new RoomParticipant(new CommunicationUserIdentifier("<ACS User MRI identity 3>")).setRole(ParticipantRole.ATTENDEE); participant2 = new RoomParticipant(new CommunicationUserIdentifier("<ACS User MRI identity 2>")).setRole(ParticipantRole.ATTENDEE); participantsToaddOrUpdate.add(participantToAdd); participantsToaddOrUpdate.add(participant2); try { 
AddOrUpdateParticipantsResult addOrUpdateResult = roomsClient.addOrUpdateParticipants("<Room Id>", participantsToaddOrUpdate); } catch (RuntimeException ex) { System.out.println(ex); } } public void removeRoomParticipantsWithRoomId() { RoomsClient roomsClient = createRoomsClientWithConnectionString(); List<CommunicationIdentifier> participantsToRemove = new ArrayList<>(); participantsToRemove.add(participant1.getCommunicationIdentifier()); participantsToRemove.add(participant2.getCommunicationIdentifier()); try { RemoveParticipantsResult removeResult = roomsClient.removeParticipants("<Room Id>", participantsToRemove); } catch (RuntimeException ex) { System.out.println(ex); } } }
class ReadmeSamples { RoomParticipant participant1; RoomParticipant participant2; public RoomsClient createRoomsClientUsingAzureKeyCredential() { String endpoint = "https: AzureKeyCredential azureKeyCredential = new AzureKeyCredential("<access-key>"); RoomsClient roomsClient = new RoomsClientBuilder().endpoint(endpoint).credential(azureKeyCredential) .buildClient(); return roomsClient; } public RoomsAsyncClient createRoomsAsyncClientUsingAzureKeyCredential() { String endpoint = "https: AzureKeyCredential azureKeyCredential = new AzureKeyCredential("<access-key>"); RoomsAsyncClient roomsClient = new RoomsClientBuilder().endpoint(endpoint).credential(azureKeyCredential) .buildAsyncClient(); return roomsClient; } public RoomsClient createRoomsClientWithConnectionString() { String connectionString = "https: RoomsClient roomsClient = new RoomsClientBuilder().connectionString(connectionString).buildClient(); return roomsClient; } public RoomsClient createRoomsClientWithAAD() { String endpoint = "https: RoomsClient roomsClient = new RoomsClientBuilder().endpoint(endpoint) .credential(new DefaultAzureCredentialBuilder().build()).buildClient(); return roomsClient; } public RoomsClient createSyncClientUsingTokenCredential() { TokenCredential tokenCredential = new DefaultAzureCredentialBuilder().build(); String endpoint = "https: RoomsClient roomsClient = new RoomsClientBuilder().endpoint(endpoint).credential(tokenCredential).buildClient(); return roomsClient; } public void createRoomWithValidInput() { RoomsClient roomsClient = createRoomsClientWithConnectionString(); OffsetDateTime validFrom = OffsetDateTime.now(); OffsetDateTime validUntil = validFrom.plusDays(30); List<RoomParticipant> participants = new ArrayList<>(); participant1 = new RoomParticipant(new CommunicationUserIdentifier("<ACS User MRI identity 1>")).setRole(ParticipantRole.ATTENDEE); participant2 = new RoomParticipant(new CommunicationUserIdentifier("<ACS User MRI identity 
2>")).setRole(ParticipantRole.CONSUMER); participants.add(participant1); participants.add(participant2); CreateRoomOptions roomOptions = new CreateRoomOptions() .setValidFrom(validFrom) .setValidUntil(validUntil) .setParticipants(participants); CommunicationRoom roomResult = roomsClient.createRoom(roomOptions); System.out.println("Room Id: " + roomResult.getRoomId()); } public void updateRoomWithRoomId() { RoomsClient roomsClient = createRoomsClientWithConnectionString(); OffsetDateTime validFrom = OffsetDateTime.now(); OffsetDateTime validUntil = validFrom.plusDays(30); UpdateRoomOptions updateRoomOptions = new UpdateRoomOptions() .setValidFrom(validFrom) .setValidUntil(validUntil); try { CommunicationRoom roomResult = roomsClient.updateRoom("<Room Id>", updateRoomOptions); System.out.println("Room Id: " + roomResult.getRoomId()); } catch (RuntimeException ex) { System.out.println(ex); } } public void getRoomWithRoomId() { RoomsClient roomsClient = createRoomsClientWithConnectionString(); try { CommunicationRoom roomResult = roomsClient.getRoom("<Room Id>"); System.out.println("Room Id: " + roomResult.getRoomId()); } catch (RuntimeException ex) { System.out.println(ex); } } public void deleteRoomWithRoomId() { RoomsClient roomsClient = createRoomsClientWithConnectionString(); try { roomsClient.deleteRoom("<Room Id>"); } catch (RuntimeException ex) { System.out.println(ex); } } public void addOrUpdateRoomParticipantsWithRoomId() { RoomsClient roomsClient = createRoomsClientWithConnectionString(); List<RoomParticipant> participantsToaddOrUpdate = new ArrayList<>(); RoomParticipant participantToAdd = new RoomParticipant(new CommunicationUserIdentifier("<ACS User MRI identity 3>")).setRole(ParticipantRole.ATTENDEE); participant2 = new RoomParticipant(new CommunicationUserIdentifier("<ACS User MRI identity 2>")).setRole(ParticipantRole.ATTENDEE); participantsToaddOrUpdate.add(participantToAdd); participantsToaddOrUpdate.add(participant2); try { 
AddOrUpdateParticipantsResult addOrUpdateResult = roomsClient.addOrUpdateParticipants("<Room Id>", participantsToaddOrUpdate); } catch (RuntimeException ex) { System.out.println(ex); } } public void removeRoomParticipantsWithRoomId() { RoomsClient roomsClient = createRoomsClientWithConnectionString(); List<CommunicationIdentifier> participantsToRemove = new ArrayList<>(); participantsToRemove.add(participant1.getCommunicationIdentifier()); participantsToRemove.add(participant2.getCommunicationIdentifier()); try { RemoveParticipantsResult removeResult = roomsClient.removeParticipants("<Room Id>", participantsToRemove); } catch (RuntimeException ex) { System.out.println(ex); } } }
Great suggestion, added!
/** Lists every participant of a room and prints their raw id and role. */
public void listRoomParticipantsWithRoomId() {
    RoomsClient roomsClient = createRoomsClientWithConnectionString();

    try {
        PagedIterable<RoomParticipant> allParticipants = roomsClient.listParticipants("<Room Id>");
        // Print each participant so the sample actually shows the listed data
        // (previously the result was fetched but never used).
        for (RoomParticipant participant : allParticipants) {
            System.out.println(participant.getCommunicationIdentifier().getRawId() + " (" + participant.getRole() + ")");
        }
    } catch (RuntimeException ex) {
        System.out.println(ex);
    }
}
// Lazily-paged view over all participants currently in the room.
PagedIterable<RoomParticipant> allParticipants = roomsClient.listParticipants("<Room Id>");
/** Lists every participant of a room and prints their raw id and role. */
public void listRoomParticipantsWithRoomId() {
    RoomsClient roomsClient = createRoomsClientWithConnectionString();

    try {
        // Print "<rawId> (<role>)" for each participant in the room.
        PagedIterable<RoomParticipant> allParticipants = roomsClient.listParticipants("<Room Id>");
        allParticipants.forEach(participant ->
            System.out.println(participant.getCommunicationIdentifier().getRawId() + " (" + participant.getRole() + ")"));
    } catch (RuntimeException ex) {
        System.out.println(ex);
    }
}
class ReadmeSamples { RoomParticipant participant1; RoomParticipant participant2; public RoomsClient createRoomsClientUsingAzureKeyCredential() { String endpoint = "https: AzureKeyCredential azureKeyCredential = new AzureKeyCredential("<access-key>"); RoomsClient roomsClient = new RoomsClientBuilder().endpoint(endpoint).credential(azureKeyCredential) .buildClient(); return roomsClient; } public RoomsAsyncClient createRoomsAsyncClientUsingAzureKeyCredential() { String endpoint = "https: AzureKeyCredential azureKeyCredential = new AzureKeyCredential("<access-key>"); RoomsAsyncClient roomsClient = new RoomsClientBuilder().endpoint(endpoint).credential(azureKeyCredential) .buildAsyncClient(); return roomsClient; } public RoomsClient createRoomsClientWithConnectionString() { String connectionString = "https: RoomsClient roomsClient = new RoomsClientBuilder().connectionString(connectionString).buildClient(); return roomsClient; } public RoomsClient createRoomsClientWithAAD() { String endpoint = "https: RoomsClient roomsClient = new RoomsClientBuilder().endpoint(endpoint) .credential(new DefaultAzureCredentialBuilder().build()).buildClient(); return roomsClient; } public RoomsClient createSyncClientUsingTokenCredential() { TokenCredential tokenCredential = new DefaultAzureCredentialBuilder().build(); String endpoint = "https: RoomsClient roomsClient = new RoomsClientBuilder().endpoint(endpoint).credential(tokenCredential).buildClient(); return roomsClient; } public void createRoomWithValidInput() { RoomsClient roomsClient = createRoomsClientWithConnectionString(); OffsetDateTime validFrom = OffsetDateTime.now(); OffsetDateTime validUntil = validFrom.plusDays(30); List<RoomParticipant> participants = new ArrayList<>(); participant1 = new RoomParticipant(new CommunicationUserIdentifier("<ACS User MRI identity 1>")).setRole(ParticipantRole.ATTENDEE); participant2 = new RoomParticipant(new CommunicationUserIdentifier("<ACS User MRI identity 
2>")).setRole(ParticipantRole.CONSUMER); participants.add(participant1); participants.add(participant2); CreateRoomOptions roomOptions = new CreateRoomOptions() .setValidFrom(validFrom) .setValidUntil(validUntil) .setParticipants(participants); CommunicationRoom roomResult = roomsClient.createRoom(roomOptions); System.out.println("Room Id: " + roomResult.getRoomId()); } public void updateRoomWithRoomId() { RoomsClient roomsClient = createRoomsClientWithConnectionString(); OffsetDateTime validFrom = OffsetDateTime.now(); OffsetDateTime validUntil = validFrom.plusDays(30); UpdateRoomOptions updateRoomOptions = new UpdateRoomOptions() .setValidFrom(validFrom) .setValidUntil(validUntil); try { CommunicationRoom roomResult = roomsClient.updateRoom("<Room Id>", updateRoomOptions); System.out.println("Room Id: " + roomResult.getRoomId()); } catch (RuntimeException ex) { System.out.println(ex); } } public void getRoomWithRoomId() { RoomsClient roomsClient = createRoomsClientWithConnectionString(); try { CommunicationRoom roomResult = roomsClient.getRoom("<Room Id>"); System.out.println("Room Id: " + roomResult.getRoomId()); } catch (RuntimeException ex) { System.out.println(ex); } } public void deleteRoomWithRoomId() { RoomsClient roomsClient = createRoomsClientWithConnectionString(); try { roomsClient.deleteRoom("<Room Id>"); } catch (RuntimeException ex) { System.out.println(ex); } } public void addOrUpdateRoomParticipantsWithRoomId() { RoomsClient roomsClient = createRoomsClientWithConnectionString(); List<RoomParticipant> participantsToaddOrUpdate = new ArrayList<>(); RoomParticipant participantToAdd = new RoomParticipant(new CommunicationUserIdentifier("<ACS User MRI identity 3>")).setRole(ParticipantRole.ATTENDEE); participant2 = new RoomParticipant(new CommunicationUserIdentifier("<ACS User MRI identity 2>")).setRole(ParticipantRole.ATTENDEE); participantsToaddOrUpdate.add(participantToAdd); participantsToaddOrUpdate.add(participant2); try { 
AddOrUpdateParticipantsResult addOrUpdateResult = roomsClient.addOrUpdateParticipants("<Room Id>", participantsToaddOrUpdate); } catch (RuntimeException ex) { System.out.println(ex); } } public void removeRoomParticipantsWithRoomId() { RoomsClient roomsClient = createRoomsClientWithConnectionString(); List<CommunicationIdentifier> participantsToRemove = new ArrayList<>(); participantsToRemove.add(participant1.getCommunicationIdentifier()); participantsToRemove.add(participant2.getCommunicationIdentifier()); try { RemoveParticipantsResult removeResult = roomsClient.removeParticipants("<Room Id>", participantsToRemove); } catch (RuntimeException ex) { System.out.println(ex); } } }
class ReadmeSamples { RoomParticipant participant1; RoomParticipant participant2; public RoomsClient createRoomsClientUsingAzureKeyCredential() { String endpoint = "https: AzureKeyCredential azureKeyCredential = new AzureKeyCredential("<access-key>"); RoomsClient roomsClient = new RoomsClientBuilder().endpoint(endpoint).credential(azureKeyCredential) .buildClient(); return roomsClient; } public RoomsAsyncClient createRoomsAsyncClientUsingAzureKeyCredential() { String endpoint = "https: AzureKeyCredential azureKeyCredential = new AzureKeyCredential("<access-key>"); RoomsAsyncClient roomsClient = new RoomsClientBuilder().endpoint(endpoint).credential(azureKeyCredential) .buildAsyncClient(); return roomsClient; } public RoomsClient createRoomsClientWithConnectionString() { String connectionString = "https: RoomsClient roomsClient = new RoomsClientBuilder().connectionString(connectionString).buildClient(); return roomsClient; } public RoomsClient createRoomsClientWithAAD() { String endpoint = "https: RoomsClient roomsClient = new RoomsClientBuilder().endpoint(endpoint) .credential(new DefaultAzureCredentialBuilder().build()).buildClient(); return roomsClient; } public RoomsClient createSyncClientUsingTokenCredential() { TokenCredential tokenCredential = new DefaultAzureCredentialBuilder().build(); String endpoint = "https: RoomsClient roomsClient = new RoomsClientBuilder().endpoint(endpoint).credential(tokenCredential).buildClient(); return roomsClient; } public void createRoomWithValidInput() { RoomsClient roomsClient = createRoomsClientWithConnectionString(); OffsetDateTime validFrom = OffsetDateTime.now(); OffsetDateTime validUntil = validFrom.plusDays(30); List<RoomParticipant> participants = new ArrayList<>(); participant1 = new RoomParticipant(new CommunicationUserIdentifier("<ACS User MRI identity 1>")).setRole(ParticipantRole.ATTENDEE); participant2 = new RoomParticipant(new CommunicationUserIdentifier("<ACS User MRI identity 
2>")).setRole(ParticipantRole.CONSUMER); participants.add(participant1); participants.add(participant2); CreateRoomOptions roomOptions = new CreateRoomOptions() .setValidFrom(validFrom) .setValidUntil(validUntil) .setParticipants(participants); CommunicationRoom roomResult = roomsClient.createRoom(roomOptions); System.out.println("Room Id: " + roomResult.getRoomId()); } public void updateRoomWithRoomId() { RoomsClient roomsClient = createRoomsClientWithConnectionString(); OffsetDateTime validFrom = OffsetDateTime.now(); OffsetDateTime validUntil = validFrom.plusDays(30); UpdateRoomOptions updateRoomOptions = new UpdateRoomOptions() .setValidFrom(validFrom) .setValidUntil(validUntil); try { CommunicationRoom roomResult = roomsClient.updateRoom("<Room Id>", updateRoomOptions); System.out.println("Room Id: " + roomResult.getRoomId()); } catch (RuntimeException ex) { System.out.println(ex); } } public void getRoomWithRoomId() { RoomsClient roomsClient = createRoomsClientWithConnectionString(); try { CommunicationRoom roomResult = roomsClient.getRoom("<Room Id>"); System.out.println("Room Id: " + roomResult.getRoomId()); } catch (RuntimeException ex) { System.out.println(ex); } } public void deleteRoomWithRoomId() { RoomsClient roomsClient = createRoomsClientWithConnectionString(); try { roomsClient.deleteRoom("<Room Id>"); } catch (RuntimeException ex) { System.out.println(ex); } } public void addOrUpdateRoomParticipantsWithRoomId() { RoomsClient roomsClient = createRoomsClientWithConnectionString(); List<RoomParticipant> participantsToaddOrUpdate = new ArrayList<>(); RoomParticipant participantToAdd = new RoomParticipant(new CommunicationUserIdentifier("<ACS User MRI identity 3>")).setRole(ParticipantRole.ATTENDEE); participant2 = new RoomParticipant(new CommunicationUserIdentifier("<ACS User MRI identity 2>")).setRole(ParticipantRole.ATTENDEE); participantsToaddOrUpdate.add(participantToAdd); participantsToaddOrUpdate.add(participant2); try { 
AddOrUpdateParticipantsResult addOrUpdateResult = roomsClient.addOrUpdateParticipants("<Room Id>", participantsToaddOrUpdate); } catch (RuntimeException ex) { System.out.println(ex); } } public void removeRoomParticipantsWithRoomId() { RoomsClient roomsClient = createRoomsClientWithConnectionString(); List<CommunicationIdentifier> participantsToRemove = new ArrayList<>(); participantsToRemove.add(participant1.getCommunicationIdentifier()); participantsToRemove.add(participant2.getCommunicationIdentifier()); try { RemoveParticipantsResult removeResult = roomsClient.removeParticipants("<Room Id>", participantsToRemove); } catch (RuntimeException ex) { System.out.println(ex); } } }
Without it, Jackson 2.15 throws an NPE later on.
/**
 * Derives the JSON property name for a getter by stripping its JavaBeans prefix,
 * preferring the Jackson 2.12+ accessor-naming API when it is available and safe.
 *
 * @param method The raw getter method.
 * @return The prefix-free property name.
 */
private String removePrefix(Method method) {
    MapperConfig<?> serializationConfig = mapper.getSerializationConfig();
    AnnotatedClass resolvedClass = AnnotatedClassResolver.resolve(serializationConfig,
        mapper.constructType(method.getDeclaringClass()), null);
    AnnotatedMethod resolvedMethod = new AnnotatedMethod(resolvedClass, method, null, null);

    String prefixFreeName = null;
    if (useJackson212 && jackson212IsSafe) {
        try {
            prefixFreeName = JacksonDatabind212.removePrefix(serializationConfig, resolvedClass, resolvedMethod,
                resolvedMethod.getName());
        } catch (Throwable ex) {
            // A LinkageError means the 2.12 API is not usable on this classpath:
            // disable the fast path for subsequent calls, then surface the error.
            if (ex instanceof LinkageError) {
                jackson212IsSafe = false;
                LOGGER.log(LogLevel.VERBOSE, JacksonVersion::getHelpInfo, ex);
            }
            throw ex;
        }
    }

    // Fall back to Jackson's BeanUtil-based naming when the 2.12 path was skipped
    // or produced no name.
    return (prefixFreeName != null) ? prefixFreeName : removePrefixWithBeanUtils(resolvedMethod);
}
AnnotatedMethod annotatedMethod = new AnnotatedMethod(annotatedClass, method, null, null);
/**
 * Derives the JSON property name for a getter by stripping its JavaBeans prefix,
 * preferring the Jackson 2.12+ accessor-naming API when available, and falling
 * back to the deprecated BeanUtil-based naming otherwise.
 *
 * @param method The raw getter method.
 * @return The prefix-free property name.
 */
private String removePrefix(Method method) {
    MapperConfig<?> config = mapper.getSerializationConfig();
    AnnotatedClass annotatedClass = AnnotatedClassResolver.resolve(config,
        mapper.constructType(method.getDeclaringClass()), null);
    // NOTE(review): per the accompanying review comment, constructing the
    // AnnotatedMethod from the resolved class avoids an NPE with Jackson 2.15.
    AnnotatedMethod annotatedMethod = new AnnotatedMethod(annotatedClass, method, null, null);
    String annotatedMethodName = annotatedMethod.getName();
    String name = null;
    if (useJackson212 && jackson212IsSafe) {
        try {
            name = JacksonDatabind212.removePrefix(config, annotatedClass, annotatedMethod, annotatedMethodName);
        } catch (Throwable ex) {
            // A LinkageError means the 2.12 API is not actually usable; disable
            // this path for future calls but still rethrow the error.
            if (ex instanceof LinkageError) {
                jackson212IsSafe = false;
                LOGGER.log(LogLevel.VERBOSE, JacksonVersion::getHelpInfo, ex);
            }
            throw ex;
        }
    }
    // Fallback when the 2.12 path was skipped or returned no name.
    if (name == null) {
        name = removePrefixWithBeanUtils(annotatedMethod);
    }
    return name;
}
class MemberNameConverterImpl implements MemberNameConverter { private static final ClientLogger LOGGER = new ClientLogger(MemberNameConverterImpl.class); private final ObjectMapper mapper; final boolean useJackson212; private boolean jackson212IsSafe = true; MemberNameConverterImpl(ObjectMapper mapper) { this.mapper = mapper; this.useJackson212 = PackageVersion.VERSION.getMinorVersion() >= 12; } @Override public String convertMemberName(Member member) { if (Modifier.isTransient(member.getModifiers())) { return null; } VisibilityChecker<?> visibilityChecker = mapper.getVisibilityChecker(); if (member instanceof Field) { Field f = (Field) member; if (f.isAnnotationPresent(JsonIgnore.class) || !visibilityChecker.isFieldVisible(f)) { if (f.isAnnotationPresent(JsonProperty.class)) { LOGGER.atInfo() .addKeyValue("field", f.getName()) .log("Field is annotated with JsonProperty but isn't accessible to JacksonJsonSerializer."); } return null; } if (f.isAnnotationPresent(JsonProperty.class)) { String propertyName = f.getDeclaredAnnotation(JsonProperty.class).value(); return CoreUtils.isNullOrEmpty(propertyName) ? f.getName() : propertyName; } return f.getName(); } if (member instanceof Method) { Method m = (Method) member; /* * If the method isn't a getter, is annotated with JsonIgnore, or isn't visible to the ObjectMapper ignore * it. */ if (!verifyGetter(m) || m.isAnnotationPresent(JsonIgnore.class) || !visibilityChecker.isGetterVisible(m)) { if (m.isAnnotationPresent(JsonGetter.class) || m.isAnnotationPresent(JsonProperty.class)) { LOGGER.atInfo() .addKeyValue("method", m.getName()) .log("Method is annotated with either JsonGetter or JsonProperty but isn't accessible to JacksonJsonSerializer."); } return null; } String methodNameWithoutJavaBeans = removePrefix(m); /* * Prefer JsonGetter over JsonProperty as it is the more targeted annotation. 
*/ if (m.isAnnotationPresent(JsonGetter.class)) { String propertyName = m.getDeclaredAnnotation(JsonGetter.class).value(); return CoreUtils.isNullOrEmpty(propertyName) ? methodNameWithoutJavaBeans : propertyName; } if (m.isAnnotationPresent(JsonProperty.class)) { String propertyName = m.getDeclaredAnnotation(JsonProperty.class).value(); return CoreUtils.isNullOrEmpty(propertyName) ? methodNameWithoutJavaBeans : propertyName; } return methodNameWithoutJavaBeans; } return null; } /* * Only consider methods that don't have parameters and aren't void as valid getter methods. */ private static boolean verifyGetter(Method method) { Class<?> returnType = method.getReturnType(); return method.getParameterCount() == 0 && returnType != void.class && returnType != Void.class; } @SuppressWarnings("deprecation") private static String removePrefixWithBeanUtils(AnnotatedMethod annotatedMethod) { return BeanUtil.okNameForGetter(annotatedMethod, false); } }
class MemberNameConverterImpl implements MemberNameConverter { private static final ClientLogger LOGGER = new ClientLogger(MemberNameConverterImpl.class); private final ObjectMapper mapper; final boolean useJackson212; private boolean jackson212IsSafe = true; MemberNameConverterImpl(ObjectMapper mapper) { this.mapper = mapper; this.useJackson212 = PackageVersion.VERSION.getMinorVersion() >= 12; } @Override public String convertMemberName(Member member) { if (Modifier.isTransient(member.getModifiers())) { return null; } VisibilityChecker<?> visibilityChecker = mapper.getVisibilityChecker(); if (member instanceof Field) { Field f = (Field) member; if (f.isAnnotationPresent(JsonIgnore.class) || !visibilityChecker.isFieldVisible(f)) { if (f.isAnnotationPresent(JsonProperty.class)) { LOGGER.atInfo() .addKeyValue("field", f.getName()) .log("Field is annotated with JsonProperty but isn't accessible to JacksonJsonSerializer."); } return null; } if (f.isAnnotationPresent(JsonProperty.class)) { String propertyName = f.getDeclaredAnnotation(JsonProperty.class).value(); return CoreUtils.isNullOrEmpty(propertyName) ? f.getName() : propertyName; } return f.getName(); } if (member instanceof Method) { Method m = (Method) member; /* * If the method isn't a getter, is annotated with JsonIgnore, or isn't visible to the ObjectMapper ignore * it. */ if (!verifyGetter(m) || m.isAnnotationPresent(JsonIgnore.class) || !visibilityChecker.isGetterVisible(m)) { if (m.isAnnotationPresent(JsonGetter.class) || m.isAnnotationPresent(JsonProperty.class)) { LOGGER.atInfo() .addKeyValue("method", m.getName()) .log("Method is annotated with either JsonGetter or JsonProperty but isn't accessible to JacksonJsonSerializer."); } return null; } String methodNameWithoutJavaBeans = removePrefix(m); /* * Prefer JsonGetter over JsonProperty as it is the more targeted annotation. 
*/ if (m.isAnnotationPresent(JsonGetter.class)) { String propertyName = m.getDeclaredAnnotation(JsonGetter.class).value(); return CoreUtils.isNullOrEmpty(propertyName) ? methodNameWithoutJavaBeans : propertyName; } if (m.isAnnotationPresent(JsonProperty.class)) { String propertyName = m.getDeclaredAnnotation(JsonProperty.class).value(); return CoreUtils.isNullOrEmpty(propertyName) ? methodNameWithoutJavaBeans : propertyName; } return methodNameWithoutJavaBeans; } return null; } /* * Only consider methods that don't have parameters and aren't void as valid getter methods. */ private static boolean verifyGetter(Method method) { Class<?> returnType = method.getReturnType(); return method.getParameterCount() == 0 && returnType != void.class && returnType != Void.class; } @SuppressWarnings("deprecation") private static String removePrefixWithBeanUtils(AnnotatedMethod annotatedMethod) { return BeanUtil.okNameForGetter(annotatedMethod, false); } }
Hacky fix for [test failure](https://dev.azure.com/azure-sdk/public/_build/results?buildId=2744328&view=logs&j=a4d29859-3a3e-58e9-1e57-ab36931b8c3d&t=380f79dc-1fa7-5f4f-96cd-1232e7eff958) ```JacksonJsonReaderContractTests>JsonReaderContractTests.readUntypedPreventsStackOverflow:559 Unexpected exception type thrown, expected: <java.lang.IllegalStateException> but was: <com.fasterxml.jackson.core.exc.StreamConstraintsException>``` Jackson now throws `com.fasterxml.jackson.core.exc.StreamConstraintsException: Depth (1001) exceeds the maximum allowed nesting depth (1000)`, and we have 1001 nested calls here. The new exception was introduced in 2.15, and it would not be trivial to catch it and rethrow it as `IllegalStateException`.
/**
 * Recursively reads the current JSON token as an untyped Java value:
 * {@code null}/Boolean/String directly, numbers as Integer, Long, or Double
 * (non-finite textual forms such as "NaN" or "INF" are returned as-is as
 * Strings), arrays as {@code List<Object>}, and objects as insertion-ordered
 * {@code Map<String, Object>}.
 *
 * @param depth Current recursion depth (nesting level of the value being read).
 * @return The untyped value the reader is currently pointing to.
 * @throws IOException If the underlying JSON cannot be read.
 * @throws IllegalStateException If nesting exceeds the supported depth or an
 * unexpected token is encountered.
 */
private Object readUntypedHelper(int depth) throws IOException {
    // Threshold is 999 so this guard trips before Jackson 2.15's
    // StreamConstraintsException at nesting depth 1000 (see accompanying review
    // note); the message still reports the advertised limit of 1000.
    if (depth >= 999) {
        throw new IllegalStateException("Untyped object exceeded allowed object nested depth of 1000.");
    }
    JsonToken token = currentToken();
    if (token == JsonToken.NULL || token == null) {
        return null;
    } else if (token == JsonToken.BOOLEAN) {
        return getBoolean();
    } else if (token == JsonToken.NUMBER) {
        String numberText = getText();
        // Non-finite values have no numeric representation here; keep the text.
        if ("INF".equals(numberText) || "Infinity".equals(numberText) || "-INF".equals(numberText)
            || "-Infinity".equals(numberText) || "NaN".equals(numberText)) {
            return numberText;
        } else if (numberText.contains(".")) {
            return Double.parseDouble(numberText);
        } else {
            // Prefer the narrower Integer; widen to Long only on overflow.
            try {
                return Integer.parseInt(numberText);
            } catch (NumberFormatException ex) {
                return Long.parseLong(numberText);
            }
        }
    } else if (token == JsonToken.STRING) {
        return getString();
    } else if (token == JsonToken.START_ARRAY) {
        List<Object> array = new ArrayList<>();
        while (nextToken() != JsonToken.END_ARRAY) {
            array.add(readUntypedHelper(depth + 1));
        }
        return array;
    } else if (token == JsonToken.START_OBJECT) {
        // LinkedHashMap preserves the field order of the source document.
        Map<String, Object> object = new LinkedHashMap<>();
        while (nextToken() != JsonToken.END_OBJECT) {
            String fieldName = getFieldName();
            nextToken();
            Object value = readUntypedHelper(depth + 1);
            object.put(fieldName, value);
        }
        return object;
    }
    // FIELD_NAME / END_ARRAY / END_OBJECT are invalid starting points here.
    throw new IllegalStateException("Unknown token type while reading an untyped field: " + token);
}
if (depth >= 999) {
/**
 * Recursively reads the current JSON token as an untyped Java value:
 * {@code null}/Boolean/String directly, numbers as Integer, Long, or Double
 * (non-finite textual forms such as "NaN" are returned as Strings), arrays as
 * {@code List<Object>}, and objects as insertion-ordered {@code Map<String, Object>}.
 *
 * @param depth Current recursion depth (nesting level of the value being read).
 * @return The untyped value the reader is currently pointing to.
 * @throws IOException If the underlying JSON cannot be read.
 * @throws IllegalStateException If nesting exceeds the supported depth or an
 * unexpected token is encountered.
 */
private Object readUntypedHelper(int depth) throws IOException {
    // Trip before Jackson 2.15's own depth-1000 constraint kicks in.
    if (depth >= 999) {
        throw new IllegalStateException("Untyped object exceeded allowed object nested depth of 1000.");
    }
    JsonToken token = currentToken();
    if (token == null || token == JsonToken.NULL) {
        return null;
    }
    switch (token) {
        case BOOLEAN:
            return getBoolean();
        case NUMBER:
            String numberText = getText();
            // Non-finite values have no numeric representation; keep the text.
            if ("INF".equals(numberText) || "Infinity".equals(numberText) || "-INF".equals(numberText)
                || "-Infinity".equals(numberText) || "NaN".equals(numberText)) {
                return numberText;
            }
            if (numberText.contains(".")) {
                return Double.parseDouble(numberText);
            }
            // Prefer the narrower Integer; widen to Long only on overflow.
            try {
                return Integer.parseInt(numberText);
            } catch (NumberFormatException ex) {
                return Long.parseLong(numberText);
            }
        case STRING:
            return getString();
        case START_ARRAY:
            List<Object> array = new ArrayList<>();
            while (nextToken() != JsonToken.END_ARRAY) {
                array.add(readUntypedHelper(depth + 1));
            }
            return array;
        case START_OBJECT:
            // LinkedHashMap preserves the field order of the source document.
            Map<String, Object> object = new LinkedHashMap<>();
            while (nextToken() != JsonToken.END_OBJECT) {
                String fieldName = getFieldName();
                nextToken();
                object.put(fieldName, readUntypedHelper(depth + 1));
            }
            return object;
        default:
            throw new IllegalStateException("Unknown token type while reading an untyped field: " + token);
    }
}
class JsonReader implements Closeable { private static final JsonStringEncoder ENCODER = JsonStringEncoder.getInstance(); /** * Creates an instance of {@link JsonReader}. */ public JsonReader() { } /** * Gets the {@link JsonToken} that the reader currently points. * <p> * Returns null if the reader isn't pointing to a token. This happens if the reader hasn't begun to read the JSON * value or if reading of the JSON value has completed. * * @return The {@link JsonToken} that the reader currently points, or null if the reader isn't pointing to a token. */ public abstract JsonToken currentToken(); /** * Iterates to and returns the next {@link JsonToken} in the JSON encoded value. * <p> * Returns null if iterating to the next token completes reading of the JSON encoded value. * * @return The next {@link JsonToken} in the JSON encoded value, or null if reading completes. * @throws IOException If the next token cannot be determined. */ public abstract JsonToken nextToken() throws IOException; /** * Closes the JSON stream. * * @throws IOException If the underlying content store fails to close. */ @Override public abstract void close() throws IOException; /** * Whether the {@link * * @return Whether the {@link */ public final boolean isStartArrayOrObject() { return isStartArrayOrObject(currentToken()); } private static boolean isStartArrayOrObject(JsonToken token) { return token == JsonToken.START_ARRAY || token == JsonToken.START_OBJECT; } /** * Whether the {@link * * @return Whether the {@link */ public final boolean isEndArrayOrObject() { return isEndArrayOrObject(currentToken()); } private static boolean isEndArrayOrObject(JsonToken token) { return token == JsonToken.END_ARRAY || token == JsonToken.END_OBJECT; } /** * Gets the binary value if the reader is currently pointing to a {@link JsonToken * <p> * This returns the equivalent of {@link Base64 * <p> * If the reader is pointing to a {@link JsonToken * other token type an {@link IllegalStateException} will be thrown. 
* * @return The binary value based on whether the current token is {@link JsonToken * {@link JsonToken * @throws IllegalStateException If the reader isn't pointing to either {@link JsonToken * {@link JsonToken * @throws IOException If the next value cannot be read as binary. */ public abstract byte[] getBinary() throws IOException; /** * Gets the boolean value if the reader is currently pointing to a {@link JsonToken * <p> * If the reader is pointing to any other token type an {@link IllegalStateException} will be thrown. * <p> * If {@link Boolean} should be read use {@link * * @return The boolean value based on the {@link JsonToken * @throws IllegalStateException If the reader isn't pointing to {@link JsonToken * @throws IOException If the next value cannot be read as a boolean. */ public abstract boolean getBoolean() throws IOException; /** * Gets the float value if the reader is currently pointing to a {@link JsonToken * {@link JsonToken * <p> * {@link JsonToken * converted to a float. * <p> * All other {@link JsonToken} types will throw an {@link IllegalStateException}. * <p> * If {@link Float} should be read use {@link * * @return The float value based on the current token. * @throws NumberFormatException If the current token is a {@link JsonToken * float. * @throws IllegalStateException If the current token isn't a {@link JsonToken * @throws IOException If the next value cannot be read as a float. */ public abstract float getFloat() throws IOException; /** * Gets the double value if the reader is currently pointing to a {@link JsonToken * {@link JsonToken * <p> * {@link JsonToken * converted to a double. * <p> * All other {@link JsonToken} types will throw an {@link IllegalStateException}. * <p> * If {@link Double} should be read use {@link * * @return The double value based on the current token. * @throws NumberFormatException If the current token is a {@link JsonToken * double. 
* @throws IllegalStateException If the current token isn't a {@link JsonToken * @throws IOException If the next value cannot be read as a double. */ public abstract double getDouble() throws IOException; /** * Gets the int value if the reader is currently pointing to a {@link JsonToken * {@link JsonToken * <p> * {@link JsonToken * converted to an int. * <p> * All other {@link JsonToken} types will throw an {@link IllegalStateException}. * <p> * If {@link Integer} should be read use {@link * * @return The int value based on the current token. * @throws NumberFormatException If the current token is a {@link JsonToken * int. * @throws IllegalStateException If the current token isn't a {@link JsonToken * @throws IOException If the next value cannot be read as an int. */ public abstract int getInt() throws IOException; /** * Gets the long value if the reader is currently pointing to a {@link JsonToken * {@link JsonToken * <p> * {@link JsonToken * converted to a long. * <p> * All other {@link JsonToken} types will throw an {@link IllegalStateException}. * <p> * If {@link Long} should be read use {@link * * @return The long value based on the current token. * @throws NumberFormatException If the current token is a {@link JsonToken * long. * @throws IllegalStateException If the current token isn't a {@link JsonToken * @throws IOException If the next value cannot be read as a long. */ public abstract long getLong() throws IOException; /** * Gets the String value if the reader is currently pointing to a {@link JsonToken * {@link JsonToken * <p> * If the current token is a {@link JsonToken * value will be returned. If the current token is {@link JsonToken * <p> * All other {@link JsonToken} types will throw an {@link IllegalStateException}. * * @return The String value based on the current token. * @throws IllegalStateException If the current token isn't a {@link JsonToken * {@link JsonToken * @throws IOException If the next value cannot be read as a String. 
*/ public abstract String getString() throws IOException; /** * Gets the field name if the reader is currently pointing to a {@link JsonToken * <p> * All other {@link JsonToken} types will throw an {@link IllegalStateException}. * * @return The field name based on the current token. * @throws IllegalStateException If the current token isn't a {@link JsonToken * @throws IOException If the next value cannot be read as a field name. */ public abstract String getFieldName() throws IOException; /** * Convenience method to read a nullable type. * <p> * If the {@link * will be passed into the {@code nonNullGetter} function to get the value. Effectively, this is the generic form of * the get*NullableValue methods. * * @param nonNullGetter Function that reads the non-null JSON value. * @param <T> Type returned by the function. * @return null if the {@link * {@code nonNullGetter}. * @throws IOException If the next value cannot be read as a nullable. */ public final <T> T getNullable(ReadValueCallback<JsonReader, T> nonNullGetter) throws IOException { return currentToken() == JsonToken.NULL ? null : nonNullGetter.read(this); } /** * Recursively skips the JSON token sub-stream if the current token is either {@link JsonToken * {@link JsonToken * <p> * If the current token isn't the beginning of an array or object this method is a no-op. * * @throws IOException If the children cannot be skipped. */ public abstract void skipChildren() throws IOException; /** * Reads and returns the current JSON object the {@link JsonReader} is pointing to. This will mutate the current * location of this {@link JsonReader}. * <p> * If the {@link * {@link IllegalStateException} will be thrown. * <p> * If the {@link * JSON field is the {@link * create a new JSON object with only a subset of fields (those remaining from when the method is called). * <p> * The returned {@link JsonReader} is able to be {@link * * @return The buffered JSON object the {@link JsonReader} was pointing to. 
* @throws IllegalStateException If the {@link * {@link JsonToken * @throws IOException If the child object cannot be buffered. */ public abstract JsonReader bufferObject() throws IOException; /** * Indicates whether the {@link JsonReader} supports {@link * * @return Whether {@link */ public abstract boolean isResetSupported(); /** * Creates a new {@link JsonReader} reset to the beginning of the JSON stream. * <p> * Use {@link * and it isn't supported an {@link IllegalStateException} will be thrown. * * @return A new {@link JsonReader} reset to the beginning of the JSON stream. * @throws IllegalStateException If resetting isn't supported by the current JsonReader. * @throws IOException If the {@link JsonReader} cannot be reset. */ public abstract JsonReader reset() throws IOException; /** * Recursively reads the JSON token sub-stream if the current token is either {@link JsonToken * {@link JsonToken * <p> * If the {@link * be read. * * @return The raw textual value of the JSON token sub-stream. * @throws IOException If the children cannot be read. */ public final String readChildren() throws IOException { return readInternal(new StringBuilder(), true, false).toString(); } /** * Recursively reads the JSON token sub-stream if the current token is either {@link JsonToken * {@link JsonToken * <p> * If the {@link * be read. * * @param buffer The {@link StringBuilder} where the read sub-stream will be written. * @throws NullPointerException If {@code buffer} is null. * @throws IOException If the children cannot be read. */ public final void readChildren(StringBuilder buffer) throws IOException { readInternal(buffer, true, false); } /** * Reads the remaining fields in the current JSON object as a JSON object. * <p> * If the {@link * {@link * object where the first field is the current field and reads the remaining fields in the JSON object. * <p> * If the {@link * be read. * * @return The raw textual value of the remaining JSON fields. 
* @throws IOException If the remaining JSON fields cannot be read. */ public final String readRemainingFieldsAsJsonObject() throws IOException { return readInternal(new StringBuilder(), false, true).toString(); } /** * Reads the remaining fields in the current JSON object as a JSON object. * <p> * If the {@link * {@link * a JSON object where the first field is the current field and reads the remaining fields in the JSON object. * <p> * If the {@link * be read. * * @param buffer The {@link StringBuilder} where the remaining JSON fields will be written. * @throws NullPointerException If {@code buffer} is null. * @throws IOException If the remaining JSON fields cannot be read. */ public final void readRemainingFieldsAsJsonObject(StringBuilder buffer) throws IOException { readInternal(buffer, false, true); } private StringBuilder readInternal(StringBuilder buffer, boolean canStartAtArray, boolean canStartAtFieldName) throws IOException { Objects.requireNonNull(buffer, "The 'buffer' used to read the JSON object cannot be null."); JsonToken token = currentToken(); boolean canRead = (token == JsonToken.START_OBJECT) || (canStartAtArray && token == JsonToken.START_ARRAY) || (canStartAtFieldName && token == JsonToken.FIELD_NAME); if (!canRead) { return buffer; } if (token == JsonToken.FIELD_NAME) { buffer.append("{\""); ENCODER.quoteAsString(getFieldName(), buffer); buffer.append("\":"); token = nextToken(); } appendJson(buffer, token); int depth = 1; while (depth > 0) { JsonToken previousToken = token; token = nextToken(); if (isStartArrayOrObject(token)) { depth++; } else if (isEndArrayOrObject(token)) { depth--; } else if (token == null) { return buffer; } if (!(isStartArrayOrObject(previousToken) || isEndArrayOrObject(token) || previousToken == JsonToken.FIELD_NAME)) { buffer.append(','); } appendJson(buffer, token); } return buffer; } /** * Convenience method to read a JSON element into a buffer. 
* * @param buffer The buffer where the JSON element value will be written. * @param token The type of the JSON element. * @throws IOException If an error occurs while reading the JSON element. */ private void appendJson(StringBuilder buffer, JsonToken token) throws IOException { if (token == JsonToken.FIELD_NAME) { buffer.append("\""); ENCODER.quoteAsString(getFieldName(), buffer); buffer.append("\":"); } else if (token == JsonToken.STRING) { buffer.append("\""); ENCODER.quoteAsString(getString(), buffer); buffer.append("\""); } else { buffer.append(getText()); } } /** * Reads a JSON object. * <p> * If the {@link * is still null or {@link JsonToken * {@link JsonToken * <p> * Once the JSON stream is prepared for object reading this will get the next token and pass this {@link JsonReader} * into the {@code objectReaderFunc} to handle reading the object. * <p> * If a JSON array should be read use {@link * {@link * * @param objectReaderFunc Function that reads each value of the key-value pair. * @param <T> The value element type. * @return The read JSON object, or null if the {@link JsonToken} is null or {@link JsonToken * @throws IllegalStateException If the token isn't {@link JsonToken * @throws IOException If the object cannot be read. */ public final <T> T readObject(ReadValueCallback<JsonReader, T> objectReaderFunc) throws IOException { return readMapOrObject(objectReaderFunc, false); } /** * Reads a JSON array. * <p> * If the {@link * is still null or {@link JsonToken * {@link JsonToken * <p> * Once the JSON stream is prepared for element reading this will get the element token and pass this * {@link JsonReader} into the {@code elementReaderFunc} to handle reading the element of the array. If the array * has no elements an empty list will be returned. * <p> * If a JSON object should be read use {@link * {@link * * @param elementReaderFunc Function that reads each element of the array. * @param <T> The array element type. 
* @return The read JSON array, or null if the {@link JsonToken} is null or {@link JsonToken * @throws IllegalStateException If the token isn't {@link JsonToken * @throws IOException If the array cannot be read. */ public final <T> List<T> readArray(ReadValueCallback<JsonReader, T> elementReaderFunc) throws IOException { JsonToken currentToken = currentToken(); if (currentToken == null) { currentToken = nextToken(); } if (currentToken == JsonToken.NULL || currentToken == null) { return null; } else if (currentToken != JsonToken.START_ARRAY) { throw new IllegalStateException("Unexpected token to begin array deserialization: " + currentToken); } List<T> array = new LinkedList<>(); while (nextToken() != JsonToken.END_ARRAY) { array.add(elementReaderFunc.read(this)); } return array; } /** * Reads a JSON map. * <p> * If the {@link * is still null or {@link JsonToken * {@link JsonToken * <p> * Once the JSON stream is prepared for key-value reading this will get the next token and read the field name as * the key then get the next token after that and pass this {@link JsonReader} into the {@code valueReaderFunc} to * handle reading the value of the key-value pair. If the object has no elements an empty map will be returned. * <p> * If a JSON object should be read use {@link * {@link * * @param valueReaderFunc Function that reads each value of the key-value pair. * @param <T> The value element type. * @return The read JSON map, or null if the {@link JsonToken} is null or {@link JsonToken * @throws IllegalStateException If the token isn't {@link JsonToken * @throws IOException If the map cannot be read. 
*/ public final <T> Map<String, T> readMap(ReadValueCallback<JsonReader, T> valueReaderFunc) throws IOException { return readMapOrObject(reader -> { Map<String, T> map = new LinkedHashMap<>(); while (nextToken() != JsonToken.END_OBJECT) { String fieldName = getFieldName(); nextToken(); map.put(fieldName, valueReaderFunc.read(this)); } return map; }, true); } private <T> T readMapOrObject(ReadValueCallback<JsonReader, T> valueReaderFunc, boolean isMap) throws IOException { JsonToken currentToken = currentToken(); if (currentToken == null) { currentToken = nextToken(); } if (currentToken == JsonToken.NULL || currentToken == null) { return null; } else if (currentToken != JsonToken.START_OBJECT) { String type = isMap ? "map" : "object"; throw new IllegalStateException("Unexpected token to begin " + type + " deserialization: " + currentToken); } return valueReaderFunc.read(this); } /** * Reads an untyped object. * <p> * If the {@link * <p> * If the starting token is {@link JsonToken * {@link JsonToken * for reading an unknown type. If the untyped object is deeply nested an {@link IllegalStateException} will also be * thrown to prevent a stack overflow exception. * <p> * The returned object will be one of the following: * * <ul> * <li>null if the starting token is null or {@link JsonToken * <li>true or false if the starting token is {@link JsonToken * <li>One of int, long, float, or double is the starting token is {@link JsonToken * containing value will be used if the number is an integer</li> * <li>An array of untyped elements if the starting point is {@link JsonToken * <li>A map of String-untyped value if the starting point is {@link JsonToken * </ul> * * @return The untyped value based on the outlined return types above. * @throws IllegalStateException If the starting point of the object is {@link JsonToken * {@link JsonToken * @throws IOException If the untyped cannot be read. 
*/ public final Object readUntyped() throws IOException { JsonToken token = currentToken(); if (token == null) { token = nextToken(); } if (token == JsonToken.END_ARRAY || token == JsonToken.END_OBJECT || token == JsonToken.FIELD_NAME) { throw new IllegalStateException("Unexpected token to begin an untyped field: " + token); } return readUntypedHelper(0); } /** * Gets the text value for the {@link * <p> * The following is how each {@link JsonToken} type is handled: * * <ul> * <li>{@link JsonToken * <li>{@link JsonToken * <li>{@link JsonToken * <li>{@link JsonToken * <li>{@link JsonToken * <li>{@link JsonToken * <li>{@link JsonToken * <li>{@link JsonToken * <li>{@link JsonToken * </ul> * * If the current token is null an {@link IllegalStateException} will be thrown. * * @return The text value for the {@link * @throws IllegalStateException If the current token is null. * @throws IOException If the text cannot be read. */ public final String getText() throws IOException { JsonToken token = currentToken(); if (token == null) { throw new IllegalStateException("Current token cannot be null."); } switch (token) { case START_OBJECT: return "{"; case END_OBJECT: return "}"; case START_ARRAY: return "["; case END_ARRAY: return "]"; case FIELD_NAME: return getFieldName(); case BOOLEAN: return String.valueOf(getBoolean()); case NUMBER: case STRING: return getString(); case NULL: return "null"; default: return ""; } } }
class JsonReader implements Closeable { private static final JsonStringEncoder ENCODER = JsonStringEncoder.getInstance(); /** * Creates an instance of {@link JsonReader}. */ public JsonReader() { } /** * Gets the {@link JsonToken} that the reader currently points. * <p> * Returns null if the reader isn't pointing to a token. This happens if the reader hasn't begun to read the JSON * value or if reading of the JSON value has completed. * * @return The {@link JsonToken} that the reader currently points, or null if the reader isn't pointing to a token. */ public abstract JsonToken currentToken(); /** * Iterates to and returns the next {@link JsonToken} in the JSON encoded value. * <p> * Returns null if iterating to the next token completes reading of the JSON encoded value. * * @return The next {@link JsonToken} in the JSON encoded value, or null if reading completes. * @throws IOException If the next token cannot be determined. */ public abstract JsonToken nextToken() throws IOException; /** * Closes the JSON stream. * * @throws IOException If the underlying content store fails to close. */ @Override public abstract void close() throws IOException; /** * Whether the {@link * * @return Whether the {@link */ public final boolean isStartArrayOrObject() { return isStartArrayOrObject(currentToken()); } private static boolean isStartArrayOrObject(JsonToken token) { return token == JsonToken.START_ARRAY || token == JsonToken.START_OBJECT; } /** * Whether the {@link * * @return Whether the {@link */ public final boolean isEndArrayOrObject() { return isEndArrayOrObject(currentToken()); } private static boolean isEndArrayOrObject(JsonToken token) { return token == JsonToken.END_ARRAY || token == JsonToken.END_OBJECT; } /** * Gets the binary value if the reader is currently pointing to a {@link JsonToken * <p> * This returns the equivalent of {@link Base64 * <p> * If the reader is pointing to a {@link JsonToken * other token type an {@link IllegalStateException} will be thrown. 
* * @return The binary value based on whether the current token is {@link JsonToken * {@link JsonToken * @throws IllegalStateException If the reader isn't pointing to either {@link JsonToken * {@link JsonToken * @throws IOException If the next value cannot be read as binary. */ public abstract byte[] getBinary() throws IOException; /** * Gets the boolean value if the reader is currently pointing to a {@link JsonToken * <p> * If the reader is pointing to any other token type an {@link IllegalStateException} will be thrown. * <p> * If {@link Boolean} should be read use {@link * * @return The boolean value based on the {@link JsonToken * @throws IllegalStateException If the reader isn't pointing to {@link JsonToken * @throws IOException If the next value cannot be read as a boolean. */ public abstract boolean getBoolean() throws IOException; /** * Gets the float value if the reader is currently pointing to a {@link JsonToken * {@link JsonToken * <p> * {@link JsonToken * converted to a float. * <p> * All other {@link JsonToken} types will throw an {@link IllegalStateException}. * <p> * If {@link Float} should be read use {@link * * @return The float value based on the current token. * @throws NumberFormatException If the current token is a {@link JsonToken * float. * @throws IllegalStateException If the current token isn't a {@link JsonToken * @throws IOException If the next value cannot be read as a float. */ public abstract float getFloat() throws IOException; /** * Gets the double value if the reader is currently pointing to a {@link JsonToken * {@link JsonToken * <p> * {@link JsonToken * converted to a double. * <p> * All other {@link JsonToken} types will throw an {@link IllegalStateException}. * <p> * If {@link Double} should be read use {@link * * @return The double value based on the current token. * @throws NumberFormatException If the current token is a {@link JsonToken * double. 
* @throws IllegalStateException If the current token isn't a {@link JsonToken * @throws IOException If the next value cannot be read as a double. */ public abstract double getDouble() throws IOException; /** * Gets the int value if the reader is currently pointing to a {@link JsonToken * {@link JsonToken * <p> * {@link JsonToken * converted to an int. * <p> * All other {@link JsonToken} types will throw an {@link IllegalStateException}. * <p> * If {@link Integer} should be read use {@link * * @return The int value based on the current token. * @throws NumberFormatException If the current token is a {@link JsonToken * int. * @throws IllegalStateException If the current token isn't a {@link JsonToken * @throws IOException If the next value cannot be read as an int. */ public abstract int getInt() throws IOException; /** * Gets the long value if the reader is currently pointing to a {@link JsonToken * {@link JsonToken * <p> * {@link JsonToken * converted to a long. * <p> * All other {@link JsonToken} types will throw an {@link IllegalStateException}. * <p> * If {@link Long} should be read use {@link * * @return The long value based on the current token. * @throws NumberFormatException If the current token is a {@link JsonToken * long. * @throws IllegalStateException If the current token isn't a {@link JsonToken * @throws IOException If the next value cannot be read as a long. */ public abstract long getLong() throws IOException; /** * Gets the String value if the reader is currently pointing to a {@link JsonToken * {@link JsonToken * <p> * If the current token is a {@link JsonToken * value will be returned. If the current token is {@link JsonToken * <p> * All other {@link JsonToken} types will throw an {@link IllegalStateException}. * * @return The String value based on the current token. * @throws IllegalStateException If the current token isn't a {@link JsonToken * {@link JsonToken * @throws IOException If the next value cannot be read as a String. 
*/ public abstract String getString() throws IOException; /** * Gets the field name if the reader is currently pointing to a {@link JsonToken * <p> * All other {@link JsonToken} types will throw an {@link IllegalStateException}. * * @return The field name based on the current token. * @throws IllegalStateException If the current token isn't a {@link JsonToken * @throws IOException If the next value cannot be read as a field name. */ public abstract String getFieldName() throws IOException; /** * Convenience method to read a nullable type. * <p> * If the {@link * will be passed into the {@code nonNullGetter} function to get the value. Effectively, this is the generic form of * the get*NullableValue methods. * * @param nonNullGetter Function that reads the non-null JSON value. * @param <T> Type returned by the function. * @return null if the {@link * {@code nonNullGetter}. * @throws IOException If the next value cannot be read as a nullable. */ public final <T> T getNullable(ReadValueCallback<JsonReader, T> nonNullGetter) throws IOException { return currentToken() == JsonToken.NULL ? null : nonNullGetter.read(this); } /** * Recursively skips the JSON token sub-stream if the current token is either {@link JsonToken * {@link JsonToken * <p> * If the current token isn't the beginning of an array or object this method is a no-op. * * @throws IOException If the children cannot be skipped. */ public abstract void skipChildren() throws IOException; /** * Reads and returns the current JSON object the {@link JsonReader} is pointing to. This will mutate the current * location of this {@link JsonReader}. * <p> * If the {@link * {@link IllegalStateException} will be thrown. * <p> * If the {@link * JSON field is the {@link * create a new JSON object with only a subset of fields (those remaining from when the method is called). * <p> * The returned {@link JsonReader} is able to be {@link * * @return The buffered JSON object the {@link JsonReader} was pointing to. 
* @throws IllegalStateException If the {@link * {@link JsonToken * @throws IOException If the child object cannot be buffered. */ public abstract JsonReader bufferObject() throws IOException; /** * Indicates whether the {@link JsonReader} supports {@link * * @return Whether {@link */ public abstract boolean isResetSupported(); /** * Creates a new {@link JsonReader} reset to the beginning of the JSON stream. * <p> * Use {@link * and it isn't supported an {@link IllegalStateException} will be thrown. * * @return A new {@link JsonReader} reset to the beginning of the JSON stream. * @throws IllegalStateException If resetting isn't supported by the current JsonReader. * @throws IOException If the {@link JsonReader} cannot be reset. */ public abstract JsonReader reset() throws IOException; /** * Recursively reads the JSON token sub-stream if the current token is either {@link JsonToken * {@link JsonToken * <p> * If the {@link * be read. * * @return The raw textual value of the JSON token sub-stream. * @throws IOException If the children cannot be read. */ public final String readChildren() throws IOException { return readInternal(new StringBuilder(), true, false).toString(); } /** * Recursively reads the JSON token sub-stream if the current token is either {@link JsonToken * {@link JsonToken * <p> * If the {@link * be read. * * @param buffer The {@link StringBuilder} where the read sub-stream will be written. * @throws NullPointerException If {@code buffer} is null. * @throws IOException If the children cannot be read. */ public final void readChildren(StringBuilder buffer) throws IOException { readInternal(buffer, true, false); } /** * Reads the remaining fields in the current JSON object as a JSON object. * <p> * If the {@link * {@link * object where the first field is the current field and reads the remaining fields in the JSON object. * <p> * If the {@link * be read. * * @return The raw textual value of the remaining JSON fields. 
* @throws IOException If the remaining JSON fields cannot be read. */ public final String readRemainingFieldsAsJsonObject() throws IOException { return readInternal(new StringBuilder(), false, true).toString(); } /** * Reads the remaining fields in the current JSON object as a JSON object. * <p> * If the {@link * {@link * a JSON object where the first field is the current field and reads the remaining fields in the JSON object. * <p> * If the {@link * be read. * * @param buffer The {@link StringBuilder} where the remaining JSON fields will be written. * @throws NullPointerException If {@code buffer} is null. * @throws IOException If the remaining JSON fields cannot be read. */ public final void readRemainingFieldsAsJsonObject(StringBuilder buffer) throws IOException { readInternal(buffer, false, true); } private StringBuilder readInternal(StringBuilder buffer, boolean canStartAtArray, boolean canStartAtFieldName) throws IOException { Objects.requireNonNull(buffer, "The 'buffer' used to read the JSON object cannot be null."); JsonToken token = currentToken(); boolean canRead = (token == JsonToken.START_OBJECT) || (canStartAtArray && token == JsonToken.START_ARRAY) || (canStartAtFieldName && token == JsonToken.FIELD_NAME); if (!canRead) { return buffer; } if (token == JsonToken.FIELD_NAME) { buffer.append("{\""); ENCODER.quoteAsString(getFieldName(), buffer); buffer.append("\":"); token = nextToken(); } appendJson(buffer, token); int depth = 1; while (depth > 0) { JsonToken previousToken = token; token = nextToken(); if (isStartArrayOrObject(token)) { depth++; } else if (isEndArrayOrObject(token)) { depth--; } else if (token == null) { return buffer; } if (!(isStartArrayOrObject(previousToken) || isEndArrayOrObject(token) || previousToken == JsonToken.FIELD_NAME)) { buffer.append(','); } appendJson(buffer, token); } return buffer; } /** * Convenience method to read a JSON element into a buffer. 
* * @param buffer The buffer where the JSON element value will be written. * @param token The type of the JSON element. * @throws IOException If an error occurs while reading the JSON element. */ private void appendJson(StringBuilder buffer, JsonToken token) throws IOException { if (token == JsonToken.FIELD_NAME) { buffer.append("\""); ENCODER.quoteAsString(getFieldName(), buffer); buffer.append("\":"); } else if (token == JsonToken.STRING) { buffer.append("\""); ENCODER.quoteAsString(getString(), buffer); buffer.append("\""); } else { buffer.append(getText()); } } /** * Reads a JSON object. * <p> * If the {@link * is still null or {@link JsonToken * {@link JsonToken * <p> * Once the JSON stream is prepared for object reading this will get the next token and pass this {@link JsonReader} * into the {@code objectReaderFunc} to handle reading the object. * <p> * If a JSON array should be read use {@link * {@link * * @param objectReaderFunc Function that reads each value of the key-value pair. * @param <T> The value element type. * @return The read JSON object, or null if the {@link JsonToken} is null or {@link JsonToken * @throws IllegalStateException If the token isn't {@link JsonToken * @throws IOException If the object cannot be read. */ public final <T> T readObject(ReadValueCallback<JsonReader, T> objectReaderFunc) throws IOException { return readMapOrObject(objectReaderFunc, false); } /** * Reads a JSON array. * <p> * If the {@link * is still null or {@link JsonToken * {@link JsonToken * <p> * Once the JSON stream is prepared for element reading this will get the element token and pass this * {@link JsonReader} into the {@code elementReaderFunc} to handle reading the element of the array. If the array * has no elements an empty list will be returned. * <p> * If a JSON object should be read use {@link * {@link * * @param elementReaderFunc Function that reads each element of the array. * @param <T> The array element type. 
* @return The read JSON array, or null if the {@link JsonToken} is null or {@link JsonToken * @throws IllegalStateException If the token isn't {@link JsonToken * @throws IOException If the array cannot be read. */ public final <T> List<T> readArray(ReadValueCallback<JsonReader, T> elementReaderFunc) throws IOException { JsonToken currentToken = currentToken(); if (currentToken == null) { currentToken = nextToken(); } if (currentToken == JsonToken.NULL || currentToken == null) { return null; } else if (currentToken != JsonToken.START_ARRAY) { throw new IllegalStateException("Unexpected token to begin array deserialization: " + currentToken); } List<T> array = new LinkedList<>(); while (nextToken() != JsonToken.END_ARRAY) { array.add(elementReaderFunc.read(this)); } return array; } /** * Reads a JSON map. * <p> * If the {@link * is still null or {@link JsonToken * {@link JsonToken * <p> * Once the JSON stream is prepared for key-value reading this will get the next token and read the field name as * the key then get the next token after that and pass this {@link JsonReader} into the {@code valueReaderFunc} to * handle reading the value of the key-value pair. If the object has no elements an empty map will be returned. * <p> * If a JSON object should be read use {@link * {@link * * @param valueReaderFunc Function that reads each value of the key-value pair. * @param <T> The value element type. * @return The read JSON map, or null if the {@link JsonToken} is null or {@link JsonToken * @throws IllegalStateException If the token isn't {@link JsonToken * @throws IOException If the map cannot be read. 
*/ public final <T> Map<String, T> readMap(ReadValueCallback<JsonReader, T> valueReaderFunc) throws IOException { return readMapOrObject(reader -> { Map<String, T> map = new LinkedHashMap<>(); while (nextToken() != JsonToken.END_OBJECT) { String fieldName = getFieldName(); nextToken(); map.put(fieldName, valueReaderFunc.read(this)); } return map; }, true); } private <T> T readMapOrObject(ReadValueCallback<JsonReader, T> valueReaderFunc, boolean isMap) throws IOException { JsonToken currentToken = currentToken(); if (currentToken == null) { currentToken = nextToken(); } if (currentToken == JsonToken.NULL || currentToken == null) { return null; } else if (currentToken != JsonToken.START_OBJECT) { String type = isMap ? "map" : "object"; throw new IllegalStateException("Unexpected token to begin " + type + " deserialization: " + currentToken); } return valueReaderFunc.read(this); } /** * Reads an untyped object. * <p> * If the {@link * <p> * If the starting token is {@link JsonToken * {@link JsonToken * for reading an unknown type. If the untyped object is deeply nested an {@link IllegalStateException} will also be * thrown to prevent a stack overflow exception. * <p> * The returned object will be one of the following: * * <ul> * <li>null if the starting token is null or {@link JsonToken * <li>true or false if the starting token is {@link JsonToken * <li>One of int, long, float, or double is the starting token is {@link JsonToken * containing value will be used if the number is an integer</li> * <li>An array of untyped elements if the starting point is {@link JsonToken * <li>A map of String-untyped value if the starting point is {@link JsonToken * </ul> * * @return The untyped value based on the outlined return types above. * @throws IllegalStateException If the starting point of the object is {@link JsonToken * {@link JsonToken * @throws IOException If the untyped cannot be read. 
*/ public final Object readUntyped() throws IOException { JsonToken token = currentToken(); if (token == null) { token = nextToken(); } if (token == JsonToken.END_ARRAY || token == JsonToken.END_OBJECT || token == JsonToken.FIELD_NAME) { throw new IllegalStateException("Unexpected token to begin an untyped field: " + token); } return readUntypedHelper(0); } /** * Gets the text value for the {@link * <p> * The following is how each {@link JsonToken} type is handled: * * <ul> * <li>{@link JsonToken * <li>{@link JsonToken * <li>{@link JsonToken * <li>{@link JsonToken * <li>{@link JsonToken * <li>{@link JsonToken * <li>{@link JsonToken * <li>{@link JsonToken * <li>{@link JsonToken * </ul> * * If the current token is null an {@link IllegalStateException} will be thrown. * * @return The text value for the {@link * @throws IllegalStateException If the current token is null. * @throws IOException If the text cannot be read. */ public final String getText() throws IOException { JsonToken token = currentToken(); if (token == null) { throw new IllegalStateException("Current token cannot be null."); } switch (token) { case START_OBJECT: return "{"; case END_OBJECT: return "}"; case START_ARRAY: return "["; case END_ARRAY: return "]"; case FIELD_NAME: return getFieldName(); case BOOLEAN: return String.valueOf(getBoolean()); case NUMBER: case STRING: return getString(); case NULL: return "null"; default: return ""; } } }
I looked into whether we can infer the existing stack depth from the underlying implementation. It appears possible in Jackson 2.15, where the deserialization context now tracks depth, but I wasn't able to find an equivalent mechanism in GSON. This should eventually be updated to track the overall stack depth, since we could already be 10-20 layers deep when this method is entered; for now, though, this limit should be more than sufficient.
/**
 * Recursively reads an untyped JSON value (null, boolean, number, string, array, or object).
 * <p>
 * Arrays are materialized as {@code List<Object>} and objects as {@code Map<String, Object>}, with
 * their elements read recursively. Nesting is capped to prevent a {@link StackOverflowError} on
 * deeply nested, potentially malicious payloads.
 * <p>
 * NOTE(review): {@code depth} only counts nesting relative to the initial call ({@code readUntyped}
 * passes 0); the reader may already be nested inside an outer document when this is invoked, so the
 * cap does not bound absolute stack depth — confirm whether overall depth tracking is needed.
 *
 * @param depth The current nesting depth, 0-based from the initial call.
 * @return The untyped value the reader is currently pointing to.
 * @throws IllegalStateException If the nesting depth exceeds 1000 or an unknown token is encountered.
 * @throws IOException If the JSON value cannot be read.
 */
private Object readUntypedHelper(int depth) throws IOException {
    // Guard against StackOverflowError. 'depth' is 0-based, so '>= 1000' permits exactly the
    // 1000 levels the error message advertises (the previous '>= 999' allowed only 999).
    if (depth >= 1000) {
        throw new IllegalStateException("Untyped object exceeded allowed object nested depth of 1000.");
    }

    JsonToken token = currentToken();
    if (token == JsonToken.NULL || token == null) {
        return null;
    } else if (token == JsonToken.BOOLEAN) {
        return getBoolean();
    } else if (token == JsonToken.NUMBER) {
        String numberText = getText();

        // Non-finite sentinels are returned as their raw text rather than parsed.
        if ("INF".equals(numberText) || "Infinity".equals(numberText) || "-INF".equals(numberText)
            || "-Infinity".equals(numberText) || "NaN".equals(numberText)) {
            return numberText;
        } else if (numberText.contains(".")) {
            return Double.parseDouble(numberText);
        } else {
            try {
                return Integer.parseInt(numberText);
            } catch (NumberFormatException ignored) {
                // Too large for int, or not a plain integer; fall through to wider types.
            }

            try {
                return Long.parseLong(numberText);
            } catch (NumberFormatException ignored) {
                // Handles integers written in scientific notation without a decimal point
                // (e.g. "1e5", valid JSON) and magnitudes beyond the range of long.
                // Previously these escaped as an uncaught NumberFormatException.
                return Double.parseDouble(numberText);
            }
        }
    } else if (token == JsonToken.STRING) {
        return getString();
    } else if (token == JsonToken.START_ARRAY) {
        List<Object> array = new ArrayList<>();

        while (nextToken() != JsonToken.END_ARRAY) {
            array.add(readUntypedHelper(depth + 1));
        }

        return array;
    } else if (token == JsonToken.START_OBJECT) {
        // LinkedHashMap preserves the field order of the JSON document.
        Map<String, Object> object = new LinkedHashMap<>();

        while (nextToken() != JsonToken.END_OBJECT) {
            String fieldName = getFieldName();
            nextToken();
            Object value = readUntypedHelper(depth + 1);

            object.put(fieldName, value);
        }

        return object;
    }

    throw new IllegalStateException("Unknown token type while reading an untyped field: " + token);
}
if (depth >= 999) {
/**
 * Recursively reads an untyped JSON value (null, boolean, number, string, array, or object).
 * <p>
 * Arrays are materialized as {@code List<Object>} and objects as {@code Map<String, Object>}, with
 * their elements read recursively. Nesting is capped to prevent a {@link StackOverflowError} on
 * deeply nested, potentially malicious payloads.
 * <p>
 * NOTE(review): {@code depth} only counts nesting relative to the initial call ({@code readUntyped}
 * passes 0); the reader may already be nested inside an outer document when this is invoked, so the
 * cap does not bound absolute stack depth — confirm whether overall depth tracking is needed.
 *
 * @param depth The current nesting depth, 0-based from the initial call.
 * @return The untyped value the reader is currently pointing to.
 * @throws IllegalStateException If the nesting depth exceeds 1000 or an unknown token is encountered.
 * @throws IOException If the JSON value cannot be read.
 */
private Object readUntypedHelper(int depth) throws IOException {
    // Guard against StackOverflowError. 'depth' is 0-based, so '>= 1000' permits exactly the
    // 1000 levels the error message advertises (the previous '>= 999' allowed only 999).
    if (depth >= 1000) {
        throw new IllegalStateException("Untyped object exceeded allowed object nested depth of 1000.");
    }

    JsonToken token = currentToken();
    if (token == JsonToken.NULL || token == null) {
        return null;
    } else if (token == JsonToken.BOOLEAN) {
        return getBoolean();
    } else if (token == JsonToken.NUMBER) {
        String numberText = getText();

        // Non-finite sentinels are returned as their raw text rather than parsed.
        if ("INF".equals(numberText) || "Infinity".equals(numberText) || "-INF".equals(numberText)
            || "-Infinity".equals(numberText) || "NaN".equals(numberText)) {
            return numberText;
        } else if (numberText.contains(".")) {
            return Double.parseDouble(numberText);
        } else {
            try {
                return Integer.parseInt(numberText);
            } catch (NumberFormatException ignored) {
                // Too large for int, or not a plain integer; fall through to wider types.
            }

            try {
                return Long.parseLong(numberText);
            } catch (NumberFormatException ignored) {
                // Handles integers written in scientific notation without a decimal point
                // (e.g. "1e5", valid JSON) and magnitudes beyond the range of long.
                // Previously these escaped as an uncaught NumberFormatException.
                return Double.parseDouble(numberText);
            }
        }
    } else if (token == JsonToken.STRING) {
        return getString();
    } else if (token == JsonToken.START_ARRAY) {
        List<Object> array = new ArrayList<>();

        while (nextToken() != JsonToken.END_ARRAY) {
            array.add(readUntypedHelper(depth + 1));
        }

        return array;
    } else if (token == JsonToken.START_OBJECT) {
        // LinkedHashMap preserves the field order of the JSON document.
        Map<String, Object> object = new LinkedHashMap<>();

        while (nextToken() != JsonToken.END_OBJECT) {
            String fieldName = getFieldName();
            nextToken();
            Object value = readUntypedHelper(depth + 1);

            object.put(fieldName, value);
        }

        return object;
    }

    throw new IllegalStateException("Unknown token type while reading an untyped field: " + token);
}
/**
 * Reads a JSON encoded value as a stream of tokens.
 * <p>
 * Concrete subclasses supply the token-level primitives (current/next token, scalar getters);
 * this class layers structured reads (objects, arrays, maps, untyped values) on top of them.
 */
abstract class JsonReader implements Closeable {
    // Shared, thread-safe Jackson helper used to JSON-escape strings when re-serializing
    // sub-streams in readInternal/appendJson.
    private static final JsonStringEncoder ENCODER = JsonStringEncoder.getInstance();

    /**
     * Creates an instance of {@link JsonReader}.
     */
    public JsonReader() {
    }

    /**
     * Gets the {@link JsonToken} that the reader currently points to.
     * <p>
     * Returns null if the reader isn't pointing to a token, which happens before reading begins
     * or after reading completes.
     *
     * @return The current {@link JsonToken}, or null if the reader isn't pointing to a token.
     */
    public abstract JsonToken currentToken();

    /**
     * Iterates to and returns the next {@link JsonToken} in the JSON encoded value.
     * <p>
     * Returns null once reading of the JSON encoded value completes.
     *
     * @return The next {@link JsonToken}, or null if reading completes.
     * @throws IOException If the next token cannot be determined.
     */
    public abstract JsonToken nextToken() throws IOException;

    /**
     * Closes the JSON stream.
     *
     * @throws IOException If the underlying content store fails to close.
     */
    @Override
    public abstract void close() throws IOException;

    /**
     * Whether the current token begins an array or an object.
     *
     * @return Whether the current token is {@link JsonToken#START_ARRAY} or
     * {@link JsonToken#START_OBJECT}.
     */
    public final boolean isStartArrayOrObject() {
        return isStartArrayOrObject(currentToken());
    }

    private static boolean isStartArrayOrObject(JsonToken token) {
        return token == JsonToken.START_ARRAY || token == JsonToken.START_OBJECT;
    }

    /**
     * Whether the current token ends an array or an object.
     *
     * @return Whether the current token is {@link JsonToken#END_ARRAY} or
     * {@link JsonToken#END_OBJECT}.
     */
    public final boolean isEndArrayOrObject() {
        return isEndArrayOrObject(currentToken());
    }

    private static boolean isEndArrayOrObject(JsonToken token) {
        return token == JsonToken.END_ARRAY || token == JsonToken.END_OBJECT;
    }

    /**
     * Gets the binary value of the current token.
     * <p>
     * NOTE(review): implementations are presumed to Base64-decode string tokens and return null
     * for null tokens — confirm against the concrete subclass.
     *
     * @return The binary value of the current token.
     * @throws IllegalStateException If the current token cannot be read as binary.
     * @throws IOException If the next value cannot be read as binary.
     */
    public abstract byte[] getBinary() throws IOException;

    /**
     * Gets the boolean value if the reader is currently pointing to a boolean token.
     *
     * @return The boolean value of the current token.
     * @throws IllegalStateException If the current token isn't a boolean.
     * @throws IOException If the next value cannot be read as a boolean.
     */
    public abstract boolean getBoolean() throws IOException;

    /**
     * Gets the float value if the reader is currently pointing to a numeric (or numeric-text)
     * token.
     *
     * @return The float value of the current token.
     * @throws NumberFormatException If the current token cannot be converted to a float.
     * @throws IllegalStateException If the current token isn't numeric.
     * @throws IOException If the next value cannot be read as a float.
     */
    public abstract float getFloat() throws IOException;

    /**
     * Gets the double value if the reader is currently pointing to a numeric (or numeric-text)
     * token.
     *
     * @return The double value of the current token.
     * @throws NumberFormatException If the current token cannot be converted to a double.
     * @throws IllegalStateException If the current token isn't numeric.
     * @throws IOException If the next value cannot be read as a double.
     */
    public abstract double getDouble() throws IOException;

    /**
     * Gets the int value if the reader is currently pointing to a numeric (or numeric-text)
     * token.
     *
     * @return The int value of the current token.
     * @throws NumberFormatException If the current token cannot be converted to an int.
     * @throws IllegalStateException If the current token isn't numeric.
     * @throws IOException If the next value cannot be read as an int.
     */
    public abstract int getInt() throws IOException;

    /**
     * Gets the long value if the reader is currently pointing to a numeric (or numeric-text)
     * token.
     *
     * @return The long value of the current token.
     * @throws NumberFormatException If the current token cannot be converted to a long.
     * @throws IllegalStateException If the current token isn't numeric.
     * @throws IOException If the next value cannot be read as a long.
     */
    public abstract long getLong() throws IOException;

    /**
     * Gets the String value if the reader is currently pointing to a string-convertible token.
     * <p>
     * NOTE(review): presumed to return null when the current token is NULL — confirm against the
     * concrete subclass.
     *
     * @return The String value of the current token.
     * @throws IllegalStateException If the current token cannot be read as a String.
     * @throws IOException If the next value cannot be read as a String.
     */
    public abstract String getString() throws IOException;

    /**
     * Gets the field name if the reader is currently pointing to a {@link JsonToken#FIELD_NAME}.
     *
     * @return The field name of the current token.
     * @throws IllegalStateException If the current token isn't a field name.
     * @throws IOException If the next value cannot be read as a field name.
     */
    public abstract String getFieldName() throws IOException;

    /**
     * Convenience method to read a nullable type.
     * <p>
     * If the current token is {@link JsonToken#NULL} null is returned, otherwise this reader is
     * passed into {@code nonNullGetter} to produce the value. Effectively, this is the generic
     * form of the get*NullableValue methods.
     *
     * @param nonNullGetter Function that reads the non-null JSON value.
     * @param <T> Type returned by the function.
     * @return null if the current token is {@link JsonToken#NULL}, otherwise the value read by
     * {@code nonNullGetter}.
     * @throws IOException If the next value cannot be read as a nullable.
     */
    public final <T> T getNullable(ReadValueCallback<JsonReader, T> nonNullGetter) throws IOException {
        return currentToken() == JsonToken.NULL ? null : nonNullGetter.read(this);
    }

    /**
     * Recursively skips the JSON token sub-stream if the current token is
     * {@link JsonToken#START_ARRAY} or {@link JsonToken#START_OBJECT}.
     * <p>
     * If the current token isn't the beginning of an array or object this method is a no-op.
     *
     * @throws IOException If the children cannot be skipped.
     */
    public abstract void skipChildren() throws IOException;

    /**
     * Reads and returns the current JSON object this {@link JsonReader} is pointing to, as a new
     * buffered {@link JsonReader}. This mutates the current location of this reader.
     * <p>
     * NOTE(review): starting from a field name is presumed to buffer the remaining fields of the
     * enclosing object — confirm against the concrete subclass.
     *
     * @return The buffered JSON object the {@link JsonReader} was pointing to.
     * @throws IllegalStateException If the current token cannot begin a buffered object.
     * @throws IOException If the child object cannot be buffered.
     */
    public abstract JsonReader bufferObject() throws IOException;

    /**
     * Indicates whether this {@link JsonReader} supports {@link #reset()}.
     *
     * @return Whether resetting is supported.
     */
    public abstract boolean isResetSupported();

    /**
     * Creates a new {@link JsonReader} reset to the beginning of the JSON stream.
     * <p>
     * Check {@link #isResetSupported()} first; if resetting isn't supported an
     * {@link IllegalStateException} is thrown.
     *
     * @return A new {@link JsonReader} reset to the beginning of the JSON stream.
     * @throws IllegalStateException If resetting isn't supported by the current JsonReader.
     * @throws IOException If the {@link JsonReader} cannot be reset.
     */
    public abstract JsonReader reset() throws IOException;

    /**
     * Recursively reads the JSON token sub-stream if the current token is
     * {@link JsonToken#START_ARRAY} or {@link JsonToken#START_OBJECT}; otherwise nothing is read.
     *
     * @return The raw textual value of the JSON token sub-stream.
     * @throws IOException If the children cannot be read.
     */
    public final String readChildren() throws IOException {
        return readInternal(new StringBuilder(), true, false).toString();
    }

    /**
     * Recursively reads the JSON token sub-stream into {@code buffer} if the current token is
     * {@link JsonToken#START_ARRAY} or {@link JsonToken#START_OBJECT}; otherwise nothing is read.
     *
     * @param buffer The {@link StringBuilder} where the read sub-stream will be written.
     * @throws NullPointerException If {@code buffer} is null.
     * @throws IOException If the children cannot be read.
     */
    public final void readChildren(StringBuilder buffer) throws IOException {
        readInternal(buffer, true, false);
    }

    /**
     * Reads the remaining fields in the current JSON object as a JSON object.
     * <p>
     * Starting from {@link JsonToken#FIELD_NAME} creates a JSON object whose first field is the
     * current field; starting from {@link JsonToken#START_OBJECT} reads the whole object.
     * Otherwise nothing is read.
     *
     * @return The raw textual value of the remaining JSON fields.
     * @throws IOException If the remaining JSON fields cannot be read.
     */
    public final String readRemainingFieldsAsJsonObject() throws IOException {
        return readInternal(new StringBuilder(), false, true).toString();
    }

    /**
     * Reads the remaining fields in the current JSON object into {@code buffer} as a JSON object.
     * <p>
     * Starting from {@link JsonToken#FIELD_NAME} creates a JSON object whose first field is the
     * current field; starting from {@link JsonToken#START_OBJECT} reads the whole object.
     * Otherwise nothing is read.
     *
     * @param buffer The {@link StringBuilder} where the remaining JSON fields will be written.
     * @throws NullPointerException If {@code buffer} is null.
     * @throws IOException If the remaining JSON fields cannot be read.
     */
    public final void readRemainingFieldsAsJsonObject(StringBuilder buffer) throws IOException {
        readInternal(buffer, false, true);
    }

    /**
     * Shared implementation of readChildren/readRemainingFieldsAsJsonObject: re-serializes the
     * token sub-stream starting at the current token into {@code buffer}.
     *
     * @param buffer Destination buffer; never null.
     * @param canStartAtArray Whether {@link JsonToken#START_ARRAY} is an acceptable start token.
     * @param canStartAtFieldName Whether {@link JsonToken#FIELD_NAME} is an acceptable start token.
     * @return {@code buffer}, for chaining.
     * @throws IOException If the sub-stream cannot be read.
     */
    private StringBuilder readInternal(StringBuilder buffer, boolean canStartAtArray, boolean canStartAtFieldName)
        throws IOException {
        Objects.requireNonNull(buffer, "The 'buffer' used to read the JSON object cannot be null.");

        JsonToken token = currentToken();

        boolean canRead = (token == JsonToken.START_OBJECT)
            || (canStartAtArray && token == JsonToken.START_ARRAY)
            || (canStartAtFieldName && token == JsonToken.FIELD_NAME);
        if (!canRead) {
            // Not a readable starting point; leave the buffer untouched.
            return buffer;
        }

        if (token == JsonToken.FIELD_NAME) {
            // Synthesize the object opener so the remaining fields form a complete JSON object.
            buffer.append("{\"");
            ENCODER.quoteAsString(getFieldName(), buffer);
            buffer.append("\":");
            token = nextToken();
        }

        appendJson(buffer, token);
        int depth = 1;

        while (depth > 0) {
            JsonToken previousToken = token;
            token = nextToken();

            if (isStartArrayOrObject(token)) {
                depth++;
            } else if (isEndArrayOrObject(token)) {
                depth--;
            } else if (token == null) {
                // Stream ended before the structure closed; return what was read so far.
                return buffer;
            }

            // Insert a comma between siblings, but not after an opener, before a closer, or
            // between a field name and its value.
            if (!(isStartArrayOrObject(previousToken)
                || isEndArrayOrObject(token)
                || previousToken == JsonToken.FIELD_NAME)) {
                buffer.append(',');
            }

            appendJson(buffer, token);
        }

        return buffer;
    }

    /**
     * Convenience method to write the current JSON element into a buffer.
     *
     * @param buffer The buffer where the JSON element value will be written.
     * @param token The type of the JSON element.
     * @throws IOException If an error occurs while reading the JSON element.
     */
    private void appendJson(StringBuilder buffer, JsonToken token) throws IOException {
        if (token == JsonToken.FIELD_NAME) {
            buffer.append("\"");
            ENCODER.quoteAsString(getFieldName(), buffer);
            buffer.append("\":");
        } else if (token == JsonToken.STRING) {
            buffer.append("\"");
            ENCODER.quoteAsString(getString(), buffer);
            buffer.append("\"");
        } else {
            buffer.append(getText());
        }
    }

    /**
     * Reads a JSON object.
     * <p>
     * If the current token is null, {@link #nextToken()} is called first. A null or
     * {@link JsonToken#NULL} token returns null; any token other than
     * {@link JsonToken#START_OBJECT} throws.
     * <p>
     * This {@link JsonReader} is passed into {@code objectReaderFunc} to handle reading the
     * object.
     *
     * @param objectReaderFunc Function that reads the object.
     * @param <T> The object type.
     * @return The read JSON object, or null if the token is null or {@link JsonToken#NULL}.
     * @throws IllegalStateException If the token isn't {@link JsonToken#START_OBJECT}.
     * @throws IOException If the object cannot be read.
     */
    public final <T> T readObject(ReadValueCallback<JsonReader, T> objectReaderFunc) throws IOException {
        return readMapOrObject(objectReaderFunc, false);
    }

    /**
     * Reads a JSON array.
     * <p>
     * If the current token is null, {@link #nextToken()} is called first. A null or
     * {@link JsonToken#NULL} token returns null; any token other than
     * {@link JsonToken#START_ARRAY} throws.
     * <p>
     * Each element token is read by passing this {@link JsonReader} into
     * {@code elementReaderFunc}. An array with no elements returns an empty list.
     *
     * @param elementReaderFunc Function that reads each element of the array.
     * @param <T> The array element type.
     * @return The read JSON array, or null if the token is null or {@link JsonToken#NULL}.
     * @throws IllegalStateException If the token isn't {@link JsonToken#START_ARRAY}.
     * @throws IOException If the array cannot be read.
     */
    public final <T> List<T> readArray(ReadValueCallback<JsonReader, T> elementReaderFunc) throws IOException {
        JsonToken currentToken = currentToken();
        if (currentToken == null) {
            currentToken = nextToken();
        }

        if (currentToken == JsonToken.NULL || currentToken == null) {
            return null;
        } else if (currentToken != JsonToken.START_ARRAY) {
            throw new IllegalStateException("Unexpected token to begin array deserialization: " + currentToken);
        }

        List<T> array = new LinkedList<>();

        while (nextToken() != JsonToken.END_ARRAY) {
            array.add(elementReaderFunc.read(this));
        }

        return array;
    }

    /**
     * Reads a JSON map.
     * <p>
     * If the current token is null, {@link #nextToken()} is called first. A null or
     * {@link JsonToken#NULL} token returns null; any token other than
     * {@link JsonToken#START_OBJECT} throws.
     * <p>
     * For each key-value pair the field name is read as the key, then this {@link JsonReader} is
     * passed into {@code valueReaderFunc} to read the value. An object with no fields returns an
     * empty map.
     *
     * @param valueReaderFunc Function that reads each value of the key-value pair.
     * @param <T> The value element type.
     * @return The read JSON map, or null if the token is null or {@link JsonToken#NULL}.
     * @throws IllegalStateException If the token isn't {@link JsonToken#START_OBJECT}.
     * @throws IOException If the map cannot be read.
     */
    public final <T> Map<String, T> readMap(ReadValueCallback<JsonReader, T> valueReaderFunc) throws IOException {
        return readMapOrObject(reader -> {
            // LinkedHashMap preserves the field order of the JSON document.
            Map<String, T> map = new LinkedHashMap<>();

            while (nextToken() != JsonToken.END_OBJECT) {
                String fieldName = getFieldName();
                nextToken();

                map.put(fieldName, valueReaderFunc.read(this));
            }

            return map;
        }, true);
    }

    /**
     * Shared precondition handling for {@link #readObject} and {@link #readMap}: advances past a
     * null current token, returns null on NULL, and validates START_OBJECT before delegating.
     */
    private <T> T readMapOrObject(ReadValueCallback<JsonReader, T> valueReaderFunc, boolean isMap) throws IOException {
        JsonToken currentToken = currentToken();
        if (currentToken == null) {
            currentToken = nextToken();
        }

        if (currentToken == JsonToken.NULL || currentToken == null) {
            return null;
        } else if (currentToken != JsonToken.START_OBJECT) {
            // Error message distinguishes map vs object deserialization for the caller.
            String type = isMap ? "map" : "object";
            throw new IllegalStateException("Unexpected token to begin " + type + " deserialization: " + currentToken);
        }

        return valueReaderFunc.read(this);
    }

    /**
     * Reads an untyped object.
     * <p>
     * If the current token is null, {@link #nextToken()} is called first. Starting from
     * {@link JsonToken#END_ARRAY}, {@link JsonToken#END_OBJECT}, or {@link JsonToken#FIELD_NAME}
     * throws, as those aren't valid starting points for an unknown type. Deeply nested untyped
     * objects also throw {@link IllegalStateException} to prevent a stack overflow.
     * <p>
     * The returned object will be one of the following:
     *
     * <ul>
     * <li>null if the starting token is null or {@link JsonToken#NULL}</li>
     * <li>true or false if the starting token is {@link JsonToken#BOOLEAN}</li>
     * <li>A numeric type if the starting token is {@link JsonToken#NUMBER}, with the smallest
     * containing value used for integers</li>
     * <li>An array of untyped elements if the starting point is {@link JsonToken#START_ARRAY}</li>
     * <li>A map of String-untyped value if the starting point is {@link JsonToken#START_OBJECT}</li>
     * </ul>
     *
     * @return The untyped value based on the outlined return types above.
     * @throws IllegalStateException If the starting token is an end token or a field name, or if
     * nesting is too deep.
     * @throws IOException If the untyped value cannot be read.
     */
    public final Object readUntyped() throws IOException {
        JsonToken token = currentToken();
        if (token == null) {
            token = nextToken();
        }

        // Untyped fields cannot begin at the end of a structure or on a field name.
        if (token == JsonToken.END_ARRAY || token == JsonToken.END_OBJECT || token == JsonToken.FIELD_NAME) {
            throw new IllegalStateException("Unexpected token to begin an untyped field: " + token);
        }

        return readUntypedHelper(0);
    }

    /**
     * Gets the text value for the current token.
     * <p>
     * Structural tokens return their literal character ({@code {}, [], ...}), field names return
     * {@link #getFieldName()}, booleans are stringified, numbers and strings return
     * {@link #getString()}, and NULL returns the literal text {@code "null"}.
     *
     * @return The text value for the current token.
     * @throws IllegalStateException If the current token is null.
     * @throws IOException If the text cannot be read.
     */
    public final String getText() throws IOException {
        JsonToken token = currentToken();

        if (token == null) {
            throw new IllegalStateException("Current token cannot be null.");
        }

        switch (token) {
            case START_OBJECT:
                return "{";

            case END_OBJECT:
                return "}";

            case START_ARRAY:
                return "[";

            case END_ARRAY:
                return "]";

            case FIELD_NAME:
                return getFieldName();

            case BOOLEAN:
                return String.valueOf(getBoolean());

            case NUMBER:
            case STRING:
                return getString();

            case NULL:
                return "null";

            default:
                return "";
        }
    }
}
class JsonReader implements Closeable { private static final JsonStringEncoder ENCODER = JsonStringEncoder.getInstance(); /** * Creates an instance of {@link JsonReader}. */ public JsonReader() { } /** * Gets the {@link JsonToken} that the reader currently points. * <p> * Returns null if the reader isn't pointing to a token. This happens if the reader hasn't begun to read the JSON * value or if reading of the JSON value has completed. * * @return The {@link JsonToken} that the reader currently points, or null if the reader isn't pointing to a token. */ public abstract JsonToken currentToken(); /** * Iterates to and returns the next {@link JsonToken} in the JSON encoded value. * <p> * Returns null if iterating to the next token completes reading of the JSON encoded value. * * @return The next {@link JsonToken} in the JSON encoded value, or null if reading completes. * @throws IOException If the next token cannot be determined. */ public abstract JsonToken nextToken() throws IOException; /** * Closes the JSON stream. * * @throws IOException If the underlying content store fails to close. */ @Override public abstract void close() throws IOException; /** * Whether the {@link * * @return Whether the {@link */ public final boolean isStartArrayOrObject() { return isStartArrayOrObject(currentToken()); } private static boolean isStartArrayOrObject(JsonToken token) { return token == JsonToken.START_ARRAY || token == JsonToken.START_OBJECT; } /** * Whether the {@link * * @return Whether the {@link */ public final boolean isEndArrayOrObject() { return isEndArrayOrObject(currentToken()); } private static boolean isEndArrayOrObject(JsonToken token) { return token == JsonToken.END_ARRAY || token == JsonToken.END_OBJECT; } /** * Gets the binary value if the reader is currently pointing to a {@link JsonToken * <p> * This returns the equivalent of {@link Base64 * <p> * If the reader is pointing to a {@link JsonToken * other token type an {@link IllegalStateException} will be thrown. 
* * @return The binary value based on whether the current token is {@link JsonToken * {@link JsonToken * @throws IllegalStateException If the reader isn't pointing to either {@link JsonToken * {@link JsonToken * @throws IOException If the next value cannot be read as binary. */ public abstract byte[] getBinary() throws IOException; /** * Gets the boolean value if the reader is currently pointing to a {@link JsonToken * <p> * If the reader is pointing to any other token type an {@link IllegalStateException} will be thrown. * <p> * If {@link Boolean} should be read use {@link * * @return The boolean value based on the {@link JsonToken * @throws IllegalStateException If the reader isn't pointing to {@link JsonToken * @throws IOException If the next value cannot be read as a boolean. */ public abstract boolean getBoolean() throws IOException; /** * Gets the float value if the reader is currently pointing to a {@link JsonToken * {@link JsonToken * <p> * {@link JsonToken * converted to a float. * <p> * All other {@link JsonToken} types will throw an {@link IllegalStateException}. * <p> * If {@link Float} should be read use {@link * * @return The float value based on the current token. * @throws NumberFormatException If the current token is a {@link JsonToken * float. * @throws IllegalStateException If the current token isn't a {@link JsonToken * @throws IOException If the next value cannot be read as a float. */ public abstract float getFloat() throws IOException; /** * Gets the double value if the reader is currently pointing to a {@link JsonToken * {@link JsonToken * <p> * {@link JsonToken * converted to a double. * <p> * All other {@link JsonToken} types will throw an {@link IllegalStateException}. * <p> * If {@link Double} should be read use {@link * * @return The double value based on the current token. * @throws NumberFormatException If the current token is a {@link JsonToken * double. 
* @throws IllegalStateException If the current token isn't a {@link JsonToken * @throws IOException If the next value cannot be read as a double. */ public abstract double getDouble() throws IOException; /** * Gets the int value if the reader is currently pointing to a {@link JsonToken * {@link JsonToken * <p> * {@link JsonToken * converted to an int. * <p> * All other {@link JsonToken} types will throw an {@link IllegalStateException}. * <p> * If {@link Integer} should be read use {@link * * @return The int value based on the current token. * @throws NumberFormatException If the current token is a {@link JsonToken * int. * @throws IllegalStateException If the current token isn't a {@link JsonToken * @throws IOException If the next value cannot be read as an int. */ public abstract int getInt() throws IOException; /** * Gets the long value if the reader is currently pointing to a {@link JsonToken * {@link JsonToken * <p> * {@link JsonToken * converted to a long. * <p> * All other {@link JsonToken} types will throw an {@link IllegalStateException}. * <p> * If {@link Long} should be read use {@link * * @return The long value based on the current token. * @throws NumberFormatException If the current token is a {@link JsonToken * long. * @throws IllegalStateException If the current token isn't a {@link JsonToken * @throws IOException If the next value cannot be read as a long. */ public abstract long getLong() throws IOException; /** * Gets the String value if the reader is currently pointing to a {@link JsonToken * {@link JsonToken * <p> * If the current token is a {@link JsonToken * value will be returned. If the current token is {@link JsonToken * <p> * All other {@link JsonToken} types will throw an {@link IllegalStateException}. * * @return The String value based on the current token. * @throws IllegalStateException If the current token isn't a {@link JsonToken * {@link JsonToken * @throws IOException If the next value cannot be read as a String. 
*/ public abstract String getString() throws IOException; /** * Gets the field name if the reader is currently pointing to a {@link JsonToken * <p> * All other {@link JsonToken} types will throw an {@link IllegalStateException}. * * @return The field name based on the current token. * @throws IllegalStateException If the current token isn't a {@link JsonToken * @throws IOException If the next value cannot be read as a field name. */ public abstract String getFieldName() throws IOException; /** * Convenience method to read a nullable type. * <p> * If the {@link * will be passed into the {@code nonNullGetter} function to get the value. Effectively, this is the generic form of * the get*NullableValue methods. * * @param nonNullGetter Function that reads the non-null JSON value. * @param <T> Type returned by the function. * @return null if the {@link * {@code nonNullGetter}. * @throws IOException If the next value cannot be read as a nullable. */ public final <T> T getNullable(ReadValueCallback<JsonReader, T> nonNullGetter) throws IOException { return currentToken() == JsonToken.NULL ? null : nonNullGetter.read(this); } /** * Recursively skips the JSON token sub-stream if the current token is either {@link JsonToken * {@link JsonToken * <p> * If the current token isn't the beginning of an array or object this method is a no-op. * * @throws IOException If the children cannot be skipped. */ public abstract void skipChildren() throws IOException; /** * Reads and returns the current JSON object the {@link JsonReader} is pointing to. This will mutate the current * location of this {@link JsonReader}. * <p> * If the {@link * {@link IllegalStateException} will be thrown. * <p> * If the {@link * JSON field is the {@link * create a new JSON object with only a subset of fields (those remaining from when the method is called). * <p> * The returned {@link JsonReader} is able to be {@link * * @return The buffered JSON object the {@link JsonReader} was pointing to. 
* @throws IllegalStateException If the {@link * {@link JsonToken * @throws IOException If the child object cannot be buffered. */ public abstract JsonReader bufferObject() throws IOException; /** * Indicates whether the {@link JsonReader} supports {@link * * @return Whether {@link */ public abstract boolean isResetSupported(); /** * Creates a new {@link JsonReader} reset to the beginning of the JSON stream. * <p> * Use {@link * and it isn't supported an {@link IllegalStateException} will be thrown. * * @return A new {@link JsonReader} reset to the beginning of the JSON stream. * @throws IllegalStateException If resetting isn't supported by the current JsonReader. * @throws IOException If the {@link JsonReader} cannot be reset. */ public abstract JsonReader reset() throws IOException; /** * Recursively reads the JSON token sub-stream if the current token is either {@link JsonToken * {@link JsonToken * <p> * If the {@link * be read. * * @return The raw textual value of the JSON token sub-stream. * @throws IOException If the children cannot be read. */ public final String readChildren() throws IOException { return readInternal(new StringBuilder(), true, false).toString(); } /** * Recursively reads the JSON token sub-stream if the current token is either {@link JsonToken * {@link JsonToken * <p> * If the {@link * be read. * * @param buffer The {@link StringBuilder} where the read sub-stream will be written. * @throws NullPointerException If {@code buffer} is null. * @throws IOException If the children cannot be read. */ public final void readChildren(StringBuilder buffer) throws IOException { readInternal(buffer, true, false); } /** * Reads the remaining fields in the current JSON object as a JSON object. * <p> * If the {@link * {@link * object where the first field is the current field and reads the remaining fields in the JSON object. * <p> * If the {@link * be read. * * @return The raw textual value of the remaining JSON fields. 
* @throws IOException If the remaining JSON fields cannot be read. */ public final String readRemainingFieldsAsJsonObject() throws IOException { return readInternal(new StringBuilder(), false, true).toString(); } /** * Reads the remaining fields in the current JSON object as a JSON object. * <p> * If the {@link * {@link * a JSON object where the first field is the current field and reads the remaining fields in the JSON object. * <p> * If the {@link * be read. * * @param buffer The {@link StringBuilder} where the remaining JSON fields will be written. * @throws NullPointerException If {@code buffer} is null. * @throws IOException If the remaining JSON fields cannot be read. */ public final void readRemainingFieldsAsJsonObject(StringBuilder buffer) throws IOException { readInternal(buffer, false, true); } private StringBuilder readInternal(StringBuilder buffer, boolean canStartAtArray, boolean canStartAtFieldName) throws IOException { Objects.requireNonNull(buffer, "The 'buffer' used to read the JSON object cannot be null."); JsonToken token = currentToken(); boolean canRead = (token == JsonToken.START_OBJECT) || (canStartAtArray && token == JsonToken.START_ARRAY) || (canStartAtFieldName && token == JsonToken.FIELD_NAME); if (!canRead) { return buffer; } if (token == JsonToken.FIELD_NAME) { buffer.append("{\""); ENCODER.quoteAsString(getFieldName(), buffer); buffer.append("\":"); token = nextToken(); } appendJson(buffer, token); int depth = 1; while (depth > 0) { JsonToken previousToken = token; token = nextToken(); if (isStartArrayOrObject(token)) { depth++; } else if (isEndArrayOrObject(token)) { depth--; } else if (token == null) { return buffer; } if (!(isStartArrayOrObject(previousToken) || isEndArrayOrObject(token) || previousToken == JsonToken.FIELD_NAME)) { buffer.append(','); } appendJson(buffer, token); } return buffer; } /** * Convenience method to read a JSON element into a buffer. 
* * @param buffer The buffer where the JSON element value will be written. * @param token The type of the JSON element. * @throws IOException If an error occurs while reading the JSON element. */ private void appendJson(StringBuilder buffer, JsonToken token) throws IOException { if (token == JsonToken.FIELD_NAME) { buffer.append("\""); ENCODER.quoteAsString(getFieldName(), buffer); buffer.append("\":"); } else if (token == JsonToken.STRING) { buffer.append("\""); ENCODER.quoteAsString(getString(), buffer); buffer.append("\""); } else { buffer.append(getText()); } } /** * Reads a JSON object. * <p> * If the {@link * is still null or {@link JsonToken * {@link JsonToken * <p> * Once the JSON stream is prepared for object reading this will get the next token and pass this {@link JsonReader} * into the {@code objectReaderFunc} to handle reading the object. * <p> * If a JSON array should be read use {@link * {@link * * @param objectReaderFunc Function that reads each value of the key-value pair. * @param <T> The value element type. * @return The read JSON object, or null if the {@link JsonToken} is null or {@link JsonToken * @throws IllegalStateException If the token isn't {@link JsonToken * @throws IOException If the object cannot be read. */ public final <T> T readObject(ReadValueCallback<JsonReader, T> objectReaderFunc) throws IOException { return readMapOrObject(objectReaderFunc, false); } /** * Reads a JSON array. * <p> * If the {@link * is still null or {@link JsonToken * {@link JsonToken * <p> * Once the JSON stream is prepared for element reading this will get the element token and pass this * {@link JsonReader} into the {@code elementReaderFunc} to handle reading the element of the array. If the array * has no elements an empty list will be returned. * <p> * If a JSON object should be read use {@link * {@link * * @param elementReaderFunc Function that reads each element of the array. * @param <T> The array element type. 
* @return The read JSON array, or null if the {@link JsonToken} is null or {@link JsonToken * @throws IllegalStateException If the token isn't {@link JsonToken * @throws IOException If the array cannot be read. */ public final <T> List<T> readArray(ReadValueCallback<JsonReader, T> elementReaderFunc) throws IOException { JsonToken currentToken = currentToken(); if (currentToken == null) { currentToken = nextToken(); } if (currentToken == JsonToken.NULL || currentToken == null) { return null; } else if (currentToken != JsonToken.START_ARRAY) { throw new IllegalStateException("Unexpected token to begin array deserialization: " + currentToken); } List<T> array = new LinkedList<>(); while (nextToken() != JsonToken.END_ARRAY) { array.add(elementReaderFunc.read(this)); } return array; } /** * Reads a JSON map. * <p> * If the {@link * is still null or {@link JsonToken * {@link JsonToken * <p> * Once the JSON stream is prepared for key-value reading this will get the next token and read the field name as * the key then get the next token after that and pass this {@link JsonReader} into the {@code valueReaderFunc} to * handle reading the value of the key-value pair. If the object has no elements an empty map will be returned. * <p> * If a JSON object should be read use {@link * {@link * * @param valueReaderFunc Function that reads each value of the key-value pair. * @param <T> The value element type. * @return The read JSON map, or null if the {@link JsonToken} is null or {@link JsonToken * @throws IllegalStateException If the token isn't {@link JsonToken * @throws IOException If the map cannot be read. 
*/ public final <T> Map<String, T> readMap(ReadValueCallback<JsonReader, T> valueReaderFunc) throws IOException { return readMapOrObject(reader -> { Map<String, T> map = new LinkedHashMap<>(); while (nextToken() != JsonToken.END_OBJECT) { String fieldName = getFieldName(); nextToken(); map.put(fieldName, valueReaderFunc.read(this)); } return map; }, true); } private <T> T readMapOrObject(ReadValueCallback<JsonReader, T> valueReaderFunc, boolean isMap) throws IOException { JsonToken currentToken = currentToken(); if (currentToken == null) { currentToken = nextToken(); } if (currentToken == JsonToken.NULL || currentToken == null) { return null; } else if (currentToken != JsonToken.START_OBJECT) { String type = isMap ? "map" : "object"; throw new IllegalStateException("Unexpected token to begin " + type + " deserialization: " + currentToken); } return valueReaderFunc.read(this); } /** * Reads an untyped object. * <p> * If the {@link * <p> * If the starting token is {@link JsonToken * {@link JsonToken * for reading an unknown type. If the untyped object is deeply nested an {@link IllegalStateException} will also be * thrown to prevent a stack overflow exception. * <p> * The returned object will be one of the following: * * <ul> * <li>null if the starting token is null or {@link JsonToken * <li>true or false if the starting token is {@link JsonToken * <li>One of int, long, float, or double is the starting token is {@link JsonToken * containing value will be used if the number is an integer</li> * <li>An array of untyped elements if the starting point is {@link JsonToken * <li>A map of String-untyped value if the starting point is {@link JsonToken * </ul> * * @return The untyped value based on the outlined return types above. * @throws IllegalStateException If the starting point of the object is {@link JsonToken * {@link JsonToken * @throws IOException If the untyped cannot be read. 
*/ public final Object readUntyped() throws IOException { JsonToken token = currentToken(); if (token == null) { token = nextToken(); } if (token == JsonToken.END_ARRAY || token == JsonToken.END_OBJECT || token == JsonToken.FIELD_NAME) { throw new IllegalStateException("Unexpected token to begin an untyped field: " + token); } return readUntypedHelper(0); } /** * Gets the text value for the {@link * <p> * The following is how each {@link JsonToken} type is handled: * * <ul> * <li>{@link JsonToken * <li>{@link JsonToken * <li>{@link JsonToken * <li>{@link JsonToken * <li>{@link JsonToken * <li>{@link JsonToken * <li>{@link JsonToken * <li>{@link JsonToken * <li>{@link JsonToken * </ul> * * If the current token is null an {@link IllegalStateException} will be thrown. * * @return The text value for the {@link * @throws IllegalStateException If the current token is null. * @throws IOException If the text cannot be read. */ public final String getText() throws IOException { JsonToken token = currentToken(); if (token == null) { throw new IllegalStateException("Current token cannot be null."); } switch (token) { case START_OBJECT: return "{"; case END_OBJECT: return "}"; case START_ARRAY: return "["; case END_ARRAY: return "]"; case FIELD_NAME: return getFieldName(); case BOOLEAN: return String.valueOf(getBoolean()); case NUMBER: case STRING: return getString(); case NULL: return "null"; default: return ""; } } }
Just confirming, is the removal of this setter intentional? the code comment says "_and enabling auto registering of new schemas_"
public static void main(String[] args) { TokenCredential tokenCredential = new DefaultAzureCredentialBuilder().build(); SchemaRegistryAsyncClient schemaRegistryAsyncClient = new SchemaRegistryClientBuilder() .credential(tokenCredential) .fullyQualifiedNamespace("{schema-registry-endpoint}") .buildAsyncClient(); SchemaRegistryApacheAvroSerializer serializer = new SchemaRegistryApacheAvroSerializerBuilder() .schemaRegistryClient(schemaRegistryAsyncClient) .schemaGroup("{schema-group}") .avroSpecificReader(true) .buildSerializer(); EventData eventData = getEventDataToDeserialize(serializer); PlayingCard deserializedObject = serializer.deserialize(eventData, TypeReference.createInstance(PlayingCard.class)); MessageContent message = getMessageToDeserialize(serializer); PlayingCard deserializedMessage = serializer.deserialize(eventData, TypeReference.createInstance(PlayingCard.class)); }
public static void main(String[] args) { TokenCredential tokenCredential = new DefaultAzureCredentialBuilder().build(); SchemaRegistryAsyncClient schemaRegistryAsyncClient = new SchemaRegistryClientBuilder() .credential(tokenCredential) .fullyQualifiedNamespace("{schema-registry-endpoint}") .buildAsyncClient(); SchemaRegistryApacheAvroSerializer serializer = new SchemaRegistryApacheAvroSerializerBuilder() .schemaRegistryClient(schemaRegistryAsyncClient) .schemaGroup("{schema-group}") .avroSpecificReader(true) .buildSerializer(); EventData eventData = getEventDataToDeserialize(serializer); PlayingCard deserializedObject = serializer.deserialize(eventData, TypeReference.createInstance(PlayingCard.class)); MessageContent message = getMessageToDeserialize(serializer); PlayingCard deserializedMessage = serializer.deserialize(message, TypeReference.createInstance(PlayingCard.class)); }
class SchemaRegistryApacheAvroDeserializationSample { /** * Main method to run this sample. * * @param args Ignore arguments. */ private static MessageContent getMessageToDeserialize(SchemaRegistryApacheAvroSerializer serializer) { PlayingCard playingCard = new PlayingCard(); playingCard.setCardValue(5); playingCard.setIsFaceCard(false); playingCard.setPlayingCardSuit(PlayingCardSuit.SPADES); return serializer.serialize(playingCard, TypeReference.createInstance(MessageContent.class)); } private static EventData getEventDataToDeserialize(SchemaRegistryApacheAvroSerializer serializer) { PlayingCard playingCard = new PlayingCard(); playingCard.setCardValue(5); playingCard.setIsFaceCard(false); playingCard.setPlayingCardSuit(PlayingCardSuit.SPADES); return serializer.serialize(playingCard, TypeReference.createInstance(EventData.class)); } }
class SchemaRegistryApacheAvroDeserializationSample { /** * Main method to run this sample. * * @param args Ignore arguments. */ private static MessageContent getMessageToDeserialize(SchemaRegistryApacheAvroSerializer serializer) { PlayingCard playingCard = new PlayingCard(); playingCard.setCardValue(5); playingCard.setIsFaceCard(false); playingCard.setPlayingCardSuit(PlayingCardSuit.SPADES); return serializer.serialize(playingCard, TypeReference.createInstance(MessageContent.class)); } private static EventData getEventDataToDeserialize(SchemaRegistryApacheAvroSerializer serializer) { PlayingCard playingCard = new PlayingCard(); playingCard.setCardValue(5); playingCard.setIsFaceCard(false); playingCard.setPlayingCardSuit(PlayingCardSuit.SPADES); return serializer.serialize(playingCard, TypeReference.createInstance(EventData.class)); } }
confirming - if the parameter for this deserialize (in line 54) is _eventData_ not _message_?
public static void main(String[] args) { TokenCredential tokenCredential = new DefaultAzureCredentialBuilder().build(); SchemaRegistryAsyncClient schemaRegistryAsyncClient = new SchemaRegistryClientBuilder() .credential(tokenCredential) .fullyQualifiedNamespace("{schema-registry-endpoint}") .buildAsyncClient(); SchemaRegistryApacheAvroSerializer serializer = new SchemaRegistryApacheAvroSerializerBuilder() .schemaRegistryClient(schemaRegistryAsyncClient) .schemaGroup("{schema-group}") .avroSpecificReader(true) .buildSerializer(); EventData eventData = getEventDataToDeserialize(serializer); PlayingCard deserializedObject = serializer.deserialize(eventData, TypeReference.createInstance(PlayingCard.class)); MessageContent message = getMessageToDeserialize(serializer); PlayingCard deserializedMessage = serializer.deserialize(eventData, TypeReference.createInstance(PlayingCard.class)); }
PlayingCard deserializedMessage = serializer.deserialize(eventData,
public static void main(String[] args) { TokenCredential tokenCredential = new DefaultAzureCredentialBuilder().build(); SchemaRegistryAsyncClient schemaRegistryAsyncClient = new SchemaRegistryClientBuilder() .credential(tokenCredential) .fullyQualifiedNamespace("{schema-registry-endpoint}") .buildAsyncClient(); SchemaRegistryApacheAvroSerializer serializer = new SchemaRegistryApacheAvroSerializerBuilder() .schemaRegistryClient(schemaRegistryAsyncClient) .schemaGroup("{schema-group}") .avroSpecificReader(true) .buildSerializer(); EventData eventData = getEventDataToDeserialize(serializer); PlayingCard deserializedObject = serializer.deserialize(eventData, TypeReference.createInstance(PlayingCard.class)); MessageContent message = getMessageToDeserialize(serializer); PlayingCard deserializedMessage = serializer.deserialize(message, TypeReference.createInstance(PlayingCard.class)); }
class SchemaRegistryApacheAvroDeserializationSample { /** * Main method to run this sample. * * @param args Ignore arguments. */ private static MessageContent getMessageToDeserialize(SchemaRegistryApacheAvroSerializer serializer) { PlayingCard playingCard = new PlayingCard(); playingCard.setCardValue(5); playingCard.setIsFaceCard(false); playingCard.setPlayingCardSuit(PlayingCardSuit.SPADES); return serializer.serialize(playingCard, TypeReference.createInstance(MessageContent.class)); } private static EventData getEventDataToDeserialize(SchemaRegistryApacheAvroSerializer serializer) { PlayingCard playingCard = new PlayingCard(); playingCard.setCardValue(5); playingCard.setIsFaceCard(false); playingCard.setPlayingCardSuit(PlayingCardSuit.SPADES); return serializer.serialize(playingCard, TypeReference.createInstance(EventData.class)); } }
class SchemaRegistryApacheAvroDeserializationSample { /** * Main method to run this sample. * * @param args Ignore arguments. */ private static MessageContent getMessageToDeserialize(SchemaRegistryApacheAvroSerializer serializer) { PlayingCard playingCard = new PlayingCard(); playingCard.setCardValue(5); playingCard.setIsFaceCard(false); playingCard.setPlayingCardSuit(PlayingCardSuit.SPADES); return serializer.serialize(playingCard, TypeReference.createInstance(MessageContent.class)); } private static EventData getEventDataToDeserialize(SchemaRegistryApacheAvroSerializer serializer) { PlayingCard playingCard = new PlayingCard(); playingCard.setCardValue(5); playingCard.setIsFaceCard(false); playingCard.setPlayingCardSuit(PlayingCardSuit.SPADES); return serializer.serialize(playingCard, TypeReference.createInstance(EventData.class)); } }
Yes. Let me fix the comment. Usually don't want in production scenarios to register a schema if you don't recognise it.
public static void main(String[] args) { TokenCredential tokenCredential = new DefaultAzureCredentialBuilder().build(); SchemaRegistryAsyncClient schemaRegistryAsyncClient = new SchemaRegistryClientBuilder() .credential(tokenCredential) .fullyQualifiedNamespace("{schema-registry-endpoint}") .buildAsyncClient(); SchemaRegistryApacheAvroSerializer serializer = new SchemaRegistryApacheAvroSerializerBuilder() .schemaRegistryClient(schemaRegistryAsyncClient) .schemaGroup("{schema-group}") .avroSpecificReader(true) .buildSerializer(); EventData eventData = getEventDataToDeserialize(serializer); PlayingCard deserializedObject = serializer.deserialize(eventData, TypeReference.createInstance(PlayingCard.class)); MessageContent message = getMessageToDeserialize(serializer); PlayingCard deserializedMessage = serializer.deserialize(eventData, TypeReference.createInstance(PlayingCard.class)); }
public static void main(String[] args) { TokenCredential tokenCredential = new DefaultAzureCredentialBuilder().build(); SchemaRegistryAsyncClient schemaRegistryAsyncClient = new SchemaRegistryClientBuilder() .credential(tokenCredential) .fullyQualifiedNamespace("{schema-registry-endpoint}") .buildAsyncClient(); SchemaRegistryApacheAvroSerializer serializer = new SchemaRegistryApacheAvroSerializerBuilder() .schemaRegistryClient(schemaRegistryAsyncClient) .schemaGroup("{schema-group}") .avroSpecificReader(true) .buildSerializer(); EventData eventData = getEventDataToDeserialize(serializer); PlayingCard deserializedObject = serializer.deserialize(eventData, TypeReference.createInstance(PlayingCard.class)); MessageContent message = getMessageToDeserialize(serializer); PlayingCard deserializedMessage = serializer.deserialize(message, TypeReference.createInstance(PlayingCard.class)); }
class SchemaRegistryApacheAvroDeserializationSample { /** * Main method to run this sample. * * @param args Ignore arguments. */ private static MessageContent getMessageToDeserialize(SchemaRegistryApacheAvroSerializer serializer) { PlayingCard playingCard = new PlayingCard(); playingCard.setCardValue(5); playingCard.setIsFaceCard(false); playingCard.setPlayingCardSuit(PlayingCardSuit.SPADES); return serializer.serialize(playingCard, TypeReference.createInstance(MessageContent.class)); } private static EventData getEventDataToDeserialize(SchemaRegistryApacheAvroSerializer serializer) { PlayingCard playingCard = new PlayingCard(); playingCard.setCardValue(5); playingCard.setIsFaceCard(false); playingCard.setPlayingCardSuit(PlayingCardSuit.SPADES); return serializer.serialize(playingCard, TypeReference.createInstance(EventData.class)); } }
class SchemaRegistryApacheAvroDeserializationSample { /** * Main method to run this sample. * * @param args Ignore arguments. */ private static MessageContent getMessageToDeserialize(SchemaRegistryApacheAvroSerializer serializer) { PlayingCard playingCard = new PlayingCard(); playingCard.setCardValue(5); playingCard.setIsFaceCard(false); playingCard.setPlayingCardSuit(PlayingCardSuit.SPADES); return serializer.serialize(playingCard, TypeReference.createInstance(MessageContent.class)); } private static EventData getEventDataToDeserialize(SchemaRegistryApacheAvroSerializer serializer) { PlayingCard playingCard = new PlayingCard(); playingCard.setCardValue(5); playingCard.setIsFaceCard(false); playingCard.setPlayingCardSuit(PlayingCardSuit.SPADES); return serializer.serialize(playingCard, TypeReference.createInstance(EventData.class)); } }
That's a mistake.
public static void main(String[] args) { TokenCredential tokenCredential = new DefaultAzureCredentialBuilder().build(); SchemaRegistryAsyncClient schemaRegistryAsyncClient = new SchemaRegistryClientBuilder() .credential(tokenCredential) .fullyQualifiedNamespace("{schema-registry-endpoint}") .buildAsyncClient(); SchemaRegistryApacheAvroSerializer serializer = new SchemaRegistryApacheAvroSerializerBuilder() .schemaRegistryClient(schemaRegistryAsyncClient) .schemaGroup("{schema-group}") .avroSpecificReader(true) .buildSerializer(); EventData eventData = getEventDataToDeserialize(serializer); PlayingCard deserializedObject = serializer.deserialize(eventData, TypeReference.createInstance(PlayingCard.class)); MessageContent message = getMessageToDeserialize(serializer); PlayingCard deserializedMessage = serializer.deserialize(eventData, TypeReference.createInstance(PlayingCard.class)); }
PlayingCard deserializedMessage = serializer.deserialize(eventData,
public static void main(String[] args) { TokenCredential tokenCredential = new DefaultAzureCredentialBuilder().build(); SchemaRegistryAsyncClient schemaRegistryAsyncClient = new SchemaRegistryClientBuilder() .credential(tokenCredential) .fullyQualifiedNamespace("{schema-registry-endpoint}") .buildAsyncClient(); SchemaRegistryApacheAvroSerializer serializer = new SchemaRegistryApacheAvroSerializerBuilder() .schemaRegistryClient(schemaRegistryAsyncClient) .schemaGroup("{schema-group}") .avroSpecificReader(true) .buildSerializer(); EventData eventData = getEventDataToDeserialize(serializer); PlayingCard deserializedObject = serializer.deserialize(eventData, TypeReference.createInstance(PlayingCard.class)); MessageContent message = getMessageToDeserialize(serializer); PlayingCard deserializedMessage = serializer.deserialize(message, TypeReference.createInstance(PlayingCard.class)); }
class SchemaRegistryApacheAvroDeserializationSample { /** * Main method to run this sample. * * @param args Ignore arguments. */ private static MessageContent getMessageToDeserialize(SchemaRegistryApacheAvroSerializer serializer) { PlayingCard playingCard = new PlayingCard(); playingCard.setCardValue(5); playingCard.setIsFaceCard(false); playingCard.setPlayingCardSuit(PlayingCardSuit.SPADES); return serializer.serialize(playingCard, TypeReference.createInstance(MessageContent.class)); } private static EventData getEventDataToDeserialize(SchemaRegistryApacheAvroSerializer serializer) { PlayingCard playingCard = new PlayingCard(); playingCard.setCardValue(5); playingCard.setIsFaceCard(false); playingCard.setPlayingCardSuit(PlayingCardSuit.SPADES); return serializer.serialize(playingCard, TypeReference.createInstance(EventData.class)); } }
class SchemaRegistryApacheAvroDeserializationSample { /** * Main method to run this sample. * * @param args Ignore arguments. */ private static MessageContent getMessageToDeserialize(SchemaRegistryApacheAvroSerializer serializer) { PlayingCard playingCard = new PlayingCard(); playingCard.setCardValue(5); playingCard.setIsFaceCard(false); playingCard.setPlayingCardSuit(PlayingCardSuit.SPADES); return serializer.serialize(playingCard, TypeReference.createInstance(MessageContent.class)); } private static EventData getEventDataToDeserialize(SchemaRegistryApacheAvroSerializer serializer) { PlayingCard playingCard = new PlayingCard(); playingCard.setCardValue(5); playingCard.setIsFaceCard(false); playingCard.setPlayingCardSuit(PlayingCardSuit.SPADES); return serializer.serialize(playingCard, TypeReference.createInstance(EventData.class)); } }
nit: you can do assertNotNull instead of `assertEquals(...)`
public void listRoomTestFirstRoomIsNotNullThenDeleteRoomWithOutResponse(HttpClient httpClient) { roomsAsyncClient = setupAsyncClient(httpClient, "listRoomTestFirstRoomIsValidSuccess"); assertNotNull(roomsAsyncClient); CreateRoomOptions createRoomOptions = new CreateRoomOptions() .setValidFrom(VALID_FROM) .setValidUntil(VALID_UNTIL); Mono<CommunicationRoom> createCommunicationRoom = roomsAsyncClient.createRoom(createRoomOptions); StepVerifier.create(createCommunicationRoom) .assertNext(roomResult -> { assertEquals(true, roomResult.getRoomId() != null); assertEquals(true, roomResult.getCreatedAt() != null); assertEquals(true, roomResult.getValidFrom() != null); assertEquals(true, roomResult.getValidUntil() != null); }).verifyComplete(); String roomId = createCommunicationRoom.block().getRoomId(); PagedFlux<CommunicationRoom> listRoomResponse = roomsAsyncClient.listRooms(); StepVerifier.create(listRoomResponse.take(1)) .assertNext(room -> { assertEquals(true, room.getRoomId() != null); assertEquals(true, room.getCreatedAt() != null); assertEquals(true, room.getValidFrom() != null); assertEquals(true, room.getValidUntil() != null); }) .expectComplete() .verify(); Mono<Response<Void>> deleteResponse = roomsAsyncClient.deleteRoomWithResponse(roomId); StepVerifier.create(deleteResponse) .assertNext(result -> { assertEquals(result.getStatusCode(), 204); }).verifyComplete(); }
assertEquals(true, roomResult.getRoomId() != null);
public void listRoomTestFirstRoomIsNotNullThenDeleteRoomWithOutResponse(HttpClient httpClient) { roomsAsyncClient = setupAsyncClient(httpClient, "listRoomTestFirstRoomIsValidSuccess"); assertNotNull(roomsAsyncClient); CreateRoomOptions createRoomOptions = new CreateRoomOptions() .setValidFrom(VALID_FROM) .setValidUntil(VALID_UNTIL); Mono<CommunicationRoom> createCommunicationRoom = roomsAsyncClient.createRoom(createRoomOptions); StepVerifier.create(createCommunicationRoom) .assertNext(roomResult -> { assertEquals(true, roomResult.getRoomId() != null); assertEquals(true, roomResult.getCreatedAt() != null); assertEquals(true, roomResult.getValidFrom() != null); assertEquals(true, roomResult.getValidUntil() != null); }).verifyComplete(); String roomId = createCommunicationRoom.block().getRoomId(); PagedFlux<CommunicationRoom> listRoomResponse = roomsAsyncClient.listRooms(); StepVerifier.create(listRoomResponse.take(1)) .assertNext(room -> { assertEquals(true, room.getRoomId() != null); assertEquals(true, room.getCreatedAt() != null); assertEquals(true, room.getValidFrom() != null); assertEquals(true, room.getValidUntil() != null); }) .expectComplete() .verify(); Mono<Response<Void>> deleteResponse = roomsAsyncClient.deleteRoomWithResponse(roomId); StepVerifier.create(deleteResponse) .assertNext(result -> { assertEquals(result.getStatusCode(), 204); }).verifyComplete(); }
class RoomsAsyncClientTests extends RoomsTestBase { private RoomsAsyncClient roomsAsyncClient; private CommunicationIdentityClient communicationClient; private final String nonExistRoomId = "NotExistingRoomID"; @Override protected void beforeTest() { super.beforeTest(); } @Override protected void afterTest() { super.afterTest(); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void createRoomFullCycleWithResponseStep(HttpClient httpClient) { roomsAsyncClient = setupAsyncClient(httpClient, "createRoomFullCycleWithResponse"); assertNotNull(roomsAsyncClient); RoomParticipant firstParticipant = new RoomParticipant(communicationClient.createUser()); List<RoomParticipant> participants = Arrays.asList(firstParticipant); CreateRoomOptions roomOptions = new CreateRoomOptions() .setValidFrom(VALID_FROM) .setValidUntil(VALID_UNTIL) .setParticipants(participants); Mono<Response<CommunicationRoom>> response1 = roomsAsyncClient.createRoomWithResponse(roomOptions); StepVerifier.create(response1) .assertNext(roomResult -> { assertHappyPath(roomResult, 201); }) .verifyComplete(); String roomId = response1.block().getValue().getRoomId(); UpdateRoomOptions updateOptions = new UpdateRoomOptions() .setValidFrom(VALID_FROM) .setValidUntil(VALID_FROM.plusMonths(3)); Mono<Response<CommunicationRoom>> response3 = roomsAsyncClient.updateRoomWithResponse(roomId, updateOptions); System.out.println(VALID_FROM.plusMonths(3).getDayOfYear()); StepVerifier.create(response3) .assertNext(roomResult -> { assertHappyPath(roomResult, 200); }).verifyComplete(); Mono<Response<CommunicationRoom>> response4 = roomsAsyncClient.getRoomWithResponse(roomId); StepVerifier.create(response4) .assertNext(result4 -> { assertHappyPath(result4, 200); }).verifyComplete(); Mono<Response<Void>> response5 = roomsAsyncClient.deleteRoomWithResponse(roomId); StepVerifier.create(response5) .assertNext(result5 -> { assertEquals(result5.getStatusCode(), 204); }).verifyComplete(); } @ParameterizedTest 
@MethodSource("com.azure.core.test.TestBase public void createRoomFullCycleWithOutResponseStep(HttpClient httpClient) { roomsAsyncClient = setupAsyncClient(httpClient, "createRoomFullCycleWithOutResponse"); assertNotNull(roomsAsyncClient); RoomParticipant firstParticipant = new RoomParticipant(communicationClient.createUser()); List<RoomParticipant> participants = Arrays.asList(firstParticipant); CreateRoomOptions roomOptions = new CreateRoomOptions() .setValidFrom(VALID_FROM) .setValidUntil(VALID_UNTIL) .setParticipants(participants); Mono<CommunicationRoom> response1 = roomsAsyncClient.createRoom(roomOptions); StepVerifier.create(response1) .assertNext(roomResult -> { assertEquals(true, roomResult.getRoomId() != null); assertEquals(true, roomResult.getCreatedAt() != null); assertEquals(true, roomResult.getValidFrom() != null); assertEquals(true, roomResult.getValidUntil() != null); }).verifyComplete(); String roomId = response1.block().getRoomId(); UpdateRoomOptions updateOptions = new UpdateRoomOptions() .setValidFrom(VALID_FROM) .setValidUntil(VALID_FROM.plusMonths(3)); Mono<CommunicationRoom> response3 = roomsAsyncClient.updateRoom(roomId, updateOptions); StepVerifier.create(response3) .assertNext(result3 -> { assertEquals(true, result3.getValidUntil().toEpochSecond() > VALID_FROM.toEpochSecond()); }).verifyComplete(); Mono<CommunicationRoom> response4 = roomsAsyncClient.getRoom(roomId); StepVerifier.create(response4) .assertNext(result4 -> { assertEquals(result4.getRoomId(), roomId); }).verifyComplete(); Mono<Response<Void>> response5 = roomsAsyncClient.deleteRoomWithResponse(roomId); StepVerifier.create(response5) .assertNext(result5 -> { assertEquals(result5.getStatusCode(), 204); }).verifyComplete(); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void addUpdateAndRemoveParticipantsOperationsWithFullFlow(HttpClient httpClient) { roomsAsyncClient = setupAsyncClient(httpClient, "addUpdateAndRemoveParticipantsOperationsWithFullFlow"); 
assertNotNull(roomsAsyncClient); CreateRoomOptions createRoomOptions = new CreateRoomOptions() .setValidFrom(VALID_FROM) .setValidUntil(VALID_UNTIL); Mono<CommunicationRoom> createCommunicationRoom = roomsAsyncClient.createRoom(createRoomOptions); StepVerifier.create(createCommunicationRoom) .assertNext(roomResult -> { assertEquals(true, roomResult.getRoomId() != null); assertEquals(true, roomResult.getCreatedAt() != null); assertEquals(true, roomResult.getValidFrom() != null); assertEquals(true, roomResult.getValidUntil() != null); }).verifyComplete(); String roomId = createCommunicationRoom.block().getRoomId(); PagedFlux<RoomParticipant> listParticipantsResponse1 = roomsAsyncClient.listParticipants(roomId); StepVerifier.create(listParticipantsResponse1.count()) .expectNext(0L) .verifyComplete(); RoomParticipant firstParticipant = new RoomParticipant(communicationClient.createUser()); RoomParticipant secondParticipant = new RoomParticipant(communicationClient.createUser()); RoomParticipant thirdParticipant = new RoomParticipant(communicationClient.createUser()) .setRole(ParticipantRole.CONSUMER); List<RoomParticipant> participants = Arrays.asList(firstParticipant, secondParticipant, thirdParticipant); AddOrUpdateParticipantsResult addParticipantResponse = roomsAsyncClient.addOrUpdateParticipants(roomId, participants).block(); PagedFlux<RoomParticipant> listParticipantsResponse2 = roomsAsyncClient.listParticipants(roomId); StepVerifier.create(listParticipantsResponse2.count()) .expectNext(3L) .verifyComplete(); StepVerifier.create(listParticipantsResponse2) .expectSubscription() .thenConsumeWhile(participant -> true, participant -> { if (participant.getCommunicationIdentifier().getRawId() == secondParticipant .getCommunicationIdentifier().getRawId()) { assertEquals(ParticipantRole.ATTENDEE, participant.getRole()); } }) .expectComplete() .verify(); RoomParticipant firstParticipantUpdated = new RoomParticipant(firstParticipant.getCommunicationIdentifier()) 
.setRole(ParticipantRole.CONSUMER); RoomParticipant secondParticipantUpdated = new RoomParticipant(secondParticipant.getCommunicationIdentifier()) .setRole(ParticipantRole.CONSUMER); List<RoomParticipant> participantsToUpdate = Arrays.asList(firstParticipantUpdated, secondParticipantUpdated); Mono<AddOrUpdateParticipantsResult> updateParticipantResponse = roomsAsyncClient.addOrUpdateParticipants(roomId, participantsToUpdate); StepVerifier.create(updateParticipantResponse) .assertNext(result -> { assertEquals(true, result instanceof AddOrUpdateParticipantsResult); }) .verifyComplete(); PagedFlux<RoomParticipant> listParticipantsResponse3 = roomsAsyncClient.listParticipants(roomId); StepVerifier.create(listParticipantsResponse3) .expectSubscription() .thenConsumeWhile(participant -> true, participant -> { assertEquals(ParticipantRole.CONSUMER, participant.getRole()); }) .expectComplete() .verify(); List<CommunicationIdentifier> participantsIdentifiersForParticipants = Arrays.asList( firstParticipant.getCommunicationIdentifier(), secondParticipant.getCommunicationIdentifier()); Mono<RemoveParticipantsResult> removeParticipantResponse = roomsAsyncClient.removeParticipants(roomId, participantsIdentifiersForParticipants); StepVerifier.create(removeParticipantResponse) .assertNext(result -> { assertEquals(true, result instanceof RemoveParticipantsResult); }) .verifyComplete(); PagedFlux<RoomParticipant> listParticipantsResponse4 = roomsAsyncClient.listParticipants(roomId); StepVerifier.create(listParticipantsResponse4.count()) .expectNext(1L) .verifyComplete(); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void addParticipantsOperationWithOutResponse(HttpClient httpClient) { roomsAsyncClient = setupAsyncClient(httpClient, "addParticipantsOperationWithOutResponse"); assertNotNull(roomsAsyncClient); CreateRoomOptions createRoomOptions = new CreateRoomOptions() .setValidFrom(VALID_FROM) .setValidUntil(VALID_UNTIL); Mono<CommunicationRoom> 
createCommunicationRoom = roomsAsyncClient.createRoom(createRoomOptions); StepVerifier.create(createCommunicationRoom) .assertNext(roomResult -> { assertEquals(true, roomResult.getRoomId() != null); assertEquals(true, roomResult.getCreatedAt() != null); assertEquals(true, roomResult.getValidFrom() != null); assertEquals(true, roomResult.getValidUntil() != null); }).verifyComplete(); String roomId = createCommunicationRoom.block().getRoomId(); PagedFlux<RoomParticipant> listParticipantsResponse1 = roomsAsyncClient.listParticipants(roomId); StepVerifier.create(listParticipantsResponse1.count()) .expectNext(0L) .verifyComplete(); RoomParticipant firstParticipant = new RoomParticipant(communicationClient.createUser()); RoomParticipant secondParticipant = new RoomParticipant(communicationClient.createUser()).setRole(null); RoomParticipant thirdParticipant = new RoomParticipant(communicationClient.createUser()) .setRole(ParticipantRole.CONSUMER); List<RoomParticipant> participants = Arrays.asList(firstParticipant, secondParticipant, thirdParticipant); AddOrUpdateParticipantsResult addParticipantResponse = roomsAsyncClient.addOrUpdateParticipants(roomId, participants).block(); PagedFlux<RoomParticipant> listParticipantsResponse2 = roomsAsyncClient.listParticipants(roomId); StepVerifier.create(listParticipantsResponse2.count()) .expectNext(3L) .verifyComplete(); Mono<Response<Void>> response5 = roomsAsyncClient.deleteRoomWithResponse(roomId); StepVerifier.create(response5) .assertNext(result5 -> { assertEquals(result5.getStatusCode(), 204); }).verifyComplete(); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void deleteParticipantsWithOutResponseStep(HttpClient httpClient) { roomsAsyncClient = setupAsyncClient(httpClient, "deleteParticipantsWithOutResponseStep"); assertNotNull(roomsAsyncClient); RoomParticipant firstParticipant = new RoomParticipant(communicationClient.createUser()); 
List<RoomParticipant> participants = Arrays.asList(firstParticipant); CreateRoomOptions roomOptions = new CreateRoomOptions() .setValidFrom(VALID_FROM) .setValidUntil(VALID_UNTIL) .setParticipants(participants); Mono<CommunicationRoom> response1 = roomsAsyncClient.createRoom(roomOptions); String roomId = response1.block().getRoomId(); Mono<RemoveParticipantsResult> response4 = roomsAsyncClient.removeParticipants(roomId, Arrays.asList(firstParticipant.getCommunicationIdentifier())); Mono<Response<Void>> response5 = roomsAsyncClient.deleteRoomWithResponse(roomId); StepVerifier.create(response5) .assertNext(result5 -> { assertEquals(result5.getStatusCode(), 204); }).verifyComplete(); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void updateParticipantsToDefaultRoleWithResponseStep(HttpClient httpClient) { roomsAsyncClient = setupAsyncClient(httpClient, "updateParticipantsToDefaultRoleWithResponseStep"); assertNotNull(roomsAsyncClient); RoomParticipant firstParticipant = new RoomParticipant(communicationClient.createUser()) .setRole(ParticipantRole.PRESENTER); List<RoomParticipant> participants = Arrays.asList(firstParticipant); CreateRoomOptions roomOptions = new CreateRoomOptions() .setValidFrom(VALID_FROM) .setValidUntil(VALID_UNTIL) .setParticipants(participants); Mono<Response<CommunicationRoom>> response1 = roomsAsyncClient.createRoomWithResponse(roomOptions); StepVerifier.create(response1) .assertNext(roomResult -> { assertHappyPath(roomResult, 201); }) .verifyComplete(); String roomId = response1.block().getValue().getRoomId(); List<RoomParticipant> participantToUpdate = Arrays .asList(new RoomParticipant(firstParticipant.getCommunicationIdentifier())); Mono<Response<AddOrUpdateParticipantsResult>> response2 = roomsAsyncClient .addOrUpdateParticipantsWithResponse(roomId, participantToUpdate); StepVerifier.create(response2) .assertNext(result2 -> { assertEquals(result2.getStatusCode(), 200); }).verifyComplete(); 
PagedFlux<RoomParticipant> response3 = roomsAsyncClient.listParticipants(roomId); StepVerifier.create(response3).assertNext(response4 -> { assertEquals(ParticipantRole.ATTENDEE, response4.getRole()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void listParticipantsWithOutResponseStep(HttpClient httpClient) { roomsAsyncClient = setupAsyncClient(httpClient, "listParticipantsWithOutResponseStep"); assertNotNull(roomsAsyncClient); RoomParticipant firstParticipant = new RoomParticipant(communicationClient.createUser()) .setRole(ParticipantRole.PRESENTER); RoomParticipant secondParticipant = new RoomParticipant(communicationClient.createUser()) .setRole(ParticipantRole.PRESENTER); RoomParticipant thirdParticipant = new RoomParticipant(communicationClient.createUser()) .setRole(ParticipantRole.CONSUMER); List<RoomParticipant> participants = Arrays.asList(firstParticipant, secondParticipant, thirdParticipant); CreateRoomOptions roomOptions = new CreateRoomOptions() .setValidFrom(VALID_FROM) .setValidUntil(VALID_UNTIL) .setParticipants(participants); Mono<CommunicationRoom> response1 = roomsAsyncClient.createRoom(roomOptions); String roomId = response1.block().getRoomId(); PagedFlux<RoomParticipant> response2 = roomsAsyncClient.listParticipants(roomId); StepVerifier.create(response2.count()) .expectNext(3L) .verifyComplete(); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void getRoomWithUnexistingRoomId(HttpClient httpClient) { roomsAsyncClient = setupAsyncClient(httpClient, "getRoomWithUnexistingRoomId"); assertNotNull(roomsAsyncClient); StepVerifier.create(roomsAsyncClient.getRoom(nonExistRoomId)).verifyError(); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void deleteRoomWithConnectionString(HttpClient httpClient) { roomsAsyncClient = setupAsyncClient(httpClient, "deleteRoomWithConnectionString"); assertNotNull(roomsAsyncClient); 
StepVerifier.create(roomsAsyncClient.deleteRoomWithResponse(nonExistRoomId)).verifyError(); } private RoomsAsyncClient setupAsyncClient(HttpClient httpClient, String testName) { RoomsClientBuilder builder = getRoomsClientWithConnectionString(httpClient, RoomsServiceVersion.V2023_03_31_PREVIEW); communicationClient = getCommunicationIdentityClientBuilder(httpClient).buildClient(); return addLoggingPolicy(builder, testName).buildAsyncClient(); } }
class RoomsAsyncClientTests extends RoomsTestBase { private RoomsAsyncClient roomsAsyncClient; private CommunicationIdentityClient communicationClient; private final String nonExistRoomId = "NotExistingRoomID"; @Override protected void beforeTest() { super.beforeTest(); } @Override protected void afterTest() { super.afterTest(); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void createRoomFullCycleWithResponseStep(HttpClient httpClient) { roomsAsyncClient = setupAsyncClient(httpClient, "createRoomFullCycleWithResponse"); assertNotNull(roomsAsyncClient); RoomParticipant firstParticipant = new RoomParticipant(communicationClient.createUser()); List<RoomParticipant> participants = Arrays.asList(firstParticipant); CreateRoomOptions roomOptions = new CreateRoomOptions() .setValidFrom(VALID_FROM) .setValidUntil(VALID_UNTIL) .setParticipants(participants); Mono<Response<CommunicationRoom>> response1 = roomsAsyncClient.createRoomWithResponse(roomOptions); StepVerifier.create(response1) .assertNext(roomResult -> { assertHappyPath(roomResult, 201); }) .verifyComplete(); String roomId = response1.block().getValue().getRoomId(); UpdateRoomOptions updateOptions = new UpdateRoomOptions() .setValidFrom(VALID_FROM) .setValidUntil(VALID_FROM.plusMonths(3)); Mono<Response<CommunicationRoom>> response3 = roomsAsyncClient.updateRoomWithResponse(roomId, updateOptions); System.out.println(VALID_FROM.plusMonths(3).getDayOfYear()); StepVerifier.create(response3) .assertNext(roomResult -> { assertHappyPath(roomResult, 200); }).verifyComplete(); Mono<Response<CommunicationRoom>> response4 = roomsAsyncClient.getRoomWithResponse(roomId); StepVerifier.create(response4) .assertNext(result4 -> { assertHappyPath(result4, 200); }).verifyComplete(); Mono<Response<Void>> response5 = roomsAsyncClient.deleteRoomWithResponse(roomId); StepVerifier.create(response5) .assertNext(result5 -> { assertEquals(result5.getStatusCode(), 204); }).verifyComplete(); } @ParameterizedTest 
@MethodSource("com.azure.core.test.TestBase public void createRoomFullCycleWithOutResponseStep(HttpClient httpClient) { roomsAsyncClient = setupAsyncClient(httpClient, "createRoomFullCycleWithOutResponse"); assertNotNull(roomsAsyncClient); RoomParticipant firstParticipant = new RoomParticipant(communicationClient.createUser()); List<RoomParticipant> participants = Arrays.asList(firstParticipant); CreateRoomOptions roomOptions = new CreateRoomOptions() .setValidFrom(VALID_FROM) .setValidUntil(VALID_UNTIL) .setParticipants(participants); Mono<CommunicationRoom> response1 = roomsAsyncClient.createRoom(roomOptions); StepVerifier.create(response1) .assertNext(roomResult -> { assertEquals(true, roomResult.getRoomId() != null); assertEquals(true, roomResult.getCreatedAt() != null); assertEquals(true, roomResult.getValidFrom() != null); assertEquals(true, roomResult.getValidUntil() != null); }).verifyComplete(); String roomId = response1.block().getRoomId(); UpdateRoomOptions updateOptions = new UpdateRoomOptions() .setValidFrom(VALID_FROM) .setValidUntil(VALID_FROM.plusMonths(3)); Mono<CommunicationRoom> response3 = roomsAsyncClient.updateRoom(roomId, updateOptions); StepVerifier.create(response3) .assertNext(result3 -> { assertEquals(true, result3.getValidUntil().toEpochSecond() > VALID_FROM.toEpochSecond()); }).verifyComplete(); Mono<CommunicationRoom> response4 = roomsAsyncClient.getRoom(roomId); StepVerifier.create(response4) .assertNext(result4 -> { assertEquals(result4.getRoomId(), roomId); }).verifyComplete(); Mono<Response<Void>> response5 = roomsAsyncClient.deleteRoomWithResponse(roomId); StepVerifier.create(response5) .assertNext(result5 -> { assertEquals(result5.getStatusCode(), 204); }).verifyComplete(); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void addUpdateAndRemoveParticipantsOperationsWithFullFlow(HttpClient httpClient) { roomsAsyncClient = setupAsyncClient(httpClient, "addUpdateAndRemoveParticipantsOperationsWithFullFlow"); 
assertNotNull(roomsAsyncClient); CreateRoomOptions createRoomOptions = new CreateRoomOptions() .setValidFrom(VALID_FROM) .setValidUntil(VALID_UNTIL); Mono<CommunicationRoom> createCommunicationRoom = roomsAsyncClient.createRoom(createRoomOptions); StepVerifier.create(createCommunicationRoom) .assertNext(roomResult -> { assertEquals(true, roomResult.getRoomId() != null); assertEquals(true, roomResult.getCreatedAt() != null); assertEquals(true, roomResult.getValidFrom() != null); assertEquals(true, roomResult.getValidUntil() != null); }).verifyComplete(); String roomId = createCommunicationRoom.block().getRoomId(); PagedFlux<RoomParticipant> listParticipantsResponse1 = roomsAsyncClient.listParticipants(roomId); StepVerifier.create(listParticipantsResponse1.count()) .expectNext(0L) .verifyComplete(); RoomParticipant firstParticipant = new RoomParticipant(communicationClient.createUser()); RoomParticipant secondParticipant = new RoomParticipant(communicationClient.createUser()); RoomParticipant thirdParticipant = new RoomParticipant(communicationClient.createUser()) .setRole(ParticipantRole.CONSUMER); List<RoomParticipant> participants = Arrays.asList(firstParticipant, secondParticipant, thirdParticipant); AddOrUpdateParticipantsResult addParticipantResponse = roomsAsyncClient.addOrUpdateParticipants(roomId, participants).block(); PagedFlux<RoomParticipant> listParticipantsResponse2 = roomsAsyncClient.listParticipants(roomId); StepVerifier.create(listParticipantsResponse2.count()) .expectNext(3L) .verifyComplete(); StepVerifier.create(listParticipantsResponse2) .expectSubscription() .thenConsumeWhile(participant -> true, participant -> { if (participant.getCommunicationIdentifier().getRawId() == secondParticipant .getCommunicationIdentifier().getRawId()) { assertEquals(ParticipantRole.ATTENDEE, participant.getRole()); } }) .expectComplete() .verify(); RoomParticipant firstParticipantUpdated = new RoomParticipant(firstParticipant.getCommunicationIdentifier()) 
.setRole(ParticipantRole.CONSUMER); RoomParticipant secondParticipantUpdated = new RoomParticipant(secondParticipant.getCommunicationIdentifier()) .setRole(ParticipantRole.CONSUMER); List<RoomParticipant> participantsToUpdate = Arrays.asList(firstParticipantUpdated, secondParticipantUpdated); Mono<AddOrUpdateParticipantsResult> updateParticipantResponse = roomsAsyncClient.addOrUpdateParticipants(roomId, participantsToUpdate); StepVerifier.create(updateParticipantResponse) .assertNext(result -> { assertEquals(true, result instanceof AddOrUpdateParticipantsResult); }) .verifyComplete(); PagedFlux<RoomParticipant> listParticipantsResponse3 = roomsAsyncClient.listParticipants(roomId); StepVerifier.create(listParticipantsResponse3) .expectSubscription() .thenConsumeWhile(participant -> true, participant -> { assertEquals(ParticipantRole.CONSUMER, participant.getRole()); }) .expectComplete() .verify(); List<CommunicationIdentifier> participantsIdentifiersForParticipants = Arrays.asList( firstParticipant.getCommunicationIdentifier(), secondParticipant.getCommunicationIdentifier()); Mono<RemoveParticipantsResult> removeParticipantResponse = roomsAsyncClient.removeParticipants(roomId, participantsIdentifiersForParticipants); StepVerifier.create(removeParticipantResponse) .assertNext(result -> { assertEquals(true, result instanceof RemoveParticipantsResult); }) .verifyComplete(); PagedFlux<RoomParticipant> listParticipantsResponse4 = roomsAsyncClient.listParticipants(roomId); StepVerifier.create(listParticipantsResponse4.count()) .expectNext(1L) .verifyComplete(); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void addParticipantsOperationWithOutResponse(HttpClient httpClient) { roomsAsyncClient = setupAsyncClient(httpClient, "addParticipantsOperationWithOutResponse"); assertNotNull(roomsAsyncClient); CreateRoomOptions createRoomOptions = new CreateRoomOptions() .setValidFrom(VALID_FROM) .setValidUntil(VALID_UNTIL); Mono<CommunicationRoom> 
createCommunicationRoom = roomsAsyncClient.createRoom(createRoomOptions); StepVerifier.create(createCommunicationRoom) .assertNext(roomResult -> { assertEquals(true, roomResult.getRoomId() != null); assertEquals(true, roomResult.getCreatedAt() != null); assertEquals(true, roomResult.getValidFrom() != null); assertEquals(true, roomResult.getValidUntil() != null); }).verifyComplete(); String roomId = createCommunicationRoom.block().getRoomId(); PagedFlux<RoomParticipant> listParticipantsResponse1 = roomsAsyncClient.listParticipants(roomId); StepVerifier.create(listParticipantsResponse1.count()) .expectNext(0L) .verifyComplete(); RoomParticipant firstParticipant = new RoomParticipant(communicationClient.createUser()); RoomParticipant secondParticipant = new RoomParticipant(communicationClient.createUser()).setRole(null); RoomParticipant thirdParticipant = new RoomParticipant(communicationClient.createUser()) .setRole(ParticipantRole.CONSUMER); List<RoomParticipant> participants = Arrays.asList(firstParticipant, secondParticipant, thirdParticipant); AddOrUpdateParticipantsResult addParticipantResponse = roomsAsyncClient.addOrUpdateParticipants(roomId, participants).block(); PagedFlux<RoomParticipant> listParticipantsResponse2 = roomsAsyncClient.listParticipants(roomId); StepVerifier.create(listParticipantsResponse2.count()) .expectNext(3L) .verifyComplete(); Mono<Response<Void>> response5 = roomsAsyncClient.deleteRoomWithResponse(roomId); StepVerifier.create(response5) .assertNext(result5 -> { assertEquals(result5.getStatusCode(), 204); }).verifyComplete(); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void deleteParticipantsWithOutResponseStep(HttpClient httpClient) { roomsAsyncClient = setupAsyncClient(httpClient, "deleteParticipantsWithOutResponseStep"); assertNotNull(roomsAsyncClient); RoomParticipant firstParticipant = new RoomParticipant(communicationClient.createUser()); 
List<RoomParticipant> participants = Arrays.asList(firstParticipant); CreateRoomOptions roomOptions = new CreateRoomOptions() .setValidFrom(VALID_FROM) .setValidUntil(VALID_UNTIL) .setParticipants(participants); Mono<CommunicationRoom> response1 = roomsAsyncClient.createRoom(roomOptions); String roomId = response1.block().getRoomId(); Mono<RemoveParticipantsResult> response4 = roomsAsyncClient.removeParticipants(roomId, Arrays.asList(firstParticipant.getCommunicationIdentifier())); Mono<Response<Void>> response5 = roomsAsyncClient.deleteRoomWithResponse(roomId); StepVerifier.create(response5) .assertNext(result5 -> { assertEquals(result5.getStatusCode(), 204); }).verifyComplete(); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void updateParticipantsToDefaultRoleWithResponseStep(HttpClient httpClient) { roomsAsyncClient = setupAsyncClient(httpClient, "updateParticipantsToDefaultRoleWithResponseStep"); assertNotNull(roomsAsyncClient); RoomParticipant firstParticipant = new RoomParticipant(communicationClient.createUser()) .setRole(ParticipantRole.PRESENTER); List<RoomParticipant> participants = Arrays.asList(firstParticipant); CreateRoomOptions roomOptions = new CreateRoomOptions() .setValidFrom(VALID_FROM) .setValidUntil(VALID_UNTIL) .setParticipants(participants); Mono<Response<CommunicationRoom>> response1 = roomsAsyncClient.createRoomWithResponse(roomOptions); StepVerifier.create(response1) .assertNext(roomResult -> { assertHappyPath(roomResult, 201); }) .verifyComplete(); String roomId = response1.block().getValue().getRoomId(); List<RoomParticipant> participantToUpdate = Arrays .asList(new RoomParticipant(firstParticipant.getCommunicationIdentifier())); Mono<Response<AddOrUpdateParticipantsResult>> response2 = roomsAsyncClient .addOrUpdateParticipantsWithResponse(roomId, participantToUpdate); StepVerifier.create(response2) .assertNext(result2 -> { assertEquals(result2.getStatusCode(), 200); }).verifyComplete(); 
PagedFlux<RoomParticipant> response3 = roomsAsyncClient.listParticipants(roomId); StepVerifier.create(response3).assertNext(response4 -> { assertEquals(ParticipantRole.ATTENDEE, response4.getRole()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void listParticipantsWithOutResponseStep(HttpClient httpClient) { roomsAsyncClient = setupAsyncClient(httpClient, "listParticipantsWithOutResponseStep"); assertNotNull(roomsAsyncClient); RoomParticipant firstParticipant = new RoomParticipant(communicationClient.createUser()) .setRole(ParticipantRole.PRESENTER); RoomParticipant secondParticipant = new RoomParticipant(communicationClient.createUser()) .setRole(ParticipantRole.PRESENTER); RoomParticipant thirdParticipant = new RoomParticipant(communicationClient.createUser()) .setRole(ParticipantRole.CONSUMER); List<RoomParticipant> participants = Arrays.asList(firstParticipant, secondParticipant, thirdParticipant); CreateRoomOptions roomOptions = new CreateRoomOptions() .setValidFrom(VALID_FROM) .setValidUntil(VALID_UNTIL) .setParticipants(participants); Mono<CommunicationRoom> response1 = roomsAsyncClient.createRoom(roomOptions); String roomId = response1.block().getRoomId(); PagedFlux<RoomParticipant> response2 = roomsAsyncClient.listParticipants(roomId); StepVerifier.create(response2.count()) .expectNext(3L) .verifyComplete(); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void getRoomWithUnexistingRoomId(HttpClient httpClient) { roomsAsyncClient = setupAsyncClient(httpClient, "getRoomWithUnexistingRoomId"); assertNotNull(roomsAsyncClient); StepVerifier.create(roomsAsyncClient.getRoom(nonExistRoomId)).verifyError(); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void deleteRoomWithConnectionString(HttpClient httpClient) { roomsAsyncClient = setupAsyncClient(httpClient, "deleteRoomWithConnectionString"); assertNotNull(roomsAsyncClient); 
StepVerifier.create(roomsAsyncClient.deleteRoomWithResponse(nonExistRoomId)).verifyError(); } private RoomsAsyncClient setupAsyncClient(HttpClient httpClient, String testName) { RoomsClientBuilder builder = getRoomsClientWithConnectionString(httpClient, RoomsServiceVersion.V2023_03_31_PREVIEW); communicationClient = getCommunicationIdentityClientBuilder(httpClient).buildClient(); return addLoggingPolicy(builder, testName).buildAsyncClient(); } }
Mismatched method name with comment.
public void onNext(AmqpReceiveLink receiver) { if (done) { receiver.closeAsync().subscribe(); Operators.onNextDropped(receiver, messageSubscriber.currentContext()); return; } final ReactorReceiverMediator mediator = new ReactorReceiverMediator(this, receiver, prefetch, creditFlowMode, logger); if (mediatorHolder.trySet(mediator)) { mediator.onParentReady(); } else { logger.atWarning() .addKeyValue("oldLinkName", mediatorHolder.getLinkName()) .addKeyValue(LINK_NAME_KEY, receiver.getLinkName()) .addKeyValue(ENTITY_PATH_KEY, receiver.getEntityPath()) .log("Got a AmqpReceiveLink when the MessageFlux is already terminated."); receiver.closeAsync().subscribe(); Operators.onDiscard(receiver, messageSubscriber.currentContext()); } }
public void onNext(AmqpReceiveLink receiver) { if (done) { receiver.closeAsync().subscribe(); Operators.onNextDropped(receiver, messageSubscriber.currentContext()); return; } final ReactorReceiverMediator mediator = new ReactorReceiverMediator(this, receiver, prefetch, creditFlowMode, logger); if (mediatorHolder.trySet(mediator)) { mediator.onParentReady(); } else { logger.atWarning() .addKeyValue("oldLinkName", mediatorHolder.getLinkName()) .addKeyValue(LINK_NAME_KEY, receiver.getLinkName()) .addKeyValue(ENTITY_PATH_KEY, receiver.getEntityPath()) .log("Got a AmqpReceiveLink when the MessageFlux is already terminated."); receiver.closeAsync().subscribe(); Operators.onDiscard(receiver, messageSubscriber.currentContext()); } }
class RecoverableReactorReceiver implements CoreSubscriber<AmqpReceiveLink>, Subscription { private final MediatorHolder mediatorHolder = new MediatorHolder(); private final MessageFlux parent; private final int prefetch; private final CreditFlowMode creditFlowMode; private final AmqpRetryPolicy retryPolicy; private final ClientLogger logger; private final AtomicInteger retryAttempts = new AtomicInteger(); private final CoreSubscriber<? super Message> messageSubscriber; private Subscription upstream; private volatile long requested; @SuppressWarnings("rawtypes") private static final AtomicLongFieldUpdater<RecoverableReactorReceiver> REQUESTED = AtomicLongFieldUpdater.newUpdater(RecoverableReactorReceiver.class, "requested"); private volatile int wip; @SuppressWarnings("rawtypes") private static final AtomicIntegerFieldUpdater<RecoverableReactorReceiver> WIP = AtomicIntegerFieldUpdater.newUpdater(RecoverableReactorReceiver.class, "wip"); private volatile boolean done; private volatile boolean cancelled; volatile Throwable error; @SuppressWarnings("rawtypes") static final AtomicReferenceFieldUpdater<RecoverableReactorReceiver, Throwable> ERROR = AtomicReferenceFieldUpdater.newUpdater(RecoverableReactorReceiver.class, Throwable.class, "error"); /** * Create a recoverable-receiver that supports the message-flux to stream messages from the receiver attached * to a messaging entity to the message-flux's downstream subscriber and recover from receiver termination. * * @param parent the parent message-flux. * @param messageSubscriber the message-flux's downstream subscriber to notify the events. * @param prefetch the number of messages that the operator should prefetch from the messaging entity * (for a less chatty network and faster message processing on the client). * @param creditFlowMode the mode indicating how to compute the credit and when to send it to the broker. * @param retryPolicy the retry policy to use to recover from receiver termination. 
*/ RecoverableReactorReceiver(MessageFlux parent, CoreSubscriber<? super Message> messageSubscriber, int prefetch, CreditFlowMode creditFlowMode, AmqpRetryPolicy retryPolicy) { this.parent = parent; this.messageSubscriber = messageSubscriber; this.prefetch = prefetch; this.creditFlowMode = creditFlowMode; this.retryPolicy = retryPolicy; this.logger = parent.logger; } /** * Invoked by the upstream in response to message-flux subscribing to it. * * @param s the subscription handle for requesting receivers from the upstream or terminating upstream * through cancellation when it is no longer needed. */ @Override public void onSubscribe(Subscription s) { if (Operators.validate(upstream, s)) { upstream = s; messageSubscriber.onSubscribe(this); s.request(1); } } /** * Invoked by the upstream to deliver new receiver. * * @param receiver the new receiver. */ @Override /** * Signals operator termination with error. * <ul> * <li>Invoked by the upstream when it errors, or</li> * <li>Invoked by the drain-loop when it detects a 'non-retriable or retry exhaust' error, or </li> * <li>Invoked by the drain-loop when it detects the receiver signaled terminal completion and retry * is disabled (i.e., NULL_RETRY_POLICY is set).</li> * </ul> * * @param e the error signaled. */ @Override public void onError(Throwable e) { if (done) { Operators.onErrorDropped(e, messageSubscriber.currentContext()); return; } if (Exceptions.addThrowable(ERROR, this, e)) { done = true; final String logMessage; if (retryPolicy == NULL_RETRY_POLICY) { logMessage = "Terminal error signal from Upstream|Receiver arrived at MessageFlux."; } else { logMessage = "Terminal error signal from Upstream|RetryLoop arrived at MessageFlux."; } mediatorHolder.withReceiverInfo(logger.atWarning()).log(logMessage, e); drain(null); } else { Operators.onErrorDropped(e, messageSubscriber.currentContext()); } } /** * Signals operator termination with completion. 
* <ul> * <li>Invoked by the upstream when it completes, or</li> * <li>Invoked by the the drain-loop when it detects first receiver signaled terminal completion and retry * is disabled (i.e., NULL_RETRY_POLICY is set).</li> * </ul> */ @Override public void onComplete() { if (done) { return; } done = true; final String logMessage; if (retryPolicy == NULL_RETRY_POLICY) { logMessage = "Terminal completion signal from Upstream|Receiver arrived at MessageFlux."; } else { logMessage = "Terminal completion signal from Upstream arrived at MessageFlux."; } mediatorHolder.withReceiverInfo(logger.atWarning()).log(logMessage); drain(null); } /** * Invoked by the downstream message subscriber to signal the demand for messages. Whatever has been * requested can be sent downstream, so only signal the demand for what can be safely handled. * No messages will be sent downstream until the demand is signaled. * * @param n the number of messages to send to downstream. */ @Override public void request(long n) { if (Operators.validate(n)) { Operators.addCap(REQUESTED, this, n); drain(null); } } /** * Invoked by downstream to signal termination by cancellation. */ @Override public void cancel() { if (cancelled) { return; } cancelled = true; mediatorHolder.withReceiverInfo(logger.atWarning()) .log("Downstream cancellation signal arrived at MessageFlux."); if (WIP.getAndIncrement(this) == 0) { upstream.cancel(); mediatorHolder.freeze(); } } /** * Invoked by the new mediator when it is ready to be used. A mediator constructed in 'onNext' moves * to ready state when its backing receiver is active. * * @param updateDispositionFunc the function to disposition messages from mediator's backing receiver. */ void onMediatorReady(BiFunction<String, DeliveryState, Mono<Void>> updateDispositionFunc) { retryAttempts.set(0); parent.onNextUpdateDispositionFunction(updateDispositionFunc); drain(null); } /** * The serialized entry point to drain-loop. 
* * @param dataSignal the message to drop if the operator is terminated by cancellation. */ void drain(Message dataSignal) { if (WIP.getAndIncrement(this) != 0) { if (dataSignal != null && cancelled) { Operators.onDiscard(dataSignal, messageSubscriber.currentContext()); } return; } drainLoop(); } /** * The serialized drain-loop (implementation patterns inspired from RxJava, Reactor Operators). * Reference: 'Operator Concurrency Primitives' series https: */ private void drainLoop() { int missed = 1; CoreSubscriber<? super Message> downstream = messageSubscriber; for (; ;) { boolean d = done; ReactorReceiverMediator mediator = mediatorHolder.mediator; boolean hasMediator = mediator != null; if (terminateIfCancelled(downstream, null)) { return; } if (terminateIfErrorOrCompletionSignaled(d, downstream, null)) { return; } long r = requested; long emitted = 0L; boolean mediatorTerminatedAndDrained = false; if (r != 0L && hasMediator) { Queue<Message> q = mediator.queue; while (emitted != r) { Message message = q.poll(); if (terminateIfCancelled(downstream, message)) { return; } if (terminateIfErrorOrCompletionSignaled(done, downstream, message)) { return; } boolean empty = message == null; if (empty && mediator.done) { mediatorTerminatedAndDrained = true; break; } if (empty) { break; } messageSubscriber.onNext(message); emitted++; } if (emitted == r) { if (mediator.queue.isEmpty() && mediator.done) { mediatorTerminatedAndDrained = true; } } if (emitted != 0 && r != Long.MAX_VALUE) { r = REQUESTED.addAndGet(this, -emitted); } mediator.update(r, emitted); } if (r == 0L && hasMediator) { if (terminateIfCancelled(downstream, null)) { return; } if (terminateIfErrorOrCompletionSignaled(done, downstream, null)) { return; } if (mediator.queue.isEmpty() && mediator.done) { mediatorTerminatedAndDrained = true; } } if (mediatorTerminatedAndDrained && !mediator.isRetryInitiated) { mediator.isRetryInitiated = true; mediator.closeAsync().subscribe(); 
setTerminationSignalOrScheduleNextMediatorRequest(mediator.error, downstream, mediatorHolder); } missed = WIP.addAndGet(this, -missed); if (missed == 0) { break; } } } /** * CONTRACT: Never invoke from the outside of serialized drain-loop. * <p/> * See if downstream signaled cancellation to terminate the operator, if so, react to the cancellation. * * @param downstream the downstream. * @param messageDropped the message that gets dropped if cancellation was signaled. * @return true if canceled, false otherwise. */ private boolean terminateIfCancelled(CoreSubscriber<? super Message> downstream, Message messageDropped) { if (cancelled) { Operators.onDiscard(messageDropped, downstream.currentContext()); upstream.cancel(); mediatorHolder.freeze(); return true; } return false; } /** * CONTRACT: Never invoke from the outside of serialized drain-loop. * <p/> * See if there is a pending signal for the operator termination with error or completion, if so, react to it * by terminating downstream. * * @param d indicate if the operator termination was signaled. * @param downstream the downstream. * @param messageDropped the message that gets dropped if termination happened. * @return true if terminated, false otherwise. */ private boolean terminateIfErrorOrCompletionSignaled(boolean d, CoreSubscriber<? 
super Message> downstream, Message messageDropped) { if (d) { final LoggingEventBuilder logBuilder = mediatorHolder.withReceiverInfo(logger.atWarning()); Throwable e = error; if (e != null && e != Exceptions.TERMINATED) { e = Exceptions.terminate(ERROR, this); Operators.onDiscard(messageDropped, downstream.currentContext()); upstream.cancel(); mediatorHolder.freeze(); logBuilder.log("MessageFlux reached a terminal error-state, signaling it downstream.", e); downstream.onError(e); return true; } Operators.onDiscard(messageDropped, downstream.currentContext()); upstream.cancel(); mediatorHolder.freeze(); logBuilder.log("MessageFlux reached a terminal completion-state, signaling it downstream."); downstream.onComplete(); return true; } return false; } /** * CONTRACT: Never invoke from the outside of serialized drain-loop. * <p/> * 1. When retry is enabled (i.e., NULL_RETRY_POLICY is not set) then schedule request (retry) for the next mediator if * <ul> * <li>the operator is not in a termination signaled state,</li> * <li> and * <ul> * <li>there is no error Or</li> * <li>error is retriable and the retry is not exhausted.</li> * </ul> * </li> * </ul> * 2. If retry is enabled (i.e., NULL_RETRY_POLICY is not set) and there is 'non-retriable or retry exhaust' * error, then set an error signal for the drain-loop to terminate the operator. * <br/> * 3. If retry is disabled (i.e., NULL_RETRY_POLICY is set), then set an error signal (if first receiver error-ed) * or completion signal (if first receiver completed) for the drain-loop to terminate the operator. * <br/> * @param error the error that leads to error-ed termination of the last mediator or {@code null} * if terminated with completion. * @param downstream the downstream. * @param mediatorHolder the mediator holder. */ private void setTerminationSignalOrScheduleNextMediatorRequest(Throwable error, CoreSubscriber<? 
super Message> downstream, MediatorHolder mediatorHolder) { final LoggingEventBuilder logBuilder = mediatorHolder.withReceiverInfo(logger.atWarning()); if (cancelled || done) { logBuilder.log("MessageFlux reached terminal-state [done:{}, cancelled:{}].", done, cancelled); return; } if (retryPolicy == NULL_RETRY_POLICY) { if (error == null) { onComplete(); } else { onError(error); } return; } final Duration delay; if (error == null) { delay = Duration.ofSeconds(1); logBuilder.addKeyValue("retryAfter", delay.toMillis()) .log("Current mediator reached terminal completion-state (retriable:true)."); } else { final int attempt = retryAttempts.incrementAndGet(); delay = retryPolicy.calculateRetryDelay(error, attempt); if (delay != null) { logBuilder.addKeyValue("attempt", attempt) .addKeyValue("retryAfter", delay.toMillis()) .log("Current mediator reached terminal error-state (retriable:true).", error); } else { logBuilder.addKeyValue("attempt", attempt) .log("Current mediator reached terminal error-state (retriable:false) Or MessageFlux retries exhausted.", error); onError(error); return; } } try { scheduleNextMediatorRequest(delay, mediatorHolder); } catch (RejectedExecutionException ree) { final RuntimeException e = Operators.onRejectedExecution(ree, downstream.currentContext()); mediatorHolder.withReceiverInfo(logger.atWarning()) .log("Unable to schedule a request for a new mediator (retriable:false).", e); onError(e); } } /** * Schedule a task to request a new mediator. * * @param delay the backoff duration before requesting the next mediator. * @param mediatorHolder the mediator holder. * @throws RejectedExecutionException if the scheduler is unable to schedule the task. 
*/ private void scheduleNextMediatorRequest(Duration delay, MediatorHolder mediatorHolder) { final Runnable task = () -> { final LoggingEventBuilder logBuilder = mediatorHolder.withReceiverInfo(logger.atWarning()); if (cancelled || done) { logBuilder.log("During the backoff, MessageFlux reached terminal-state [done:{}, cancelled:{}].", done, cancelled); return; } logBuilder.log("Requesting a new mediator."); upstream.request(1); }; mediatorHolder.nextMediatorRequestDisposable = Schedulers.parallel().schedule(task, delay.toMillis(), TimeUnit.MILLISECONDS); } }
class RecoverableReactorReceiver implements CoreSubscriber<AmqpReceiveLink>, Subscription { private final MediatorHolder mediatorHolder = new MediatorHolder(); private final MessageFlux parent; private final int prefetch; private final CreditFlowMode creditFlowMode; private final AmqpRetryPolicy retryPolicy; private final ClientLogger logger; private final AtomicInteger retryAttempts = new AtomicInteger(); private final CoreSubscriber<? super Message> messageSubscriber; private Subscription upstream; private volatile long requested; @SuppressWarnings("rawtypes") private static final AtomicLongFieldUpdater<RecoverableReactorReceiver> REQUESTED = AtomicLongFieldUpdater.newUpdater(RecoverableReactorReceiver.class, "requested"); private volatile int wip; @SuppressWarnings("rawtypes") private static final AtomicIntegerFieldUpdater<RecoverableReactorReceiver> WIP = AtomicIntegerFieldUpdater.newUpdater(RecoverableReactorReceiver.class, "wip"); private volatile boolean done; private volatile boolean cancelled; private volatile Throwable error; @SuppressWarnings("rawtypes") private static final AtomicReferenceFieldUpdater<RecoverableReactorReceiver, Throwable> ERROR = AtomicReferenceFieldUpdater.newUpdater(RecoverableReactorReceiver.class, Throwable.class, "error"); /** * Create a recoverable-receiver that supports the message-flux to stream messages from the receiver attached * to a messaging entity to the message-flux's downstream subscriber and recover from receiver termination. * * @param parent the parent message-flux. * @param messageSubscriber the message-flux's downstream subscriber to notify the events. * @param prefetch the number of messages that the operator should prefetch from the messaging entity * (for a less chatty network and faster message processing on the client). * @param creditFlowMode the mode indicating how to compute the credit and when to send it to the broker. * @param retryPolicy the retry policy to use to recover from receiver termination. 
*/ RecoverableReactorReceiver(MessageFlux parent, CoreSubscriber<? super Message> messageSubscriber, int prefetch, CreditFlowMode creditFlowMode, AmqpRetryPolicy retryPolicy) { this.parent = parent; this.messageSubscriber = messageSubscriber; this.prefetch = prefetch; this.creditFlowMode = creditFlowMode; this.retryPolicy = retryPolicy; this.logger = parent.logger; } /** * Invoked by the upstream in response to message-flux subscribing to it. * * @param s the subscription handle for requesting receivers from the upstream or terminating upstream * through cancellation when it is no longer needed. */ @Override public void onSubscribe(Subscription s) { if (Operators.validate(upstream, s)) { upstream = s; messageSubscriber.onSubscribe(this); s.request(1); } } /** * Invoked by the upstream to deliver new receiver. * * @param receiver the new receiver. */ @Override /** * Signals operator termination with error. * <ul> * <li>Invoked by the upstream when it errors, or</li> * <li>Invoked by the drain-loop when it detects a 'non-retriable or retry exhaust' error, or </li> * <li>Invoked by the drain-loop when it detects the receiver signaled terminal completion and retry * is disabled (i.e., NULL_RETRY_POLICY is set).</li> * </ul> * * @param e the error signaled. */ @Override public void onError(Throwable e) { if (done) { Operators.onErrorDropped(e, messageSubscriber.currentContext()); return; } if (Exceptions.addThrowable(ERROR, this, e)) { done = true; final String logMessage; if (retryPolicy == NULL_RETRY_POLICY) { logMessage = "Terminal error signal from Upstream|Receiver arrived at MessageFlux."; } else { logMessage = "Terminal error signal from Upstream|RetryLoop arrived at MessageFlux."; } mediatorHolder.withReceiverInfo(logger.atWarning()).log(logMessage, e); drain(null); } else { Operators.onErrorDropped(e, messageSubscriber.currentContext()); } } /** * Signals operator termination with completion. 
* <ul> * <li>Invoked by the upstream when it completes, or</li> * <li>Invoked by the the drain-loop when it detects first receiver signaled terminal completion and retry * is disabled (i.e., NULL_RETRY_POLICY is set).</li> * </ul> */ @Override public void onComplete() { if (done) { return; } done = true; final String logMessage; if (retryPolicy == NULL_RETRY_POLICY) { logMessage = "Terminal completion signal from Upstream|Receiver arrived at MessageFlux."; } else { logMessage = "Terminal completion signal from Upstream arrived at MessageFlux."; } mediatorHolder.withReceiverInfo(logger.atWarning()).log(logMessage); drain(null); } /** * Invoked by the downstream message subscriber to signal the demand for messages. Whatever has been * requested can be sent downstream, so only signal the demand for what can be safely handled. * No messages will be sent downstream until the demand is signaled. * * @param n the number of messages to send to downstream. */ @Override public void request(long n) { if (Operators.validate(n)) { Operators.addCap(REQUESTED, this, n); drain(null); } } /** * Invoked by downstream to signal termination by cancellation. */ @Override public void cancel() { if (cancelled) { return; } cancelled = true; mediatorHolder.withReceiverInfo(logger.atWarning()) .log("Downstream cancellation signal arrived at MessageFlux."); if (WIP.getAndIncrement(this) == 0) { upstream.cancel(); mediatorHolder.freeze(); } } /** * Invoked by the new mediator when it is ready to be used. A mediator constructed in 'onNext' moves * to ready state when its backing receiver is active. * * @param updateDispositionFunc the function to disposition messages from mediator's backing receiver. */ void onMediatorReady(DispositionFunction updateDispositionFunc) { retryAttempts.set(0); parent.onNextUpdateDispositionFunction(updateDispositionFunc); drain(null); } /** * The serialized entry point to drain-loop. 
* * @param dataSignal the message to drop if the operator is terminated by cancellation. */ void drain(Message dataSignal) { if (WIP.getAndIncrement(this) != 0) { if (dataSignal != null && cancelled) { Operators.onDiscard(dataSignal, messageSubscriber.currentContext()); } return; } drainLoop(); } /** * The serialized drain-loop (implementation patterns inspired from RxJava, Reactor Operators). * Reference: 'Operator Concurrency Primitives' series https: */ private void drainLoop() { int missed = 1; CoreSubscriber<? super Message> downstream = messageSubscriber; for (; ;) { boolean d = done; ReactorReceiverMediator mediator = mediatorHolder.mediator; boolean hasMediator = mediator != null; if (terminateIfCancelled(downstream, null)) { return; } if (terminateIfErrorOrCompletionSignaled(d, downstream, null)) { return; } long r = requested; long emitted = 0L; boolean mediatorTerminatedAndDrained = false; if (r != 0L && hasMediator) { Queue<Message> q = mediator.queue; while (emitted != r) { Message message = q.poll(); if (terminateIfCancelled(downstream, message)) { return; } if (terminateIfErrorOrCompletionSignaled(done, downstream, message)) { return; } boolean empty = message == null; if (empty && mediator.done) { mediatorTerminatedAndDrained = true; break; } if (empty) { break; } messageSubscriber.onNext(message); emitted++; } if (emitted == r) { if (mediator.queue.isEmpty() && mediator.done) { mediatorTerminatedAndDrained = true; } } if (emitted != 0 && r != Long.MAX_VALUE) { r = REQUESTED.addAndGet(this, -emitted); } mediator.update(r, emitted); } if (r == 0L && hasMediator) { if (terminateIfCancelled(downstream, null)) { return; } if (terminateIfErrorOrCompletionSignaled(done, downstream, null)) { return; } if (mediator.queue.isEmpty() && mediator.done) { mediatorTerminatedAndDrained = true; } } if (mediatorTerminatedAndDrained && !mediator.isRetryInitiated) { mediator.isRetryInitiated = true; mediator.closeAsync().subscribe(); 
setTerminationSignalOrScheduleNextMediatorRequest(mediator.error, downstream, mediatorHolder); } missed = WIP.addAndGet(this, -missed); if (missed == 0) { break; } } } /** * CONTRACT: Never invoke from the outside of serialized drain-loop. * <p> * See if downstream signaled cancellation to terminate the operator, if so, react to the cancellation. * </p> * @param downstream the downstream. * @param messageDropped the message that gets dropped if cancellation was signaled. * @return true if canceled, false otherwise. */ private boolean terminateIfCancelled(CoreSubscriber<? super Message> downstream, Message messageDropped) { if (cancelled) { Operators.onDiscard(messageDropped, downstream.currentContext()); upstream.cancel(); mediatorHolder.freeze(); return true; } return false; } /** * CONTRACT: Never invoke from the outside of serialized drain-loop. * <p> * See if there is a pending signal for the operator termination with error or completion, if so, react to it * by terminating downstream. * </p> * * @param d indicate if the operator termination was signaled. * @param downstream the downstream. * @param messageDropped the message that gets dropped if termination happened. * @return true if terminated, false otherwise. */ private boolean terminateIfErrorOrCompletionSignaled(boolean d, CoreSubscriber<? 
super Message> downstream, Message messageDropped) { if (d) { final LoggingEventBuilder logBuilder = mediatorHolder.withReceiverInfo(logger.atWarning()); Throwable e = error; if (e != null && e != Exceptions.TERMINATED) { e = Exceptions.terminate(ERROR, this); Operators.onDiscard(messageDropped, downstream.currentContext()); upstream.cancel(); mediatorHolder.freeze(); logBuilder.log("MessageFlux reached a terminal error-state, signaling it downstream.", e); downstream.onError(e); return true; } Operators.onDiscard(messageDropped, downstream.currentContext()); upstream.cancel(); mediatorHolder.freeze(); logBuilder.log("MessageFlux reached a terminal completion-state, signaling it downstream."); downstream.onComplete(); return true; } return false; } /** * * CONTRACT: Never invoke from the outside of serialized drain-loop. * <p></p> * 1. When retry is enabled (i.e., NULL_RETRY_POLICY is not set) then schedule request (retry) for the next mediator if * <ul> * <li>the operator is not in a termination signaled state,</li> * <li> and * <ul> * <li>there is no error Or</li> * <li>error is retriable and the retry is not exhausted.</li> * </ul> * </li> * </ul> * 2. If retry is enabled (i.e., NULL_RETRY_POLICY is not set) and there is 'non-retriable or retry exhaust' * error, then set an error signal for the drain-loop to terminate the operator. * <br/> * 3. If retry is disabled (i.e., NULL_RETRY_POLICY is set), then set an error signal (if first receiver error-ed) * or completion signal (if first receiver completed) for the drain-loop to terminate the operator. * <p></p> * @param error the error that leads to error-ed termination of the last mediator or {@code null} * if terminated with completion. * @param downstream the downstream. * @param mediatorHolder the mediator holder. */ private void setTerminationSignalOrScheduleNextMediatorRequest(Throwable error, CoreSubscriber<? 
super Message> downstream, MediatorHolder mediatorHolder) { final LoggingEventBuilder logBuilder = mediatorHolder.withReceiverInfo(logger.atWarning()); if (cancelled || done) { logBuilder.log("MessageFlux reached terminal-state [done:{}, cancelled:{}].", done, cancelled); return; } if (retryPolicy == NULL_RETRY_POLICY) { if (error == null) { onComplete(); } else { onError(error); } return; } final Duration delay; if (error == null) { delay = Duration.ofSeconds(1); logBuilder.addKeyValue("retryAfter", delay.toMillis()) .log("Current mediator reached terminal completion-state (retriable:true)."); } else { final int attempt = retryAttempts.incrementAndGet(); delay = retryPolicy.calculateRetryDelay(error, attempt); if (delay != null) { logBuilder.addKeyValue("attempt", attempt) .addKeyValue("retryAfter", delay.toMillis()) .log("Current mediator reached terminal error-state (retriable:true).", error); } else { logBuilder.addKeyValue("attempt", attempt) .log("Current mediator reached terminal error-state (retriable:false) Or MessageFlux retries exhausted.", error); onError(error); return; } } try { scheduleNextMediatorRequest(delay, mediatorHolder); } catch (RejectedExecutionException ree) { final RuntimeException e = Operators.onRejectedExecution(ree, downstream.currentContext()); mediatorHolder.withReceiverInfo(logger.atWarning()) .log("Unable to schedule a request for a new mediator (retriable:false).", e); onError(e); } } /** * Schedule a task to request a new mediator. * * @param delay the backoff duration before requesting the next mediator. * @param mediatorHolder the mediator holder. * @throws RejectedExecutionException if the scheduler is unable to schedule the task. 
*/ private void scheduleNextMediatorRequest(Duration delay, MediatorHolder mediatorHolder) { final Runnable task = () -> { final LoggingEventBuilder logBuilder = mediatorHolder.withReceiverInfo(logger.atWarning()); if (cancelled || done) { logBuilder.log("During the backoff, MessageFlux reached terminal-state [done:{}, cancelled:{}].", done, cancelled); return; } logBuilder.log("Requesting a new mediator."); upstream.request(1); }; mediatorHolder.nextMediatorRequestDisposable = Schedulers.parallel().schedule(task, delay.toMillis(), TimeUnit.MILLISECONDS); } }
nit: nop -> no-op. Thanks for the documentation — it is a great read.
/**
 * Invoked by downstream to signal termination by cancellation.
 * <p>
 * NOTE(review): called from outside the drain-loop, hence the wip handshake below.
 */
public void cancel() {
    // Idempotent: only the first cancellation signal is acted on.
    if (cancelled) {
        return;
    }
    cancelled = true;
    mediatorHolder.withReceiverInfo(logger.atWarning())
        .log("Downstream cancellation signal arrived at MessageFlux.");
    // If no drain-loop is currently running (wip was 0), perform cleanup right here;
    // otherwise the active drain-loop observes the 'cancelled' flag and cleans up itself.
    if (WIP.getAndIncrement(this) == 0) {
        upstream.cancel();
        mediatorHolder.freeze();
    }
}
/**
 * Invoked by downstream to signal termination by cancellation.
 */
public void cancel() {
    // Already cancelled: nothing further to do (idempotent).
    if (cancelled) {
        return;
    }
    cancelled = true;
    mediatorHolder.withReceiverInfo(logger.atWarning())
        .log("Downstream cancellation signal arrived at MessageFlux.");
    // wip == 0 means no drain-loop is in flight, so cleanup must happen here;
    // a non-zero wip means the running drain-loop will see 'cancelled' and clean up.
    if (WIP.getAndIncrement(this) == 0) {
        upstream.cancel();
        mediatorHolder.freeze();
    }
}
/**
 * Subscribes to the upstream flux of {@link AmqpReceiveLink} receivers, streams messages from the
 * current receiver to the downstream message subscriber, and — when the current receiver terminates —
 * requests a new receiver from the upstream (recovery), backing off per the retry policy.
 * <p>
 * Concurrency: state transitions are coordinated through the serialized drain-loop guarded by the
 * {@code wip} counter; {@code requested}, {@code wip} and {@code error} are mutated only via their
 * atomic field updaters.
 */
class RecoverableReactorReceiver implements CoreSubscriber<AmqpReceiveLink>, Subscription {
    private final MediatorHolder mediatorHolder = new MediatorHolder();
    private final MessageFlux parent;
    private final int prefetch;
    private final CreditFlowMode creditFlowMode;
    private final AmqpRetryPolicy retryPolicy;
    private final ClientLogger logger;
    private final AtomicInteger retryAttempts = new AtomicInteger();
    private final CoreSubscriber<? super Message> messageSubscriber;
    private Subscription upstream;

    // Outstanding downstream demand (messages requested but not yet emitted).
    private volatile long requested;
    @SuppressWarnings("rawtypes")
    private static final AtomicLongFieldUpdater<RecoverableReactorReceiver> REQUESTED
        = AtomicLongFieldUpdater.newUpdater(RecoverableReactorReceiver.class, "requested");

    // Work-in-progress counter serializing entry into the drain-loop.
    private volatile int wip;
    @SuppressWarnings("rawtypes")
    private static final AtomicIntegerFieldUpdater<RecoverableReactorReceiver> WIP
        = AtomicIntegerFieldUpdater.newUpdater(RecoverableReactorReceiver.class, "wip");

    private volatile boolean done;
    private volatile boolean cancelled;

    // Terminal error signal (if any). NOTE(review): unlike the sibling copy, these two members are
    // package-private in this copy; kept as-is since narrowing visibility is not backward compatible.
    volatile Throwable error;
    @SuppressWarnings("rawtypes")
    static final AtomicReferenceFieldUpdater<RecoverableReactorReceiver, Throwable> ERROR
        = AtomicReferenceFieldUpdater.newUpdater(RecoverableReactorReceiver.class, Throwable.class, "error");

    /**
     * Create a recoverable-receiver that supports the message-flux to stream messages from the receiver attached
     * to a messaging entity to the message-flux's downstream subscriber and recover from receiver termination.
     *
     * @param parent the parent message-flux.
     * @param messageSubscriber the message-flux's downstream subscriber to notify the events.
     * @param prefetch the number of messages that the operator should prefetch from the messaging entity
     *     (for a less chatty network and faster message processing on the client).
     * @param creditFlowMode the mode indicating how to compute the credit and when to send it to the broker.
     * @param retryPolicy the retry policy to use to recover from receiver termination.
     */
    RecoverableReactorReceiver(MessageFlux parent, CoreSubscriber<? super Message> messageSubscriber, int prefetch,
        CreditFlowMode creditFlowMode, AmqpRetryPolicy retryPolicy) {
        this.parent = parent;
        this.messageSubscriber = messageSubscriber;
        this.prefetch = prefetch;
        this.creditFlowMode = creditFlowMode;
        this.retryPolicy = retryPolicy;
        this.logger = parent.logger;
    }

    /**
     * Invoked by the upstream in response to message-flux subscribing to it.
     *
     * @param s the subscription handle for requesting receivers from the upstream or terminating upstream
     *     through cancellation when it is no longer needed.
     */
    @Override
    public void onSubscribe(Subscription s) {
        if (Operators.validate(upstream, s)) {
            upstream = s;
            messageSubscriber.onSubscribe(this);
            // Request the first receiver; subsequent receivers are requested one at a time on recovery.
            s.request(1);
        }
    }

    /**
     * Invoked by the upstream to deliver new receiver.
     *
     * @param receiver the new receiver.
     */
    @Override
    public void onNext(AmqpReceiveLink receiver) {
        if (done) {
            receiver.closeAsync().subscribe();
            Operators.onNextDropped(receiver, messageSubscriber.currentContext());
            return;
        }
        final ReactorReceiverMediator mediator
            = new ReactorReceiverMediator(this, receiver, prefetch, creditFlowMode, logger);
        if (mediatorHolder.trySet(mediator)) {
            mediator.onParentReady();
        } else {
            // Holder is frozen (operator terminated); dispose of the late receiver.
            logger.atWarning()
                .addKeyValue("oldLinkName", mediatorHolder.getLinkName())
                .addKeyValue(LINK_NAME_KEY, receiver.getLinkName())
                .addKeyValue(ENTITY_PATH_KEY, receiver.getEntityPath())
                .log("Got a AmqpReceiveLink when the MessageFlux is already terminated.");
            receiver.closeAsync().subscribe();
            Operators.onDiscard(receiver, messageSubscriber.currentContext());
        }
    }

    /**
     * Signals operator termination with error.
     * <ul>
     * <li>Invoked by the upstream when it errors, or</li>
     * <li>Invoked by the drain-loop when it detects a 'non-retriable or retry exhaust' error, or</li>
     * <li>Invoked by the drain-loop when it detects the receiver signaled terminal completion and retry
     * is disabled (i.e., NULL_RETRY_POLICY is set).</li>
     * </ul>
     *
     * @param e the error signaled.
     */
    @Override
    public void onError(Throwable e) {
        if (done) {
            Operators.onErrorDropped(e, messageSubscriber.currentContext());
            return;
        }
        if (Exceptions.addThrowable(ERROR, this, e)) {
            done = true;
            final String logMessage;
            if (retryPolicy == NULL_RETRY_POLICY) {
                logMessage = "Terminal error signal from Upstream|Receiver arrived at MessageFlux.";
            } else {
                logMessage = "Terminal error signal from Upstream|RetryLoop arrived at MessageFlux.";
            }
            mediatorHolder.withReceiverInfo(logger.atWarning()).log(logMessage, e);
            drain(null);
        } else {
            Operators.onErrorDropped(e, messageSubscriber.currentContext());
        }
    }

    /**
     * Signals operator termination with completion.
     * <ul>
     * <li>Invoked by the upstream when it completes, or</li>
     * <li>Invoked by the drain-loop when it detects first receiver signaled terminal completion and retry
     * is disabled (i.e., NULL_RETRY_POLICY is set).</li>
     * </ul>
     */
    @Override
    public void onComplete() {
        if (done) {
            return;
        }
        done = true;
        final String logMessage;
        if (retryPolicy == NULL_RETRY_POLICY) {
            logMessage = "Terminal completion signal from Upstream|Receiver arrived at MessageFlux.";
        } else {
            logMessage = "Terminal completion signal from Upstream arrived at MessageFlux.";
        }
        mediatorHolder.withReceiverInfo(logger.atWarning()).log(logMessage);
        drain(null);
    }

    /**
     * Invoked by the downstream message subscriber to signal the demand for messages. Whatever has been
     * requested can be sent downstream, so only signal the demand for what can be safely handled.
     * No messages will be sent downstream until the demand is signaled.
     *
     * @param n the number of messages to send to downstream.
     */
    @Override
    public void request(long n) {
        if (Operators.validate(n)) {
            Operators.addCap(REQUESTED, this, n);
            drain(null);
        }
    }

    /**
     * Invoked by downstream to signal termination by cancellation.
     * <p>
     * NOTE(review): the original text contained a dangling {@code @Override} with no method body here;
     * the body below is restored from the sibling copy of this method in the same file.
     */
    @Override
    public void cancel() {
        if (cancelled) {
            return;
        }
        cancelled = true;
        mediatorHolder.withReceiverInfo(logger.atWarning())
            .log("Downstream cancellation signal arrived at MessageFlux.");
        // If no drain-loop is active (wip was 0), clean up here; otherwise the running
        // drain-loop observes 'cancelled' (via terminateIfCancelled) and cleans up.
        if (WIP.getAndIncrement(this) == 0) {
            upstream.cancel();
            mediatorHolder.freeze();
        }
    }

    /**
     * Invoked by the new mediator when it is ready to be used. A mediator constructed in 'onNext' moves
     * to ready state when its backing receiver is active.
     *
     * @param updateDispositionFunc the function to disposition messages from mediator's backing receiver.
     */
    void onMediatorReady(BiFunction<String, DeliveryState, Mono<Void>> updateDispositionFunc) {
        // A ready mediator means recovery succeeded; reset the retry counter.
        retryAttempts.set(0);
        parent.onNextUpdateDispositionFunction(updateDispositionFunc);
        drain(null);
    }

    /**
     * The serialized entry point to drain-loop.
     *
     * @param dataSignal the message to drop if the operator is terminated by cancellation.
     */
    void drain(Message dataSignal) {
        if (WIP.getAndIncrement(this) != 0) {
            if (dataSignal != null && cancelled) {
                Operators.onDiscard(dataSignal, messageSubscriber.currentContext());
            }
            return;
        }
        drainLoop();
    }

    /**
     * The serialized drain-loop (implementation patterns inspired from RxJava, Reactor Operators).
     * Reference: the 'Operator Concurrency Primitives' article series.
     */
    private void drainLoop() {
        int missed = 1;
        CoreSubscriber<? super Message> downstream = messageSubscriber;
        for (; ;) {
            boolean d = done;
            ReactorReceiverMediator mediator = mediatorHolder.mediator;
            boolean hasMediator = mediator != null;
            if (terminateIfCancelled(downstream, null)) {
                return;
            }
            if (terminateIfErrorOrCompletionSignaled(d, downstream, null)) {
                return;
            }
            long r = requested;
            long emitted = 0L;
            boolean mediatorTerminatedAndDrained = false;
            if (r != 0L && hasMediator) {
                Queue<Message> q = mediator.queue;
                // Emit queued messages until demand is met or the queue is exhausted.
                while (emitted != r) {
                    Message message = q.poll();
                    if (terminateIfCancelled(downstream, message)) {
                        return;
                    }
                    if (terminateIfErrorOrCompletionSignaled(done, downstream, message)) {
                        return;
                    }
                    boolean empty = message == null;
                    if (empty && mediator.done) {
                        mediatorTerminatedAndDrained = true;
                        break;
                    }
                    if (empty) {
                        break;
                    }
                    messageSubscriber.onNext(message);
                    emitted++;
                }
                if (emitted == r) {
                    if (mediator.queue.isEmpty() && mediator.done) {
                        mediatorTerminatedAndDrained = true;
                    }
                }
                if (emitted != 0 && r != Long.MAX_VALUE) {
                    r = REQUESTED.addAndGet(this, -emitted);
                }
                mediator.update(r, emitted);
            }
            if (r == 0L && hasMediator) {
                if (terminateIfCancelled(downstream, null)) {
                    return;
                }
                if (terminateIfErrorOrCompletionSignaled(done, downstream, null)) {
                    return;
                }
                if (mediator.queue.isEmpty() && mediator.done) {
                    mediatorTerminatedAndDrained = true;
                }
            }
            if (mediatorTerminatedAndDrained && !mediator.isRetryInitiated) {
                // Initiate recovery exactly once per terminated-and-drained mediator.
                mediator.isRetryInitiated = true;
                mediator.closeAsync().subscribe();
                setTerminationSignalOrScheduleNextMediatorRequest(mediator.error, downstream, mediatorHolder);
            }
            missed = WIP.addAndGet(this, -missed);
            if (missed == 0) {
                break;
            }
        }
    }

    /**
     * CONTRACT: Never invoke from the outside of serialized drain-loop.
     * <p/>
     * See if downstream signaled cancellation to terminate the operator, if so, react to the cancellation.
     *
     * @param downstream the downstream.
     * @param messageDropped the message that gets dropped if cancellation was signaled.
     * @return true if canceled, false otherwise.
     */
    private boolean terminateIfCancelled(CoreSubscriber<? super Message> downstream, Message messageDropped) {
        if (cancelled) {
            Operators.onDiscard(messageDropped, downstream.currentContext());
            upstream.cancel();
            mediatorHolder.freeze();
            return true;
        }
        return false;
    }

    /**
     * CONTRACT: Never invoke from the outside of serialized drain-loop.
     * <p/>
     * See if there is a pending signal for the operator termination with error or completion, if so, react to it
     * by terminating downstream.
     *
     * @param d indicate if the operator termination was signaled.
     * @param downstream the downstream.
     * @param messageDropped the message that gets dropped if termination happened.
     * @return true if terminated, false otherwise.
     */
    private boolean terminateIfErrorOrCompletionSignaled(boolean d, CoreSubscriber<? super Message> downstream,
        Message messageDropped) {
        if (d) {
            final LoggingEventBuilder logBuilder = mediatorHolder.withReceiverInfo(logger.atWarning());
            Throwable e = error;
            if (e != null && e != Exceptions.TERMINATED) {
                // Atomically take ownership of the terminal error so it is signaled only once.
                e = Exceptions.terminate(ERROR, this);
                Operators.onDiscard(messageDropped, downstream.currentContext());
                upstream.cancel();
                mediatorHolder.freeze();
                logBuilder.log("MessageFlux reached a terminal error-state, signaling it downstream.", e);
                downstream.onError(e);
                return true;
            }
            Operators.onDiscard(messageDropped, downstream.currentContext());
            upstream.cancel();
            mediatorHolder.freeze();
            logBuilder.log("MessageFlux reached a terminal completion-state, signaling it downstream.");
            downstream.onComplete();
            return true;
        }
        return false;
    }

    /**
     * CONTRACT: Never invoke from the outside of serialized drain-loop.
     * <p/>
     * 1. When retry is enabled (i.e., NULL_RETRY_POLICY is not set) then schedule request (retry) for the next mediator if
     * <ul>
     * <li>the operator is not in a termination signaled state,</li>
     * <li> and
     * <ul>
     * <li>there is no error Or</li>
     * <li>error is retriable and the retry is not exhausted.</li>
     * </ul>
     * </li>
     * </ul>
     * 2. If retry is enabled (i.e., NULL_RETRY_POLICY is not set) and there is 'non-retriable or retry exhaust'
     * error, then set an error signal for the drain-loop to terminate the operator.
     * <br/>
     * 3. If retry is disabled (i.e., NULL_RETRY_POLICY is set), then set an error signal (if first receiver error-ed)
     * or completion signal (if first receiver completed) for the drain-loop to terminate the operator.
     * <br/>
     * @param error the error that leads to error-ed termination of the last mediator or {@code null}
     *     if terminated with completion.
     * @param downstream the downstream.
     * @param mediatorHolder the mediator holder.
     */
    private void setTerminationSignalOrScheduleNextMediatorRequest(Throwable error,
        CoreSubscriber<? super Message> downstream, MediatorHolder mediatorHolder) {
        final LoggingEventBuilder logBuilder = mediatorHolder.withReceiverInfo(logger.atWarning());
        if (cancelled || done) {
            logBuilder.log("MessageFlux reached terminal-state [done:{}, cancelled:{}].", done, cancelled);
            return;
        }
        if (retryPolicy == NULL_RETRY_POLICY) {
            // Retry disabled: propagate the first receiver's terminal signal as the operator's own.
            if (error == null) {
                onComplete();
            } else {
                onError(error);
            }
            return;
        }
        final Duration delay;
        if (error == null) {
            // Graceful mediator completion: retry after a fixed one-second backoff.
            delay = Duration.ofSeconds(1);
            logBuilder.addKeyValue("retryAfter", delay.toMillis())
                .log("Current mediator reached terminal completion-state (retriable:true).");
        } else {
            final int attempt = retryAttempts.incrementAndGet();
            delay = retryPolicy.calculateRetryDelay(error, attempt);
            if (delay != null) {
                logBuilder.addKeyValue("attempt", attempt)
                    .addKeyValue("retryAfter", delay.toMillis())
                    .log("Current mediator reached terminal error-state (retriable:true).", error);
            } else {
                // null delay means non-retriable error or retries exhausted.
                logBuilder.addKeyValue("attempt", attempt)
                    .log("Current mediator reached terminal error-state (retriable:false) Or MessageFlux retries exhausted.", error);
                onError(error);
                return;
            }
        }
        try {
            scheduleNextMediatorRequest(delay, mediatorHolder);
        } catch (RejectedExecutionException ree) {
            final RuntimeException e = Operators.onRejectedExecution(ree, downstream.currentContext());
            mediatorHolder.withReceiverInfo(logger.atWarning())
                .log("Unable to schedule a request for a new mediator (retriable:false).", e);
            onError(e);
        }
    }

    /**
     * Schedule a task to request a new mediator.
     *
     * @param delay the backoff duration before requesting the next mediator.
     * @param mediatorHolder the mediator holder.
     * @throws RejectedExecutionException if the scheduler is unable to schedule the task.
     */
    private void scheduleNextMediatorRequest(Duration delay, MediatorHolder mediatorHolder) {
        final Runnable task = () -> {
            final LoggingEventBuilder logBuilder = mediatorHolder.withReceiverInfo(logger.atWarning());
            if (cancelled || done) {
                logBuilder.log("During the backoff, MessageFlux reached terminal-state [done:{}, cancelled:{}].", done, cancelled);
                return;
            }
            logBuilder.log("Requesting a new mediator.");
            upstream.request(1);
        };
        // Keep the disposable so the holder can cancel the pending request on freeze.
        mediatorHolder.nextMediatorRequestDisposable
            = Schedulers.parallel().schedule(task, delay.toMillis(), TimeUnit.MILLISECONDS);
    }
}
class RecoverableReactorReceiver implements CoreSubscriber<AmqpReceiveLink>, Subscription { private final MediatorHolder mediatorHolder = new MediatorHolder(); private final MessageFlux parent; private final int prefetch; private final CreditFlowMode creditFlowMode; private final AmqpRetryPolicy retryPolicy; private final ClientLogger logger; private final AtomicInteger retryAttempts = new AtomicInteger(); private final CoreSubscriber<? super Message> messageSubscriber; private Subscription upstream; private volatile long requested; @SuppressWarnings("rawtypes") private static final AtomicLongFieldUpdater<RecoverableReactorReceiver> REQUESTED = AtomicLongFieldUpdater.newUpdater(RecoverableReactorReceiver.class, "requested"); private volatile int wip; @SuppressWarnings("rawtypes") private static final AtomicIntegerFieldUpdater<RecoverableReactorReceiver> WIP = AtomicIntegerFieldUpdater.newUpdater(RecoverableReactorReceiver.class, "wip"); private volatile boolean done; private volatile boolean cancelled; private volatile Throwable error; @SuppressWarnings("rawtypes") private static final AtomicReferenceFieldUpdater<RecoverableReactorReceiver, Throwable> ERROR = AtomicReferenceFieldUpdater.newUpdater(RecoverableReactorReceiver.class, Throwable.class, "error"); /** * Create a recoverable-receiver that supports the message-flux to stream messages from the receiver attached * to a messaging entity to the message-flux's downstream subscriber and recover from receiver termination. * * @param parent the parent message-flux. * @param messageSubscriber the message-flux's downstream subscriber to notify the events. * @param prefetch the number of messages that the operator should prefetch from the messaging entity * (for a less chatty network and faster message processing on the client). * @param creditFlowMode the mode indicating how to compute the credit and when to send it to the broker. * @param retryPolicy the retry policy to use to recover from receiver termination. 
*/ RecoverableReactorReceiver(MessageFlux parent, CoreSubscriber<? super Message> messageSubscriber, int prefetch, CreditFlowMode creditFlowMode, AmqpRetryPolicy retryPolicy) { this.parent = parent; this.messageSubscriber = messageSubscriber; this.prefetch = prefetch; this.creditFlowMode = creditFlowMode; this.retryPolicy = retryPolicy; this.logger = parent.logger; } /** * Invoked by the upstream in response to message-flux subscribing to it. * * @param s the subscription handle for requesting receivers from the upstream or terminating upstream * through cancellation when it is no longer needed. */ @Override public void onSubscribe(Subscription s) { if (Operators.validate(upstream, s)) { upstream = s; messageSubscriber.onSubscribe(this); s.request(1); } } /** * Invoked by the upstream to deliver new receiver. * * @param receiver the new receiver. */ @Override public void onNext(AmqpReceiveLink receiver) { if (done) { receiver.closeAsync().subscribe(); Operators.onNextDropped(receiver, messageSubscriber.currentContext()); return; } final ReactorReceiverMediator mediator = new ReactorReceiverMediator(this, receiver, prefetch, creditFlowMode, logger); if (mediatorHolder.trySet(mediator)) { mediator.onParentReady(); } else { logger.atWarning() .addKeyValue("oldLinkName", mediatorHolder.getLinkName()) .addKeyValue(LINK_NAME_KEY, receiver.getLinkName()) .addKeyValue(ENTITY_PATH_KEY, receiver.getEntityPath()) .log("Got a AmqpReceiveLink when the MessageFlux is already terminated."); receiver.closeAsync().subscribe(); Operators.onDiscard(receiver, messageSubscriber.currentContext()); } } /** * Signals operator termination with error. 
* <ul> * <li>Invoked by the upstream when it errors, or</li> * <li>Invoked by the drain-loop when it detects a 'non-retriable or retry exhaust' error, or </li> * <li>Invoked by the drain-loop when it detects the receiver signaled terminal completion and retry * is disabled (i.e., NULL_RETRY_POLICY is set).</li> * </ul> * * @param e the error signaled. */ @Override public void onError(Throwable e) { if (done) { Operators.onErrorDropped(e, messageSubscriber.currentContext()); return; } if (Exceptions.addThrowable(ERROR, this, e)) { done = true; final String logMessage; if (retryPolicy == NULL_RETRY_POLICY) { logMessage = "Terminal error signal from Upstream|Receiver arrived at MessageFlux."; } else { logMessage = "Terminal error signal from Upstream|RetryLoop arrived at MessageFlux."; } mediatorHolder.withReceiverInfo(logger.atWarning()).log(logMessage, e); drain(null); } else { Operators.onErrorDropped(e, messageSubscriber.currentContext()); } } /** * Signals operator termination with completion. * <ul> * <li>Invoked by the upstream when it completes, or</li> * <li>Invoked by the the drain-loop when it detects first receiver signaled terminal completion and retry * is disabled (i.e., NULL_RETRY_POLICY is set).</li> * </ul> */ @Override public void onComplete() { if (done) { return; } done = true; final String logMessage; if (retryPolicy == NULL_RETRY_POLICY) { logMessage = "Terminal completion signal from Upstream|Receiver arrived at MessageFlux."; } else { logMessage = "Terminal completion signal from Upstream arrived at MessageFlux."; } mediatorHolder.withReceiverInfo(logger.atWarning()).log(logMessage); drain(null); } /** * Invoked by the downstream message subscriber to signal the demand for messages. Whatever has been * requested can be sent downstream, so only signal the demand for what can be safely handled. * No messages will be sent downstream until the demand is signaled. * * @param n the number of messages to send to downstream. 
*/ @Override public void request(long n) { if (Operators.validate(n)) { Operators.addCap(REQUESTED, this, n); drain(null); } } /** * Invoked by downstream to signal termination by cancellation. */ @Override /** * Invoked by the new mediator when it is ready to be used. A mediator constructed in 'onNext' moves * to ready state when its backing receiver is active. * * @param updateDispositionFunc the function to disposition messages from mediator's backing receiver. */ void onMediatorReady(DispositionFunction updateDispositionFunc) { retryAttempts.set(0); parent.onNextUpdateDispositionFunction(updateDispositionFunc); drain(null); } /** * The serialized entry point to drain-loop. * * @param dataSignal the message to drop if the operator is terminated by cancellation. */ void drain(Message dataSignal) { if (WIP.getAndIncrement(this) != 0) { if (dataSignal != null && cancelled) { Operators.onDiscard(dataSignal, messageSubscriber.currentContext()); } return; } drainLoop(); } /** * The serialized drain-loop (implementation patterns inspired from RxJava, Reactor Operators). * Reference: 'Operator Concurrency Primitives' series https: */ private void drainLoop() { int missed = 1; CoreSubscriber<? 
super Message> downstream = messageSubscriber; for (; ;) { boolean d = done; ReactorReceiverMediator mediator = mediatorHolder.mediator; boolean hasMediator = mediator != null; if (terminateIfCancelled(downstream, null)) { return; } if (terminateIfErrorOrCompletionSignaled(d, downstream, null)) { return; } long r = requested; long emitted = 0L; boolean mediatorTerminatedAndDrained = false; if (r != 0L && hasMediator) { Queue<Message> q = mediator.queue; while (emitted != r) { Message message = q.poll(); if (terminateIfCancelled(downstream, message)) { return; } if (terminateIfErrorOrCompletionSignaled(done, downstream, message)) { return; } boolean empty = message == null; if (empty && mediator.done) { mediatorTerminatedAndDrained = true; break; } if (empty) { break; } messageSubscriber.onNext(message); emitted++; } if (emitted == r) { if (mediator.queue.isEmpty() && mediator.done) { mediatorTerminatedAndDrained = true; } } if (emitted != 0 && r != Long.MAX_VALUE) { r = REQUESTED.addAndGet(this, -emitted); } mediator.update(r, emitted); } if (r == 0L && hasMediator) { if (terminateIfCancelled(downstream, null)) { return; } if (terminateIfErrorOrCompletionSignaled(done, downstream, null)) { return; } if (mediator.queue.isEmpty() && mediator.done) { mediatorTerminatedAndDrained = true; } } if (mediatorTerminatedAndDrained && !mediator.isRetryInitiated) { mediator.isRetryInitiated = true; mediator.closeAsync().subscribe(); setTerminationSignalOrScheduleNextMediatorRequest(mediator.error, downstream, mediatorHolder); } missed = WIP.addAndGet(this, -missed); if (missed == 0) { break; } } } /** * CONTRACT: Never invoke from the outside of serialized drain-loop. * <p> * See if downstream signaled cancellation to terminate the operator, if so, react to the cancellation. * </p> * @param downstream the downstream. * @param messageDropped the message that gets dropped if cancellation was signaled. * @return true if canceled, false otherwise. 
*/ private boolean terminateIfCancelled(CoreSubscriber<? super Message> downstream, Message messageDropped) { if (cancelled) { Operators.onDiscard(messageDropped, downstream.currentContext()); upstream.cancel(); mediatorHolder.freeze(); return true; } return false; } /** * CONTRACT: Never invoke from the outside of serialized drain-loop. * <p> * See if there is a pending signal for the operator termination with error or completion, if so, react to it * by terminating downstream. * </p> * * @param d indicate if the operator termination was signaled. * @param downstream the downstream. * @param messageDropped the message that gets dropped if termination happened. * @return true if terminated, false otherwise. */ private boolean terminateIfErrorOrCompletionSignaled(boolean d, CoreSubscriber<? super Message> downstream, Message messageDropped) { if (d) { final LoggingEventBuilder logBuilder = mediatorHolder.withReceiverInfo(logger.atWarning()); Throwable e = error; if (e != null && e != Exceptions.TERMINATED) { e = Exceptions.terminate(ERROR, this); Operators.onDiscard(messageDropped, downstream.currentContext()); upstream.cancel(); mediatorHolder.freeze(); logBuilder.log("MessageFlux reached a terminal error-state, signaling it downstream.", e); downstream.onError(e); return true; } Operators.onDiscard(messageDropped, downstream.currentContext()); upstream.cancel(); mediatorHolder.freeze(); logBuilder.log("MessageFlux reached a terminal completion-state, signaling it downstream."); downstream.onComplete(); return true; } return false; } /** * * CONTRACT: Never invoke from the outside of serialized drain-loop. * <p></p> * 1. When retry is enabled (i.e., NULL_RETRY_POLICY is not set) then schedule request (retry) for the next mediator if * <ul> * <li>the operator is not in a termination signaled state,</li> * <li> and * <ul> * <li>there is no error Or</li> * <li>error is retriable and the retry is not exhausted.</li> * </ul> * </li> * </ul> * 2. 
If retry is enabled (i.e., NULL_RETRY_POLICY is not set) and there is 'non-retriable or retry exhaust' * error, then set an error signal for the drain-loop to terminate the operator. * <br/> * 3. If retry is disabled (i.e., NULL_RETRY_POLICY is set), then set an error signal (if first receiver error-ed) * or completion signal (if first receiver completed) for the drain-loop to terminate the operator. * <p></p> * @param error the error that leads to error-ed termination of the last mediator or {@code null} * if terminated with completion. * @param downstream the downstream. * @param mediatorHolder the mediator holder. */ private void setTerminationSignalOrScheduleNextMediatorRequest(Throwable error, CoreSubscriber<? super Message> downstream, MediatorHolder mediatorHolder) { final LoggingEventBuilder logBuilder = mediatorHolder.withReceiverInfo(logger.atWarning()); if (cancelled || done) { logBuilder.log("MessageFlux reached terminal-state [done:{}, cancelled:{}].", done, cancelled); return; } if (retryPolicy == NULL_RETRY_POLICY) { if (error == null) { onComplete(); } else { onError(error); } return; } final Duration delay; if (error == null) { delay = Duration.ofSeconds(1); logBuilder.addKeyValue("retryAfter", delay.toMillis()) .log("Current mediator reached terminal completion-state (retriable:true)."); } else { final int attempt = retryAttempts.incrementAndGet(); delay = retryPolicy.calculateRetryDelay(error, attempt); if (delay != null) { logBuilder.addKeyValue("attempt", attempt) .addKeyValue("retryAfter", delay.toMillis()) .log("Current mediator reached terminal error-state (retriable:true).", error); } else { logBuilder.addKeyValue("attempt", attempt) .log("Current mediator reached terminal error-state (retriable:false) Or MessageFlux retries exhausted.", error); onError(error); return; } } try { scheduleNextMediatorRequest(delay, mediatorHolder); } catch (RejectedExecutionException ree) { final RuntimeException e = Operators.onRejectedExecution(ree, 
downstream.currentContext()); mediatorHolder.withReceiverInfo(logger.atWarning()) .log("Unable to schedule a request for a new mediator (retriable:false).", e); onError(e); } } /** * Schedule a task to request a new mediator. * * @param delay the backoff duration before requesting the next mediator. * @param mediatorHolder the mediator holder. * @throws RejectedExecutionException if the scheduler is unable to schedule the task. */ private void scheduleNextMediatorRequest(Duration delay, MediatorHolder mediatorHolder) { final Runnable task = () -> { final LoggingEventBuilder logBuilder = mediatorHolder.withReceiverInfo(logger.atWarning()); if (cancelled || done) { logBuilder.log("During the backoff, MessageFlux reached terminal-state [done:{}, cancelled:{}].", done, cancelled); return; } logBuilder.log("Requesting a new mediator."); upstream.request(1); }; mediatorHolder.nextMediatorRequestDisposable = Schedulers.parallel().schedule(task, delay.toMillis(), TimeUnit.MILLISECONDS); } }
Should we make this either a static constant or a Mono.defer if this pathway is going to be taken often? I am thinking back to when we had all of those exception objects being allocated on a hot path.
public MessageFlux(Flux<? extends AmqpReceiveLink> source, int prefetch, CreditFlowMode creditFlowMode, AmqpRetryPolicy retryPolicy) { super(source); final Map<String, Object> loggingContext = new HashMap<>(1); loggingContext.put(MESSAGE_FLUX_KEY, StringUtil.getRandomString("mf")); this.logger = new ClientLogger(MessageFlux.class, loggingContext); if (prefetch < 0) { throw new IllegalArgumentException("prefetch >= 0 required but it was " + prefetch); } this.prefetch = prefetch; this.creditFlowMode = creditFlowMode; this.retryPolicy = Objects.requireNonNull(retryPolicy, "'retryPolicy' cannot be null."); this.updateDispositionFunc = (t, s) -> Mono.error(new IllegalStateException("Cannot update disposition as no receive-link is established.")); }
this.updateDispositionFunc = (t, s) -> Mono.error(new IllegalStateException("Cannot update disposition as no receive-link is established."));
/**
 * Create a message-flux that streams messages from a messaging entity to the downstream subscriber.
 *
 * @param source the upstream source that, upon a request, provides a new receiver connected to the
 * messaging entity.
 * @param prefetch the number of messages that the operator should prefetch from the messaging entity
 * (for a less chatty network and faster message processing on the client).
 * @param creditFlowMode the mode indicating how to compute the credit and when to send it to the broker.
 * @param retryPolicy the retry policy to use to obtain a new receiver upon current receiver termination.
 * @throws IllegalArgumentException if the {@code prefetch} is a negative value.
 * @throws NullPointerException if the {@code retryPolicy} is {@code null}.
 */
public MessageFlux(Flux<? extends AmqpReceiveLink> source, int prefetch, CreditFlowMode creditFlowMode,
    AmqpRetryPolicy retryPolicy) {
    super(source);
    // Each operator instance gets a random "mf" identifier carried in its logging context.
    final String fluxId = StringUtil.getRandomString("mf");
    final Map<String, Object> logContext = new HashMap<>(1);
    logContext.put(MESSAGE_FLUX_KEY, fluxId);
    this.logger = new ClientLogger(MessageFlux.class, logContext);
    if (prefetch < 0) {
        throw new IllegalArgumentException("prefetch >= 0 required but it was " + prefetch);
    }
    this.retryPolicy = Objects.requireNonNull(retryPolicy, "'retryPolicy' cannot be null.");
    this.prefetch = prefetch;
    this.creditFlowMode = creditFlowMode;
    // Until a receive-link is established, dispositions fall back to the shared no-op function.
    this.updateDispositionFunc = DispositionFunction.NO_DISPOSITION;
}
class MessageFlux extends FluxOperator<AmqpReceiveLink, Message> { /** An AmqpRetryPolicy const indicates that MessageFlux should terminate when the first receiver terminates * (i.e., disables the retry action to obtain next receiver from the upstream). **/ public static final AmqpRetryPolicy NULL_RETRY_POLICY = new FixedAmqpRetryPolicy(new AmqpRetryOptions()); private static final String MESSAGE_FLUX_KEY = "messageFlux"; private final ClientLogger logger; private final int prefetch; private final CreditFlowMode creditFlowMode; private final AmqpRetryPolicy retryPolicy; private volatile BiFunction<String, DeliveryState, Mono<Void>> updateDispositionFunc; /** * Create a message-flux to stream messages from a messaging entity to downstream subscriber. * * @param source the upstream source that, upon a request, provide a new receiver connected to the messaging entity. * @param prefetch the number of messages that the operator should prefetch from the messaging entity (for a * less chatty network and faster message processing on the client). * @param creditFlowMode the mode indicating how to compute the credit and when to send it to the broker. * @param retryPolicy the retry policy to use to obtain a new receiver upon current receiver termination. * @throws IllegalStateException if the {@code prefetch} is a negative value. * @throws NullPointerException if the {@code retryPolicy} is {@code null}. */ /** * Register the downstream subscriber. * * @param actual the downstream subscriber interested in the published messages and termination. */ @Override public void subscribe(CoreSubscriber<? super Message> actual) { source.subscribe(new RecoverableReactorReceiver(this, actual, prefetch, creditFlowMode, retryPolicy)); } /** * Updates the disposition state of a message uniquely identified by the given delivery tag. * * @param deliveryTag delivery tag of message. * @param deliveryState Delivery state of message. 
* * @return A Mono that completes when the state is successfully updated and acknowledged by message broker. */ public Mono<Void> updateDisposition(String deliveryTag, DeliveryState deliveryState) { final BiFunction<String, DeliveryState, Mono<Void>> updateDispositionFunc = this.updateDispositionFunc; return updateDispositionFunc.apply(deliveryTag, deliveryState); } /** * The callback invoked when next receiver is attached to the messaging entity from which this message-flux * instance stream messages. There will be only one receiver at a time, and this callback delivers the reference * to the function to disposition messages that arrives in the new receiver. * * @param updateDispositionFunc the function to disposition messages delivered by the current backing receiver. */ void onNextUpdateDispositionFunction(BiFunction<String, DeliveryState, Mono<Void>> updateDispositionFunc) { this.updateDispositionFunc = updateDispositionFunc; } /** * The underlying consumer and producer extension of the message-flux operator. The consuming side processes events * (about new receiver, terminal signals) from the upstream and events (messages, terminal signals) from * the current receiver. The producing side publishes the messages to message-flux's downstream. The type has * a recovery mechanism to obtain a new receiver from upstream upon the current receiver's termination. * Recoveries happen underneath while the messages flow transparently downstream. The type can terminate downstream * if the upstream terminates, the recovery path encounters a non-retriable error (i.e., the current receiver * terminated with a non-retriable error), or recovery retries exhaust. 
*/ private static final class RecoverableReactorReceiver implements CoreSubscriber<AmqpReceiveLink>, Subscription { private final MediatorHolder mediatorHolder = new MediatorHolder(); private final MessageFlux parent; private final int prefetch; private final CreditFlowMode creditFlowMode; private final AmqpRetryPolicy retryPolicy; private final ClientLogger logger; private final AtomicInteger retryAttempts = new AtomicInteger(); private final CoreSubscriber<? super Message> messageSubscriber; private Subscription upstream; private volatile long requested; @SuppressWarnings("rawtypes") private static final AtomicLongFieldUpdater<RecoverableReactorReceiver> REQUESTED = AtomicLongFieldUpdater.newUpdater(RecoverableReactorReceiver.class, "requested"); private volatile int wip; @SuppressWarnings("rawtypes") private static final AtomicIntegerFieldUpdater<RecoverableReactorReceiver> WIP = AtomicIntegerFieldUpdater.newUpdater(RecoverableReactorReceiver.class, "wip"); private volatile boolean done; private volatile boolean cancelled; volatile Throwable error; @SuppressWarnings("rawtypes") static final AtomicReferenceFieldUpdater<RecoverableReactorReceiver, Throwable> ERROR = AtomicReferenceFieldUpdater.newUpdater(RecoverableReactorReceiver.class, Throwable.class, "error"); /** * Create a recoverable-receiver that supports the message-flux to stream messages from the receiver attached * to a messaging entity to the message-flux's downstream subscriber and recover from receiver termination. * * @param parent the parent message-flux. * @param messageSubscriber the message-flux's downstream subscriber to notify the events. * @param prefetch the number of messages that the operator should prefetch from the messaging entity * (for a less chatty network and faster message processing on the client). * @param creditFlowMode the mode indicating how to compute the credit and when to send it to the broker. 
* @param retryPolicy the retry policy to use to recover from receiver termination. */ RecoverableReactorReceiver(MessageFlux parent, CoreSubscriber<? super Message> messageSubscriber, int prefetch, CreditFlowMode creditFlowMode, AmqpRetryPolicy retryPolicy) { this.parent = parent; this.messageSubscriber = messageSubscriber; this.prefetch = prefetch; this.creditFlowMode = creditFlowMode; this.retryPolicy = retryPolicy; this.logger = parent.logger; } /** * Invoked by the upstream in response to message-flux subscribing to it. * * @param s the subscription handle for requesting receivers from the upstream or terminating upstream * through cancellation when it is no longer needed. */ @Override public void onSubscribe(Subscription s) { if (Operators.validate(upstream, s)) { upstream = s; messageSubscriber.onSubscribe(this); s.request(1); } } /** * Invoked by the upstream to deliver new receiver. * * @param receiver the new receiver. */ @Override public void onNext(AmqpReceiveLink receiver) { if (done) { receiver.closeAsync().subscribe(); Operators.onNextDropped(receiver, messageSubscriber.currentContext()); return; } final ReactorReceiverMediator mediator = new ReactorReceiverMediator(this, receiver, prefetch, creditFlowMode, logger); if (mediatorHolder.trySet(mediator)) { mediator.onParentReady(); } else { logger.atWarning() .addKeyValue("oldLinkName", mediatorHolder.getLinkName()) .addKeyValue(LINK_NAME_KEY, receiver.getLinkName()) .addKeyValue(ENTITY_PATH_KEY, receiver.getEntityPath()) .log("Got a AmqpReceiveLink when the MessageFlux is already terminated."); receiver.closeAsync().subscribe(); Operators.onDiscard(receiver, messageSubscriber.currentContext()); } } /** * Signals operator termination with error. 
* <ul> * <li>Invoked by the upstream when it errors, or</li> * <li>Invoked by the drain-loop when it detects a 'non-retriable or retry exhaust' error, or </li> * <li>Invoked by the drain-loop when it detects the receiver signaled terminal completion and retry * is disabled (i.e., NULL_RETRY_POLICY is set).</li> * </ul> * * @param e the error signaled. */ @Override public void onError(Throwable e) { if (done) { Operators.onErrorDropped(e, messageSubscriber.currentContext()); return; } if (Exceptions.addThrowable(ERROR, this, e)) { done = true; final String logMessage; if (retryPolicy == NULL_RETRY_POLICY) { logMessage = "Terminal error signal from Upstream|Receiver arrived at MessageFlux."; } else { logMessage = "Terminal error signal from Upstream|RetryLoop arrived at MessageFlux."; } mediatorHolder.withReceiverInfo(logger.atWarning()).log(logMessage, e); drain(null); } else { Operators.onErrorDropped(e, messageSubscriber.currentContext()); } } /** * Signals operator termination with completion. * <ul> * <li>Invoked by the upstream when it completes, or</li> * <li>Invoked by the the drain-loop when it detects first receiver signaled terminal completion and retry * is disabled (i.e., NULL_RETRY_POLICY is set).</li> * </ul> */ @Override public void onComplete() { if (done) { return; } done = true; final String logMessage; if (retryPolicy == NULL_RETRY_POLICY) { logMessage = "Terminal completion signal from Upstream|Receiver arrived at MessageFlux."; } else { logMessage = "Terminal completion signal from Upstream arrived at MessageFlux."; } mediatorHolder.withReceiverInfo(logger.atWarning()).log(logMessage); drain(null); } /** * Invoked by the downstream message subscriber to signal the demand for messages. Whatever has been * requested can be sent downstream, so only signal the demand for what can be safely handled. * No messages will be sent downstream until the demand is signaled. * * @param n the number of messages to send to downstream. 
*/ @Override public void request(long n) { if (Operators.validate(n)) { Operators.addCap(REQUESTED, this, n); drain(null); } } /** * Invoked by downstream to signal termination by cancellation. */ @Override public void cancel() { if (cancelled) { return; } cancelled = true; mediatorHolder.withReceiverInfo(logger.atWarning()) .log("Downstream cancellation signal arrived at MessageFlux."); if (WIP.getAndIncrement(this) == 0) { upstream.cancel(); mediatorHolder.freeze(); } } /** * Invoked by the new mediator when it is ready to be used. A mediator constructed in 'onNext' moves * to ready state when its backing receiver is active. * * @param updateDispositionFunc the function to disposition messages from mediator's backing receiver. */ void onMediatorReady(BiFunction<String, DeliveryState, Mono<Void>> updateDispositionFunc) { retryAttempts.set(0); parent.onNextUpdateDispositionFunction(updateDispositionFunc); drain(null); } /** * The serialized entry point to drain-loop. * * @param dataSignal the message to drop if the operator is terminated by cancellation. */ void drain(Message dataSignal) { if (WIP.getAndIncrement(this) != 0) { if (dataSignal != null && cancelled) { Operators.onDiscard(dataSignal, messageSubscriber.currentContext()); } return; } drainLoop(); } /** * The serialized drain-loop (implementation patterns inspired from RxJava, Reactor Operators). * Reference: 'Operator Concurrency Primitives' series https: */ private void drainLoop() { int missed = 1; CoreSubscriber<? 
super Message> downstream = messageSubscriber; for (; ;) { boolean d = done; ReactorReceiverMediator mediator = mediatorHolder.mediator; boolean hasMediator = mediator != null; if (terminateIfCancelled(downstream, null)) { return; } if (terminateIfErrorOrCompletionSignaled(d, downstream, null)) { return; } long r = requested; long emitted = 0L; boolean mediatorTerminatedAndDrained = false; if (r != 0L && hasMediator) { Queue<Message> q = mediator.queue; while (emitted != r) { Message message = q.poll(); if (terminateIfCancelled(downstream, message)) { return; } if (terminateIfErrorOrCompletionSignaled(done, downstream, message)) { return; } boolean empty = message == null; if (empty && mediator.done) { mediatorTerminatedAndDrained = true; break; } if (empty) { break; } messageSubscriber.onNext(message); emitted++; } if (emitted == r) { if (mediator.queue.isEmpty() && mediator.done) { mediatorTerminatedAndDrained = true; } } if (emitted != 0 && r != Long.MAX_VALUE) { r = REQUESTED.addAndGet(this, -emitted); } mediator.update(r, emitted); } if (r == 0L && hasMediator) { if (terminateIfCancelled(downstream, null)) { return; } if (terminateIfErrorOrCompletionSignaled(done, downstream, null)) { return; } if (mediator.queue.isEmpty() && mediator.done) { mediatorTerminatedAndDrained = true; } } if (mediatorTerminatedAndDrained && !mediator.isRetryInitiated) { mediator.isRetryInitiated = true; mediator.closeAsync().subscribe(); setTerminationSignalOrScheduleNextMediatorRequest(mediator.error, downstream, mediatorHolder); } missed = WIP.addAndGet(this, -missed); if (missed == 0) { break; } } } /** * CONTRACT: Never invoke from the outside of serialized drain-loop. * <p/> * See if downstream signaled cancellation to terminate the operator, if so, react to the cancellation. * * @param downstream the downstream. * @param messageDropped the message that gets dropped if cancellation was signaled. * @return true if canceled, false otherwise. 
 */
private boolean terminateIfCancelled(CoreSubscriber<? super Message> downstream, Message messageDropped) {
    if (cancelled) {
        // Downstream cancelled: drop the in-flight message, stop the upstream and
        // dispose of the current (and any future) mediator.
        Operators.onDiscard(messageDropped, downstream.currentContext());
        upstream.cancel();
        mediatorHolder.freeze();
        return true;
    }
    return false;
}

/**
 * CONTRACT: Never invoke from the outside of serialized drain-loop.
 * <p>
 * See if there is a pending signal for the operator termination with error or completion; if so, react to it
 * by terminating downstream.
 * </p>
 *
 * @param d indicates if the operator termination was signaled.
 * @param downstream the downstream.
 * @param messageDropped the message that gets dropped if termination happened.
 * @return true if terminated, false otherwise.
 */
private boolean terminateIfErrorOrCompletionSignaled(boolean d, CoreSubscriber<? super Message> downstream,
    Message messageDropped) {
    if (d) {
        final LoggingEventBuilder logBuilder = mediatorHolder.withReceiverInfo(logger.atWarning());
        Throwable e = error;
        if (e != null && e != Exceptions.TERMINATED) {
            // Atomically claim the error (marks ERROR as TERMINATED so it is signaled only once).
            e = Exceptions.terminate(ERROR, this);
            Operators.onDiscard(messageDropped, downstream.currentContext());
            upstream.cancel();
            mediatorHolder.freeze();
            logBuilder.log("MessageFlux reached a terminal error-state, signaling it downstream.", e);
            downstream.onError(e);
            return true;
        }
        // No (unclaimed) error: terminal state is completion.
        Operators.onDiscard(messageDropped, downstream.currentContext());
        upstream.cancel();
        mediatorHolder.freeze();
        logBuilder.log("MessageFlux reached a terminal completion-state, signaling it downstream.");
        downstream.onComplete();
        return true;
    }
    return false;
}

/**
 * CONTRACT: Never invoke from the outside of serialized drain-loop.
 * <p>
 * 1. When retry is enabled (i.e., NULL_RETRY_POLICY is not set) then schedule request (retry) for the next mediator if
 * <ul>
 * <li>the operator is not in a termination signaled state,</li>
 * <li> and
 * <ul>
 * <li>there is no error Or</li>
 * <li>error is retriable and the retry is not exhausted.</li>
 * </ul>
 * </li>
 * </ul>
 * 2. If retry is enabled (i.e., NULL_RETRY_POLICY is not set) and there is 'non-retriable or retry exhaust'
 * error, then set an error signal for the drain-loop to terminate the operator.
 * <br/>
 * 3. If retry is disabled (i.e., NULL_RETRY_POLICY is set), then set an error signal (if first receiver error-ed)
 * or completion signal (if first receiver completed) for the drain-loop to terminate the operator.
 * </p>
 *
 * @param error the error that leads to error-ed termination of the last mediator or {@code null}
 * if terminated with completion.
 * @param downstream the downstream.
 * @param mediatorHolder the mediator holder.
 */
private void setTerminationSignalOrScheduleNextMediatorRequest(Throwable error,
    CoreSubscriber<? super Message> downstream, MediatorHolder mediatorHolder) {
    final LoggingEventBuilder logBuilder = mediatorHolder.withReceiverInfo(logger.atWarning());
    if (cancelled || done) {
        logBuilder.log("MessageFlux reached terminal-state [done:{}, cancelled:{}].", done, cancelled);
        return;
    }
    if (retryPolicy == NULL_RETRY_POLICY) {
        // Retry disabled: propagate the first receiver's terminal signal as the operator's own.
        if (error == null) {
            onComplete();
        } else {
            onError(error);
        }
        return;
    }
    final Duration delay;
    if (error == null) {
        // Receiver completed normally; retry after a fixed one-second backoff.
        delay = Duration.ofSeconds(1);
        logBuilder.addKeyValue("retryAfter", delay.toMillis())
            .log("Current mediator reached terminal completion-state (retriable:true).");
    } else {
        final int attempt = retryAttempts.incrementAndGet();
        // A null delay from the policy means non-retriable error or retries exhausted.
        delay = retryPolicy.calculateRetryDelay(error, attempt);
        if (delay != null) {
            logBuilder.addKeyValue("attempt", attempt)
                .addKeyValue("retryAfter", delay.toMillis())
                .log("Current mediator reached terminal error-state (retriable:true).", error);
        } else {
            logBuilder.addKeyValue("attempt", attempt)
                .log("Current mediator reached terminal error-state (retriable:false) Or MessageFlux retries exhausted.", error);
            onError(error);
            return;
        }
    }
    try {
        scheduleNextMediatorRequest(delay, mediatorHolder);
    } catch (RejectedExecutionException ree) {
        final RuntimeException e = Operators.onRejectedExecution(ree, downstream.currentContext());
        mediatorHolder.withReceiverInfo(logger.atWarning())
            .log("Unable to schedule a request for a new mediator (retriable:false).", e);
        onError(e);
    }
}

/**
 * Schedule a task to request a new mediator.
 *
 * @param delay the backoff duration before requesting the next mediator.
 * @param mediatorHolder the mediator holder.
 * @throws RejectedExecutionException if the scheduler is unable to schedule the task.
 */
private void scheduleNextMediatorRequest(Duration delay, MediatorHolder mediatorHolder) {
    final Runnable task = () -> {
        final LoggingEventBuilder logBuilder = mediatorHolder.withReceiverInfo(logger.atWarning());
        if (cancelled || done) {
            logBuilder.log("During the backoff, MessageFlux reached terminal-state [done:{}, cancelled:{}].",
                done, cancelled);
            return;
        }
        logBuilder.log("Requesting a new mediator.");
        upstream.request(1);
    };
    // Tracked in the holder so freeze() can dispose a pending backoff task.
    mediatorHolder.nextMediatorRequestDisposable
        = Schedulers.parallel().schedule(task, delay.toMillis(), TimeUnit.MILLISECONDS);
}
}

/**
 * The mediator that coordinates between {@link RecoverableReactorReceiver} and a receiver {@link AmqpReceiveLink}.
 */
private static final class ReactorReceiverMediator
    implements AsyncCloseable, CoreSubscriber<Message>, Subscription {
    private static final Subscription CANCELLED_SUBSCRIPTION = Operators.cancelledSubscription();
    private final RecoverableReactorReceiver parent;
    private final AmqpReceiveLink receiver;
    private final int prefetch;
    private final CreditFlowMode creditFlowMode;
    private final ClientLogger logger;
    private final Disposable.Composite endpointStateDisposable = Disposables.composite();
    // Assigned once in onSubscribe based on creditFlowMode.
    private CreditAccountingStrategy creditAccounting;
    private volatile boolean ready;
    private volatile Subscription s;
    @SuppressWarnings("rawtypes")
    private static final AtomicReferenceFieldUpdater<ReactorReceiverMediator, Subscription> S
        = AtomicReferenceFieldUpdater.newUpdater(ReactorReceiverMediator.class, Subscription.class, "s");
    volatile Throwable error;
    @SuppressWarnings("rawtypes")
    static final AtomicReferenceFieldUpdater<ReactorReceiverMediator, Throwable> ERROR
        = AtomicReferenceFieldUpdater.newUpdater(ReactorReceiverMediator.class, Throwable.class, "error");
    /**
     * The flag indicating if the mediator is terminated by completion or error.
     */
    volatile boolean done;
    /**
     * The drain loop iteration that first identifies the mediator as terminated (done == true) and
     * drained (queue.isEmpty() == true) will initiate a retry to obtain the next mediator. While that retry
     * completion is pending, any request for messages from downstream may lead to further drain loop iterations;
     * the 'isRetryInitiated' flag ensures those drain loop iterations (those also see the mediator as terminated
     * and drained) will not initiate duplicate retries.
     */
    volatile boolean isRetryInitiated;
    /**
     * The queue holding messages from the backing receiver's message publisher, waiting to be drained by
     * the drain-loop iterations.
     */
    final Queue<Message> queue;

    /**
     * Create a mediator to channel events (messages, termination) from a receiver to recoverable-receiver.
     *
     * @param parent the recoverable-receiver (a.k.a. parent).
     * @param receiver the receiver backing the mediator.
     * @param prefetch the number of messages to prefetch using the receiver (for a less chatty network
     * and faster message processing on the client).
     * @param creditFlowMode the mode indicating how to compute the credit and when to send it to the broker.
     */
    ReactorReceiverMediator(RecoverableReactorReceiver parent, AmqpReceiveLink receiver, int prefetch,
        CreditFlowMode creditFlowMode, ClientLogger logger) {
        this.parent = parent;
        this.receiver = receiver;
        this.prefetch = prefetch;
        this.creditFlowMode = creditFlowMode;
        this.logger = logger;
        this.queue = Queues.<Message>get(Integer.MAX_VALUE).get();
    }

    /**
     * Invoked by the parent {@link RecoverableReactorReceiver} when it is ready to use this new mediator
     * (the mediator facilitates communication between the parent and the new receiver ({@link AmqpReceiveLink}) that
     * the mediator wraps). In response, this mediator notifies the parent about its readiness by invoking
     * {@link RecoverableReactorReceiver#onMediatorReady(DispositionFunction)} once the receiver turns ACTIVE.
     */
    void onParentReady() {
        updateLogWithReceiverId(logger.atWarning()).log("Setting next mediator and waiting for activation.");
        receiver.receive().subscribe(this);
        final Disposable endpointDisposable = receiver.getEndpointStates()
            // The filter applies to onNext emissions only; error/completion still propagate below.
            .filter(s -> s == AmqpEndpointState.ACTIVE)
            .publishOn(ReceiversPumpingScheduler.instance())
            .doOnEach(event -> {
                if (event.isOnNext()) {
                    assert event.get() == AmqpEndpointState.ACTIVE;
                    if (!ready) {
                        updateLogWithReceiverId(logger.atWarning()).log("The mediator is active.");
                        ready = true;
                        parent.onMediatorReady(this::updateDisposition);
                    }
                    return;
                }
                if (event.isOnError()) {
                    final Throwable e = event.getThrowable();
                    updateLogWithReceiverId(logger.atWarning()).log("Receiver emitted terminal error.", e);
                    onLinkError(e);
                    return;
                }
                if (event.isOnComplete()) {
                    updateLogWithReceiverId(logger.atWarning()).log("Receiver emitted terminal completion.");
                    onLinkComplete();
                }
            }).subscribe(__ -> { }, __ -> { }, () -> { });
        endpointStateDisposable.add(endpointDisposable);
    }

    /**
     * Invoked in response to the subscription to the receiver's message publisher.
     *
     * @param s the subscription to request messages from the receiver's message publisher and terminate
     * that publisher through cancellation when it is no longer needed.
     */
    @Override
    public void onSubscribe(Subscription s) {
        if (Operators.setOnce(S, this, s)) {
            switch (creditFlowMode) {
                case RequestDriven:
                    creditAccounting = new RequestDrivenCreditAccountingStrategy(receiver, s, prefetch, logger);
                    break;
                case EmissionDriven:
                    creditAccounting = new EmissionDrivenCreditAccountingStrategy(receiver, s, prefetch, logger);
                    break;
                default:
                    throw new IllegalArgumentException("Unknown CreditFlowMode " + creditFlowMode);
            }
        }
    }

    /**
     * CONTRACT: Never invoke from the outside of serialized drain-loop.
     * <p>
     * Notify the latest view of the downstream request and messages emitted by the emitter-loop during
     * the last drain-loop iteration.
     * </p>
     *
     * @param request the latest view of the downstream request.
     * @param emitted the number of messages emitted by the latest emitter-loop run.
     */
    void update(long request, long emitted) {
        if (ready && !done) {
            creditAccounting.update(request, emitted);
        }
    }

    /**
     * Invoked by the receiver's message publisher to deliver a message.
     *
     * @param message the message.
     */
    @Override
    public void onNext(Message message) {
        if (done) {
            Operators.onNextDropped(message, parent.currentContext());
            return;
        }
        if (s == Operators.cancelledSubscription()) {
            Operators.onDiscard(message, parent.currentContext());
            return;
        }
        if (queue.offer(message)) {
            parent.drain(message);
        } else {
            // Queue overflow; terminate the mediator and let the drain-loop observe it.
            Operators.onOperatorError(this,
                Exceptions.failWithOverflow(Exceptions.BACKPRESSURE_ERROR_QUEUE_FULL),
                parent.messageSubscriber.currentContext());
            Operators.onDiscard(message, parent.messageSubscriber.currentContext());
            done = true;
            parent.drain(message);
        }
    }

    // Termination is signaled via the endpoint-state publisher (onLinkError/onLinkComplete),
    // not through the message publisher; hence this is intentionally a no-op.
    @Override
    public void onError(Throwable e) {
    }

    /**
     * Invoked by the receiver's endpoint publisher to signal mediator termination with an error.
     *
     * @param e the error signaled.
     */
    private void onLinkError(Throwable e) {
        if (done) {
            Operators.onErrorDropped(e, parent.messageSubscriber.currentContext());
            return;
        }
        if (ERROR.compareAndSet(this, null, e)) {
            done = true;
            parent.drain(null);
        } else {
            done = true;
            Operators.onErrorDropped(e, parent.messageSubscriber.currentContext());
        }
    }

    // Intentional no-op; see onError above.
    @Override
    public void onComplete() {
    }

    /**
     * Invoked by the receiver's endpoint publisher to signal mediator termination with completion.
     */
    private void onLinkComplete() {
        if (done) {
            return;
        }
        done = true;
        parent.drain(null);
    }

    @Override
    public void request(long n) {
        // Credit/request accounting must flow through update(long, long) from the drain-loop.
        throw new IllegalStateException("The request accounting must be through update(,).");
    }

    @Override
    public void cancel() {
        if (Operators.terminate(S, this)) {
            Operators.onDiscardQueueWithClear(queue, parent.currentContext(), null);
        }
        endpointStateDisposable.dispose();
    }

    /**
     * Close the mediator. Closing is triggered in the following cases -
     * <ul>
     * <li>When {@link RecoverableReactorReceiver} switches to a new (i.e., next) mediator, it closes the current mediator.</li>
     * <li>When {@link RecoverableReactorReceiver} terminates (hence {@link MessageFlux}) due to
     * <ul>
     * <li>downstream cancellation or</li>
     * <li>upstream termination with error or completion or</li>
     * <li>retry-exhaust-error or non-retriable-error or</li>
     * <li>termination of receiver with error or completion when NULL_RETRY_POLICY is set,</li>
     * </ul>
     * it closes the current (i.e., last) mediator. </li>
     * </ul>
     */
    @Override
    public Mono<Void> closeAsync() {
        cancel();
        return receiver.closeAsync();
    }

    /**
     * Updates the disposition state of a message uniquely identified by the given delivery tag.
     *
     * @param deliveryTag delivery tag of message.
     * @param deliveryState Delivery state of message.
     *
     * @return A Mono that completes when the state is successfully updated and acknowledged by message broker.
     */
    private Mono<Void> updateDisposition(String deliveryTag, DeliveryState deliveryState) {
        if (done || s == CANCELLED_SUBSCRIPTION) {
            final String state
                = String.format("[link.done:%b link.cancelled:%b parent.done:%b parent.cancelled:%b]",
                    done, s == CANCELLED_SUBSCRIPTION, parent.done, parent.cancelled);
            final DeliveryNotOnLinkException dispositionError
                = DeliveryNotOnLinkException.linkClosed(deliveryTag, deliveryState);
            // Suppress receiver and upstream errors (if any) for diagnostics.
            final Throwable receiverError = error;
            if (receiverError != null) {
                dispositionError.addSuppressed(receiverError);
            }
            final Throwable upstreamError = parent.error;
            if (upstreamError != null) {
                dispositionError.addSuppressed(upstreamError);
            }
            return monoError(logger.atError()
                .addKeyValue(DELIVERY_TAG_KEY, deliveryTag)
                .addKeyValue(DELIVERY_STATE_KEY, deliveryState)
                .addKeyValue("messageFluxState", state), dispositionError);
        }
        return receiver.updateDisposition(deliveryTag, deliveryState);
    }

    private LoggingEventBuilder updateLogWithReceiverId(LoggingEventBuilder builder) {
        return builder
            .addKeyValue(CONNECTION_ID_KEY, receiver.getConnectionId())
            .addKeyValue(LINK_NAME_KEY, receiver.getLinkName())
            .addKeyValue(ENTITY_PATH_KEY, receiver.getEntityPath());
    }
}

/**
 * A type that supports atomically setting a mediator and disposing of the last set mediator upon freezing.
 * Once frozen, further attempt to set the mediator will be rejected. The object of this type holds
 * the current mediator that the drain-loop access to read events from the receiver (backing the mediator).
 */
private static final class MediatorHolder {
    private boolean isFrozen;
    volatile ReactorReceiverMediator mediator;
    volatile Disposable nextMediatorRequestDisposable;

    /**
     * Try to set the current mediator for the drain-loop.
     *
     * @param mediator the mediator.
     * @return true if the mediator is set successfully, false if the attempt to set is rejected due
     * to the holder in the frozen state.
     */
    boolean trySet(ReactorReceiverMediator mediator) {
        synchronized (this) {
            if (isFrozen) {
                return false;
            }
            this.mediator = mediator;
            return true;
        }
    }

    /**
     * Freeze the holder to dispose of the current mediator and any resources it tracks; no further
     * mediator can be set once frozen. Freezing happens when the message-flux operator is terminated.
     */
    void freeze() {
        final Disposable d;
        final ReactorReceiverMediator m;
        synchronized (this) {
            if (isFrozen) {
                return;
            }
            d = nextMediatorRequestDisposable;
            m = this.mediator;
            isFrozen = true;
        }
        // Dispose outside the lock: never call alien code while holding the monitor.
        if (d != null) {
            d.dispose();
        }
        if (m != null) {
            m.closeAsync().subscribe();
        }
    }

    String getLinkName() {
        final ReactorReceiverMediator m = mediator;
        return m != null ? m.receiver.getLinkName() : null;
    }

    /**
     * Annotate the log builder with the receiver info (connectionId:linkName:entityPath) if the mediator has
     * receiver set, else nop.
     *
     * @param builder the log builder to annotate.
     * @return the log builder annotated with receiver info.
     */
    LoggingEventBuilder withReceiverInfo(LoggingEventBuilder builder) {
        final ReactorReceiverMediator m = mediator;
        if (m != null) {
            return builder.addKeyValue(CONNECTION_ID_KEY, m.receiver.getConnectionId())
                .addKeyValue(LINK_NAME_KEY, m.receiver.getLinkName())
                .addKeyValue(ENTITY_PATH_KEY, m.receiver.getEntityPath());
        }
        return builder;
    }
}
}
class MessageFlux extends FluxOperator<AmqpReceiveLink, Message> {
    /** An AmqpRetryPolicy constant indicating that MessageFlux should terminate when the first receiver terminates
     * (i.e., disables the retry action to obtain the next receiver from the upstream). **/
    public static final AmqpRetryPolicy NULL_RETRY_POLICY = new FixedAmqpRetryPolicy(new AmqpRetryOptions());
    private static final String MESSAGE_FLUX_KEY = "messageFlux";
    private final ClientLogger logger;
    /**
     * The prefetch value used by the credit computation strategy.
     */
    private final int prefetch;
    /**
     * The mode representing the strategy to compute and send the receiver credit.
     * See {@link CreditAccountingStrategy}
     */
    private final CreditFlowMode creditFlowMode;
    /**
     * The retry policy to use to establish a new receiver when the current receiver encounters a terminal error.
     */
    private final AmqpRetryPolicy retryPolicy;
    /**
     * The function for updating disposition state of messages using the current receiver.
     * Initialized to {@link DispositionFunction#NO_DISPOSITION} so that a disposition attempt made before
     * the first receiver becomes ready fails with a descriptive {@link IllegalStateException} instead of
     * a NullPointerException (the field was previously left unassigned until the first mediator was ready).
     */
    private volatile DispositionFunction updateDispositionFunc = DispositionFunction.NO_DISPOSITION;

    /**
     * Create a message-flux to stream messages from a messaging entity to downstream subscriber.
     *
     * @param source the upstream source that, upon a request, provide a new receiver connected to the messaging entity.
     * @param prefetch the number of messages that the operator should prefetch from the messaging entity (for a
     * less chatty network and faster message processing on the client).
     * @param creditFlowMode the mode indicating how to compute the credit and when to send it to the broker.
     * @param retryPolicy the retry policy to use to obtain a new receiver upon current receiver termination.
     * @throws IllegalStateException if the {@code prefetch} is a negative value.
     * @throws NullPointerException if the {@code retryPolicy} is {@code null}.
     */

    /**
     * Register the downstream subscriber.
     *
     * @param actual the downstream subscriber interested in the published messages and termination.
     */
    @Override
    public void subscribe(CoreSubscriber<? super Message> actual) {
        source.subscribe(new RecoverableReactorReceiver(this, actual, prefetch, creditFlowMode, retryPolicy));
    }

    /**
     * Updates the disposition state of a message uniquely identified by the given delivery tag.
     *
     * @param deliveryTag delivery tag of message.
     * @param deliveryState Delivery state of message.
     *
     * @return A Mono that completes when the state is successfully updated and acknowledged by message broker,
     * or errors with {@link IllegalStateException} if no receive-link has been established yet.
     */
    public Mono<Void> updateDisposition(String deliveryTag, DeliveryState deliveryState) {
        // Read the volatile once so the same function handles the whole call.
        final DispositionFunction function = this.updateDispositionFunc;
        return function.updateDisposition(deliveryTag, deliveryState);
    }

    /**
     * The callback invoked when next receiver is attached to the messaging entity from which this message-flux
     * instance stream messages. There will be only one receiver at a time, and this callback delivers the reference
     * to the function to disposition messages that arrives in the new receiver.
     *
     * @param updateDispositionFunc the function to disposition messages delivered by the current backing receiver.
     */
    void onNextUpdateDispositionFunction(DispositionFunction updateDispositionFunc) {
        this.updateDispositionFunc = updateDispositionFunc;
    }

    /**
     * Represents a function that accepts delivery tag and disposition state {@link DeliveryState} to set for the message
     * identified by that delivery tag. The function returns {@link Mono} representing the outcome of the disposition
     * operation attempted.
     */
    @FunctionalInterface
    private interface DispositionFunction {
        /**
         * Indicate that the disposition cannot be attempted as there is no backing receiver link to perform the operation.
         */
        DispositionFunction NO_DISPOSITION = (t, s) -> Mono
            .error(new IllegalStateException("Cannot update disposition as no receive-link is established."));

        /**
         * Updates the disposition state of a message uniquely identified by the given delivery tag.
         *
         * @param deliveryTag delivery tag of message.
         * @param deliveryState Delivery state of message.
* @return A Mono that completes when the state is successfully updated and acknowledged by message broker.
 */
Mono<Void> updateDisposition(String deliveryTag, DeliveryState deliveryState);
}

/**
 * The underlying consumer and producer extension of the message-flux operator. The consuming side processes events
 * (about new receiver, terminal signals) from the upstream and events (messages, terminal signals) from
 * the current receiver. The producing side publishes the messages to message-flux's downstream. The type has
 * a recovery mechanism to obtain a new receiver from upstream upon the current receiver's termination.
 * Recoveries happen underneath while the messages flow transparently downstream. The type can terminate downstream
 * if the upstream terminates, the recovery path encounters a non-retriable error (i.e., the current receiver
 * terminated with a non-retriable error), or recovery retries exhaust.
 */
private static final class RecoverableReactorReceiver implements CoreSubscriber<AmqpReceiveLink>, Subscription {
    private final MediatorHolder mediatorHolder = new MediatorHolder();
    private final MessageFlux parent;
    private final int prefetch;
    private final CreditFlowMode creditFlowMode;
    private final AmqpRetryPolicy retryPolicy;
    private final ClientLogger logger;
    private final AtomicInteger retryAttempts = new AtomicInteger();
    private final CoreSubscriber<? super Message> messageSubscriber;
    private Subscription upstream;
    // Outstanding downstream demand, maintained via REQUESTED.
    private volatile long requested;
    @SuppressWarnings("rawtypes")
    private static final AtomicLongFieldUpdater<RecoverableReactorReceiver> REQUESTED
        = AtomicLongFieldUpdater.newUpdater(RecoverableReactorReceiver.class, "requested");
    // Work-in-progress counter serializing entry to the drain-loop.
    private volatile int wip;
    @SuppressWarnings("rawtypes")
    private static final AtomicIntegerFieldUpdater<RecoverableReactorReceiver> WIP
        = AtomicIntegerFieldUpdater.newUpdater(RecoverableReactorReceiver.class, "wip");
    private volatile boolean done;
    private volatile boolean cancelled;
    private volatile Throwable error;
    @SuppressWarnings("rawtypes")
    private static final AtomicReferenceFieldUpdater<RecoverableReactorReceiver, Throwable> ERROR
        = AtomicReferenceFieldUpdater.newUpdater(RecoverableReactorReceiver.class, Throwable.class, "error");

    /**
     * Create a recoverable-receiver that supports the message-flux to stream messages from the receiver attached
     * to a messaging entity to the message-flux's downstream subscriber and recover from receiver termination.
     *
     * @param parent the parent message-flux.
     * @param messageSubscriber the message-flux's downstream subscriber to notify the events.
     * @param prefetch the number of messages that the operator should prefetch from the messaging entity
     * (for a less chatty network and faster message processing on the client).
     * @param creditFlowMode the mode indicating how to compute the credit and when to send it to the broker.
     * @param retryPolicy the retry policy to use to recover from receiver termination.
     */
    RecoverableReactorReceiver(MessageFlux parent, CoreSubscriber<? super Message> messageSubscriber, int prefetch,
        CreditFlowMode creditFlowMode, AmqpRetryPolicy retryPolicy) {
        this.parent = parent;
        this.messageSubscriber = messageSubscriber;
        this.prefetch = prefetch;
        this.creditFlowMode = creditFlowMode;
        this.retryPolicy = retryPolicy;
        this.logger = parent.logger;
    }

    /**
     * Invoked by the upstream in response to message-flux subscribing to it.
     *
     * @param s the subscription handle for requesting receivers from the upstream or terminating upstream
     * through cancellation when it is no longer needed.
     */
    @Override
    public void onSubscribe(Subscription s) {
        if (Operators.validate(upstream, s)) {
            upstream = s;
            messageSubscriber.onSubscribe(this);
            // Request the first receiver.
            s.request(1);
        }
    }

    /**
     * Invoked by the upstream to deliver new receiver.
     *
     * @param receiver the new receiver.
     */
    @Override
    public void onNext(AmqpReceiveLink receiver) {
        if (done) {
            receiver.closeAsync().subscribe();
            Operators.onNextDropped(receiver, messageSubscriber.currentContext());
            return;
        }
        final ReactorReceiverMediator mediator
            = new ReactorReceiverMediator(this, receiver, prefetch, creditFlowMode, logger);
        if (mediatorHolder.trySet(mediator)) {
            mediator.onParentReady();
        } else {
            // Holder is frozen (operator already terminated); dispose of the late receiver.
            logger.atWarning()
                .addKeyValue("oldLinkName", mediatorHolder.getLinkName())
                .addKeyValue(LINK_NAME_KEY, receiver.getLinkName())
                .addKeyValue(ENTITY_PATH_KEY, receiver.getEntityPath())
                .log("Got a AmqpReceiveLink when the MessageFlux is already terminated.");
            receiver.closeAsync().subscribe();
            Operators.onDiscard(receiver, messageSubscriber.currentContext());
        }
    }

    /**
     * Signals operator termination with error.
     * <ul>
     * <li>Invoked by the upstream when it errors, or</li>
     * <li>Invoked by the drain-loop when it detects a 'non-retriable or retry exhaust' error, or </li>
     * <li>Invoked by the drain-loop when it detects the receiver signaled terminal completion and retry
     * is disabled (i.e., NULL_RETRY_POLICY is set).</li>
     * </ul>
     *
     * @param e the error signaled.
     */
    @Override
    public void onError(Throwable e) {
        if (done) {
            Operators.onErrorDropped(e, messageSubscriber.currentContext());
            return;
        }
        if (Exceptions.addThrowable(ERROR, this, e)) {
            done = true;
            final String logMessage;
            if (retryPolicy == NULL_RETRY_POLICY) {
                logMessage = "Terminal error signal from Upstream|Receiver arrived at MessageFlux.";
            } else {
                logMessage = "Terminal error signal from Upstream|RetryLoop arrived at MessageFlux.";
            }
            mediatorHolder.withReceiverInfo(logger.atWarning()).log(logMessage, e);
            drain(null);
        } else {
            Operators.onErrorDropped(e, messageSubscriber.currentContext());
        }
    }

    /**
     * Signals operator termination with completion.
     * <ul>
     * <li>Invoked by the upstream when it completes, or</li>
     * <li>Invoked by the drain-loop when it detects first receiver signaled terminal completion and retry
     * is disabled (i.e., NULL_RETRY_POLICY is set).</li>
     * </ul>
     */
    @Override
    public void onComplete() {
        if (done) {
            return;
        }
        done = true;
        final String logMessage;
        if (retryPolicy == NULL_RETRY_POLICY) {
            logMessage = "Terminal completion signal from Upstream|Receiver arrived at MessageFlux.";
        } else {
            logMessage = "Terminal completion signal from Upstream arrived at MessageFlux.";
        }
        mediatorHolder.withReceiverInfo(logger.atWarning()).log(logMessage);
        drain(null);
    }

    /**
     * Invoked by the downstream message subscriber to signal the demand for messages. Whatever has been
     * requested can be sent downstream, so only signal the demand for what can be safely handled.
     * No messages will be sent downstream until the demand is signaled.
     *
     * @param n the number of messages to send to downstream.
     */
    @Override
    public void request(long n) {
        if (Operators.validate(n)) {
            Operators.addCap(REQUESTED, this, n);
            drain(null);
        }
    }

    /**
     * Invoked by downstream to signal termination by cancellation.
     */
    @Override
    public void cancel() {
        if (cancelled) {
            return;
        }
        cancelled = true;
        mediatorHolder.withReceiverInfo(logger.atWarning())
            .log("Downstream cancellation signal arrived at MessageFlux.");
        // If no drain-loop is active, perform the cleanup here; otherwise the active
        // drain-loop iteration observes 'cancelled' and cleans up.
        if (WIP.getAndIncrement(this) == 0) {
            upstream.cancel();
            mediatorHolder.freeze();
        }
    }

    /**
     * Invoked by the new mediator when it is ready to be used. A mediator constructed in 'onNext' moves
     * to ready state when its backing receiver is active.
     *
     * @param updateDispositionFunc the function to disposition messages from mediator's backing receiver.
     */
    void onMediatorReady(DispositionFunction updateDispositionFunc) {
        // A successful (re)connect resets the retry counter.
        retryAttempts.set(0);
        parent.onNextUpdateDispositionFunction(updateDispositionFunc);
        drain(null);
    }

    /**
     * The serialized entry point to drain-loop.
     *
     * @param dataSignal the message to drop if the operator is terminated by cancellation.
     */
    void drain(Message dataSignal) {
        if (WIP.getAndIncrement(this) != 0) {
            if (dataSignal != null && cancelled) {
                Operators.onDiscard(dataSignal, messageSubscriber.currentContext());
            }
            return;
        }
        drainLoop();
    }

    /**
     * The serialized drain-loop (implementation patterns inspired from RxJava, Reactor Operators).
     * Reference: 'Operator Concurrency Primitives' series https:
     */
    private void drainLoop() {
        int missed = 1;
        CoreSubscriber<? super Message> downstream = messageSubscriber;
        for (;;) {
            boolean d = done;
            ReactorReceiverMediator mediator = mediatorHolder.mediator;
            boolean hasMediator = mediator != null;
            if (terminateIfCancelled(downstream, null)) {
                return;
            }
            if (terminateIfErrorOrCompletionSignaled(d, downstream, null)) {
                return;
            }
            long r = requested;
            long emitted = 0L;
            boolean mediatorTerminatedAndDrained = false;
            if (r != 0L && hasMediator) {
                Queue<Message> q = mediator.queue;
                // Emitter-loop: publish queued messages downstream up to the demand 'r'.
                while (emitted != r) {
                    Message message = q.poll();
                    if (terminateIfCancelled(downstream, message)) {
                        return;
                    }
                    if (terminateIfErrorOrCompletionSignaled(done, downstream, message)) {
                        return;
                    }
                    boolean empty = message == null;
                    if (empty && mediator.done) {
                        mediatorTerminatedAndDrained = true;
                        break;
                    }
                    if (empty) {
                        break;
                    }
                    messageSubscriber.onNext(message);
                    emitted++;
                }
                if (emitted == r) {
                    if (mediator.queue.isEmpty() && mediator.done) {
                        mediatorTerminatedAndDrained = true;
                    }
                }
                if (emitted != 0 && r != Long.MAX_VALUE) {
                    r = REQUESTED.addAndGet(this, -emitted);
                }
                // Let the mediator's credit accounting see the latest demand and emission count.
                mediator.update(r, emitted);
            }
            if (r == 0L && hasMediator) {
                if (terminateIfCancelled(downstream, null)) {
                    return;
                }
                if (terminateIfErrorOrCompletionSignaled(done, downstream, null)) {
                    return;
                }
                if (mediator.queue.isEmpty() && mediator.done) {
                    mediatorTerminatedAndDrained = true;
                }
            }
            if (mediatorTerminatedAndDrained && !mediator.isRetryInitiated) {
                // First iteration to see terminated-and-drained initiates the (single) retry.
                mediator.isRetryInitiated = true;
                mediator.closeAsync().subscribe();
                setTerminationSignalOrScheduleNextMediatorRequest(mediator.error, downstream, mediatorHolder);
            }
            missed = WIP.addAndGet(this, -missed);
            if (missed == 0) {
                break;
            }
        }
    }

    /**
     * CONTRACT: Never invoke from the outside of serialized drain-loop.
     * <p>
     * See if downstream signaled cancellation to terminate the operator, if so, react to the cancellation.
     * </p>
     * @param downstream the downstream.
     * @param messageDropped the message that gets dropped if cancellation was signaled.
     * @return true if canceled, false otherwise.
     */
    private boolean terminateIfCancelled(CoreSubscriber<? super Message> downstream, Message messageDropped) {
        if (cancelled) {
            Operators.onDiscard(messageDropped, downstream.currentContext());
            upstream.cancel();
            mediatorHolder.freeze();
            return true;
        }
        return false;
    }

    /**
     * CONTRACT: Never invoke from the outside of serialized drain-loop.
     * <p>
     * See if there is a pending signal for the operator termination with error or completion, if so, react to it
     * by terminating downstream.
     * </p>
     *
     * @param d indicate if the operator termination was signaled.
     * @param downstream the downstream.
     * @param messageDropped the message that gets dropped if termination happened.
     * @return true if terminated, false otherwise.
     */
    private boolean terminateIfErrorOrCompletionSignaled(boolean d, CoreSubscriber<? super Message> downstream,
        Message messageDropped) {
        if (d) {
            final LoggingEventBuilder logBuilder = mediatorHolder.withReceiverInfo(logger.atWarning());
            Throwable e = error;
            if (e != null && e != Exceptions.TERMINATED) {
                // Atomically claim the error so it is signaled downstream exactly once.
                e = Exceptions.terminate(ERROR, this);
                Operators.onDiscard(messageDropped, downstream.currentContext());
                upstream.cancel();
                mediatorHolder.freeze();
                logBuilder.log("MessageFlux reached a terminal error-state, signaling it downstream.", e);
                downstream.onError(e);
                return true;
            }
            Operators.onDiscard(messageDropped, downstream.currentContext());
            upstream.cancel();
            mediatorHolder.freeze();
            logBuilder.log("MessageFlux reached a terminal completion-state, signaling it downstream.");
            downstream.onComplete();
            return true;
        }
        return false;
    }

    /**
     * CONTRACT: Never invoke from the outside of serialized drain-loop.
     * <p></p>
     * 1. When retry is enabled (i.e., NULL_RETRY_POLICY is not set) then schedule request (retry) for the next mediator if
     * <ul>
     * <li>the operator is not in a termination signaled state,</li>
     * <li> and
     * <ul>
     * <li>there is no error Or</li>
     * <li>error is retriable and the retry is not exhausted.</li>
     * </ul>
     * </li>
     * </ul>
     * 2. If retry is enabled (i.e., NULL_RETRY_POLICY is not set) and there is 'non-retriable or retry exhaust'
     * error, then set an error signal for the drain-loop to terminate the operator.
     * <br/>
     * 3. If retry is disabled (i.e., NULL_RETRY_POLICY is set), then set an error signal (if first receiver error-ed)
     * or completion signal (if first receiver completed) for the drain-loop to terminate the operator.
     * <p></p>
     * @param error the error that leads to error-ed termination of the last mediator or {@code null}
     * if terminated with completion.
     * @param downstream the downstream.
     * @param mediatorHolder the mediator holder.
     */
    private void setTerminationSignalOrScheduleNextMediatorRequest(Throwable error,
        CoreSubscriber<? super Message> downstream, MediatorHolder mediatorHolder) {
        final LoggingEventBuilder logBuilder = mediatorHolder.withReceiverInfo(logger.atWarning());
        if (cancelled || done) {
            logBuilder.log("MessageFlux reached terminal-state [done:{}, cancelled:{}].", done, cancelled);
            return;
        }
        if (retryPolicy == NULL_RETRY_POLICY) {
            // Retry disabled: propagate the receiver's terminal signal as the operator's own.
            if (error == null) {
                onComplete();
            } else {
                onError(error);
            }
            return;
        }
        final Duration delay;
        if (error == null) {
            // Receiver completed normally; retry after a fixed one-second backoff.
            delay = Duration.ofSeconds(1);
            logBuilder.addKeyValue("retryAfter", delay.toMillis())
                .log("Current mediator reached terminal completion-state (retriable:true).");
        } else {
            final int attempt = retryAttempts.incrementAndGet();
            // A null delay from the policy means non-retriable error or retries exhausted.
            delay = retryPolicy.calculateRetryDelay(error, attempt);
            if (delay != null) {
                logBuilder.addKeyValue("attempt", attempt)
                    .addKeyValue("retryAfter", delay.toMillis())
                    .log("Current mediator reached terminal error-state (retriable:true).", error);
            } else {
                logBuilder.addKeyValue("attempt", attempt)
                    .log("Current mediator reached terminal error-state (retriable:false) Or MessageFlux retries exhausted.", error);
                onError(error);
                return;
            }
        }
        try {
            scheduleNextMediatorRequest(delay, mediatorHolder);
        } catch (RejectedExecutionException ree) {
            final RuntimeException e = Operators.onRejectedExecution(ree, downstream.currentContext());
            mediatorHolder.withReceiverInfo(logger.atWarning())
                .log("Unable to schedule a request for a new mediator (retriable:false).", e);
            onError(e);
        }
    }

    /**
     * Schedule a task to request a new mediator.
     *
     * @param delay the backoff duration before requesting the next mediator.
     * @param mediatorHolder the mediator holder.
     * @throws RejectedExecutionException if the scheduler is unable to schedule the task.
     */
    private void scheduleNextMediatorRequest(Duration delay, MediatorHolder mediatorHolder) {
        final Runnable task = () -> {
            final LoggingEventBuilder logBuilder = mediatorHolder.withReceiverInfo(logger.atWarning());
            if (cancelled || done) {
                logBuilder.log("During the backoff, MessageFlux reached terminal-state [done:{}, cancelled:{}].",
                    done, cancelled);
                return;
            }
            logBuilder.log("Requesting a new mediator.");
            upstream.request(1);
        };
        // Tracked in the holder so freeze() can dispose a pending backoff task.
        mediatorHolder.nextMediatorRequestDisposable
            = Schedulers.parallel().schedule(task, delay.toMillis(), TimeUnit.MILLISECONDS);
    }
}

/**
 * The mediator that coordinates between {@link RecoverableReactorReceiver} and a receiver {@link AmqpReceiveLink}.
 */
private static final class ReactorReceiverMediator
    implements AsyncCloseable, CoreSubscriber<Message>, Subscription {
    private static final Subscription CANCELLED_SUBSCRIPTION = Operators.cancelledSubscription();
    private final RecoverableReactorReceiver parent;
    private final AmqpReceiveLink receiver;
    private final int prefetch;
    private final CreditFlowMode creditFlowMode;
    private final ClientLogger logger;
    private final Disposable.Composite endpointStateDisposable = Disposables.composite();
    // Assigned once in onSubscribe based on creditFlowMode.
    private CreditAccountingStrategy creditAccounting;
    private volatile boolean ready;
    private volatile Subscription s;
    @SuppressWarnings("rawtypes")
    private static final AtomicReferenceFieldUpdater<ReactorReceiverMediator, Subscription> S
        = AtomicReferenceFieldUpdater.newUpdater(ReactorReceiverMediator.class, Subscription.class, "s");
    volatile Throwable error;
    @SuppressWarnings("rawtypes")
    static final AtomicReferenceFieldUpdater<ReactorReceiverMediator, Throwable> ERROR
        = AtomicReferenceFieldUpdater.newUpdater(ReactorReceiverMediator.class, Throwable.class, "error");
    /**
     * The flag indicating if the mediator is terminated by completion or error.
     */
    volatile boolean done;
    /**
     * The drain loop iteration that first identifies the mediator as terminated (done == true) and
     * drained (queue.isEmpty() == true) will initiate a retry to obtain the next mediator. While that retry
     * completion is pending, any request for messages from downstream may lead to further drain loop iterations;
     * the 'isRetryInitiated' flag ensures those drain loop iterations (those also see the mediator as terminated
     * and drained) will not initiate duplicate retries.
     */
    volatile boolean isRetryInitiated;
    /**
     * The queue holding messages from the backing receiver's message publisher, waiting to be drained by
     * the drain-loop iterations.
     */
    final Queue<Message> queue;

    /**
     * Create a mediator to channel events (messages, termination) from a receiver to recoverable-receiver.
     *
     * @param parent the recoverable-receiver (a.k.a. parent).
     * @param receiver the receiver backing the mediator.
     * @param prefetch the number of messages to prefetch using the receiver (for a less chatty network
     * and faster message processing on the client).
     * @param creditFlowMode the mode indicating how to compute the credit and when to send it to the broker.
     */
    ReactorReceiverMediator(RecoverableReactorReceiver parent, AmqpReceiveLink receiver, int prefetch,
        CreditFlowMode creditFlowMode, ClientLogger logger) {
        this.parent = parent;
        this.receiver = receiver;
        this.prefetch = prefetch;
        this.creditFlowMode = creditFlowMode;
        this.logger = logger;
        this.queue = Queues.<Message>get(Integer.MAX_VALUE).get();
    }

    /**
     * Invoked by the parent {@link RecoverableReactorReceiver} when it is ready to use this new mediator
     * (the mediator facilitates communication between the parent and the new receiver ({@link AmqpReceiveLink}) that
     * the mediator wraps). In response, this mediator notifies the parent about its readiness by invoking
     * {@link RecoverableReactorReceiver#onMediatorReady(DispositionFunction)} once the receiver turns ACTIVE.
     */
    void onParentReady() {
        updateLogWithReceiverId(logger.atWarning()).log("Setting next mediator and waiting for activation.");
        receiver.receive().subscribe(this);
        final Disposable endpointDisposable = receiver.getEndpointStates()
            // The filter applies to onNext emissions only; error/completion still propagate below.
            .filter(s -> s == AmqpEndpointState.ACTIVE)
            .publishOn(ReceiversPumpingScheduler.instance())
            .subscribe(state -> {
                assert state == AmqpEndpointState.ACTIVE;
                if (!ready) {
                    updateLogWithReceiverId(logger.atWarning()).log("The mediator is active.");
                    ready = true;
                    parent.onMediatorReady(this::updateDisposition);
                }
            }, e -> {
                updateLogWithReceiverId(logger.atWarning()).log("Receiver emitted terminal error.", e);
                onLinkError(e);
            }, () -> {
                updateLogWithReceiverId(logger.atWarning()).log("Receiver emitted terminal completion.");
                onLinkComplete();
            });
        endpointStateDisposable.add(endpointDisposable);
    }

    /**
     * Invoked in response to the subscription to the receiver's message publisher.
     *
     * @param s the subscription to request messages from the receiver's message publisher and terminate
     * that publisher through cancellation when it is no longer needed.
     */
    @Override
    public void onSubscribe(Subscription s) {
        if (Operators.setOnce(S, this, s)) {
            switch (creditFlowMode) {
                case RequestDriven:
                    creditAccounting = new RequestDrivenCreditAccountingStrategy(receiver, s, prefetch, logger);
                    break;
                case EmissionDriven:
                    creditAccounting = new EmissionDrivenCreditAccountingStrategy(receiver, s, prefetch, logger);
                    break;
                default:
                    throw new IllegalArgumentException("Unknown CreditFlowMode " + creditFlowMode);
            }
        }
    }

    /**
     * CONTRACT: Never invoke from the outside of serialized drain-loop.
     * <p>
     * Notify the latest view of the downstream request and messages emitted by the emitter-loop during
     * the last drain-loop iteration.
     * </p>
     * @param request the latest view of the downstream request.
     * @param emitted the number of messages emitted by the latest emitter-loop run.
     */
    void update(long request, long emitted) {
        if (ready && !done) {
            creditAccounting.update(request, emitted);
        }
    }

    /**
     * Invoked by the receiver's message publisher to deliver a message.
     *
     * @param message the message.
     */
    @Override
    public void onNext(Message message) {
        if (done) {
            Operators.onNextDropped(message, parent.currentContext());
            return;
        }
        if (s == Operators.cancelledSubscription()) {
            Operators.onDiscard(message, parent.currentContext());
            return;
        }
        if (queue.offer(message)) {
            parent.drain(message);
        } else {
            // Queue overflow; terminate the mediator and let the drain-loop observe it.
            Operators.onOperatorError(this,
                Exceptions.failWithOverflow(Exceptions.BACKPRESSURE_ERROR_QUEUE_FULL),
                parent.messageSubscriber.currentContext());
            Operators.onDiscard(message, parent.messageSubscriber.currentContext());
            done = true;
            parent.drain(message);
        }
    }

    // Termination is signaled via the endpoint-state publisher (onLinkError/onLinkComplete),
    // not through the message publisher; hence this is intentionally a no-op.
    @Override
    public void onError(Throwable e) {
    }

    /**
     * Invoked by the receiver's endpoint publisher to signal mediator termination with an error.
     *
     * @param e the error signaled.
*/ private void onLinkError(Throwable e) { if (done) { Operators.onErrorDropped(e, parent.messageSubscriber.currentContext()); return; } if (ERROR.compareAndSet(this, null, e)) { done = true; parent.drain(null); } else { done = true; Operators.onErrorDropped(e, parent.messageSubscriber.currentContext()); } } @Override public void onComplete() { } /** * Invoked by the receiver's endpoint publisher to signal mediator termination with completion. */ private void onLinkComplete() { if (done) { return; } done = true; parent.drain(null); } @Override public void request(long n) { throw new IllegalStateException("The request accounting must be through update(,)."); } @Override public void cancel() { if (Operators.terminate(S, this)) { Operators.onDiscardQueueWithClear(queue, parent.currentContext(), null); } endpointStateDisposable.dispose(); } /** * Close the mediator. Closing is triggered in the following cases - * <ul> * <li>When {@link RecoverableReactorReceiver} switches to a new (i.e., next) mediator, it closes the current mediator.</li> * <li>When {@link RecoverableReactorReceiver} terminates (hence {@link MessageFlux}) due to * <ul> * <li>downstream cancellation or</li> * <li>upstream termination with error or completion or</li> * <li>retry-exhaust-error or non-retriable-error or</li> * <li>termination of receiver with error or completion when NULL_RETRY_POLICY is set,</li> * </ul> * it closes the current (i.e., last) mediator. </li> * </ul> */ @Override public Mono<Void> closeAsync() { cancel(); return receiver.closeAsync(); } /** * Updates the disposition state of a message uniquely identified by the given delivery tag. * * @param deliveryTag delivery tag of message. * @param deliveryState Delivery state of message. * * @return A Mono that completes when the state is successfully updated and acknowledged by message broker. 
*/ private Mono<Void> updateDisposition(String deliveryTag, DeliveryState deliveryState) { if (done || s == CANCELLED_SUBSCRIPTION) { final String state = String.format("[link.done:%b link.cancelled:%b parent.done:%b parent.cancelled:%b]", done, s == CANCELLED_SUBSCRIPTION, parent.done, parent.cancelled); final DeliveryNotOnLinkException dispositionError = DeliveryNotOnLinkException.linkClosed(deliveryTag, deliveryState); final Throwable receiverError = error; if (receiverError != null) { dispositionError.addSuppressed(receiverError); } final Throwable upstreamError = parent.error; if (upstreamError != null) { dispositionError.addSuppressed(upstreamError); } return monoError(logger.atError() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .addKeyValue(DELIVERY_STATE_KEY, deliveryState) .addKeyValue("messageFluxState", state), dispositionError); } return receiver.updateDisposition(deliveryTag, deliveryState); } private LoggingEventBuilder updateLogWithReceiverId(LoggingEventBuilder builder) { return builder .addKeyValue(CONNECTION_ID_KEY, receiver.getConnectionId()) .addKeyValue(LINK_NAME_KEY, receiver.getLinkName()) .addKeyValue(ENTITY_PATH_KEY, receiver.getEntityPath()); } } /** * A type that supports atomically setting a mediator and disposing of the last set mediator upon freezing. * Once frozen, further attempt to set the mediator will be rejected. The object of this type holds * the current mediator that the drain-loop access to read events from the receiver (backing the mediator). */ private static final class MediatorHolder { private boolean isFrozen; volatile ReactorReceiverMediator mediator; volatile Disposable nextMediatorRequestDisposable; /** * Try to set the current mediator for the drain-loop. * * @param mediator the mediator. * @return true if the mediator is set successfully, false if the attempt to set is rejected due * to the holder in the frozen state. 
*/ boolean trySet(ReactorReceiverMediator mediator) { synchronized (this) { if (isFrozen) { return false; } this.mediator = mediator; return true; } } /** * Freeze the holder to dispose of the current mediator and any resources it tracks; no further * mediator can be set once frozen. Freezing happens when the message-flux operator is terminated. */ void freeze() { final Disposable d; final ReactorReceiverMediator m; synchronized (this) { if (isFrozen) { return; } d = nextMediatorRequestDisposable; m = this.mediator; isFrozen = true; } if (d != null) { d.dispose(); } if (m != null) { m.closeAsync().subscribe(); } } String getLinkName() { final ReactorReceiverMediator m = mediator; return m != null ? m.receiver.getLinkName() : null; } /** * annotate the log builder with the receiver info (connectionId:linkName:entityPath) if the mediator has * receiver set, else no-op. * * @param builder the log builder to annotate. * @return the log builder annotated with receiver info. */ LoggingEventBuilder withReceiverInfo(LoggingEventBuilder builder) { final ReactorReceiverMediator m = mediator; if (m != null) { return builder.addKeyValue(CONNECTION_ID_KEY, m.receiver.getConnectionId()) .addKeyValue(LINK_NAME_KEY, m.receiver.getLinkName()) .addKeyValue(ENTITY_PATH_KEY, m.receiver.getEntityPath()); } return builder; } } }
IIRC we discussed if we should map this to an actual operator like .handle rather than a side-effect operator like doOn\*
void onParentReady() { updateLogWithReceiverId(logger.atWarning()).log("Setting next mediator and waiting for activation."); receiver.receive().subscribe(this); final Disposable endpointDisposable = receiver.getEndpointStates() .filter(s -> s == AmqpEndpointState.ACTIVE) .publishOn(ReceiversPumpingScheduler.instance()) .doOnEach(event -> { if (event.isOnNext()) { assert event.get() == AmqpEndpointState.ACTIVE; if (!ready) { updateLogWithReceiverId(logger.atWarning()).log("The mediator is active."); ready = true; parent.onMediatorReady(this::updateDisposition); } return; } if (event.isOnError()) { final Throwable e = event.getThrowable(); updateLogWithReceiverId(logger.atWarning()).log("Receiver emitted terminal error.", e); onLinkError(e); return; } if (event.isOnComplete()) { updateLogWithReceiverId(logger.atWarning()).log("Receiver emitted terminal completion."); onLinkComplete(); } }).subscribe(__ -> { }, __ -> { }, () -> { }); endpointStateDisposable.add(endpointDisposable); }
.doOnEach(event -> {
void onParentReady() { updateLogWithReceiverId(logger.atWarning()).log("Setting next mediator and waiting for activation."); receiver.receive().subscribe(this); final Disposable endpointDisposable = receiver.getEndpointStates() .filter(s -> s == AmqpEndpointState.ACTIVE) .publishOn(ReceiversPumpingScheduler.instance()) .subscribe(state -> { assert state == AmqpEndpointState.ACTIVE; if (!ready) { updateLogWithReceiverId(logger.atWarning()).log("The mediator is active."); ready = true; parent.onMediatorReady(this::updateDisposition); } }, e -> { updateLogWithReceiverId(logger.atWarning()).log("Receiver emitted terminal error.", e); onLinkError(e); }, () -> { updateLogWithReceiverId(logger.atWarning()).log("Receiver emitted terminal completion."); onLinkComplete(); }); endpointStateDisposable.add(endpointDisposable); }
class ReactorReceiverMediator implements AsyncCloseable, CoreSubscriber<Message>, Subscription { private static final Subscription CANCELLED_SUBSCRIPTION = Operators.cancelledSubscription(); private final RecoverableReactorReceiver parent; private final AmqpReceiveLink receiver; private final int prefetch; private final CreditFlowMode creditFlowMode; private final ClientLogger logger; private final Disposable.Composite endpointStateDisposable = Disposables.composite(); private CreditAccountingStrategy creditAccounting; private volatile boolean ready; private volatile Subscription s; @SuppressWarnings("rawtypes") private static final AtomicReferenceFieldUpdater<ReactorReceiverMediator, Subscription> S = AtomicReferenceFieldUpdater.newUpdater(ReactorReceiverMediator.class, Subscription.class, "s"); volatile Throwable error; @SuppressWarnings("rawtypes") static final AtomicReferenceFieldUpdater<ReactorReceiverMediator, Throwable> ERROR = AtomicReferenceFieldUpdater.newUpdater(ReactorReceiverMediator.class, Throwable.class, "error"); /** * The flag indicating if the mediator is terminated by completion or error. */ volatile boolean done; /** * The drain loop iteration that first identifies the mediator as terminated (done == true) and * and drained (queue.isEmpty() == true) will initiate a retry to obtain the next mediator. While that retry * completion is pending, any request for messages from downstream may lead to further drain loop iterations; * the 'isRetryInitiated' flag ensures those drain loop iterations (those also see the mediator as terminated * and drained) will not initiate duplicate retries. */ volatile boolean isRetryInitiated; /** * The queue holding messages from the backing receiver's message publisher, waiting to be drained by * the drain-loop iterations. */ final Queue<Message> queue; /** * Create a mediator to channel events (messages, termination) from a receiver to recoverable-receiver. * * @param parent the recoverable-receiver (a.k.a. parent). 
* @param receiver the receiver backing the mediator. * @param prefetch the number of messages to prefetch using the receiver (for a less chatty network * and faster message processing on the client). * @param creditFlowMode the mode indicating how to compute the credit and when to send it to the broker. */ ReactorReceiverMediator(RecoverableReactorReceiver parent, AmqpReceiveLink receiver, int prefetch, CreditFlowMode creditFlowMode, ClientLogger logger) { this.parent = parent; this.receiver = receiver; this.prefetch = prefetch; this.creditFlowMode = creditFlowMode; this.logger = logger; this.queue = Queues.<Message>get(Integer.MAX_VALUE).get(); } /** * Invoked by the parent {@link RecoverableReactorReceiver} when it is ready to use this new mediator * (The mediator facilitate communication between the parent and the new receiver ({@link AmqpReceiveLink}) that * mediator wraps). In response, this mediator notifies the parent about its readiness by invoking * {@link RecoverableReactorReceiver */ /** * Invoked in response to the subscription to the receiver's message publisher. * * @param s the subscription to request messages from the receiver's message publisher and terminate * that publisher through cancellation when it is no longer needed. */ @Override public void onSubscribe(Subscription s) { if (Operators.setOnce(S, this, s)) { switch (creditFlowMode) { case RequestDriven: creditAccounting = new RequestDrivenCreditAccountingStrategy(receiver, s, prefetch, logger); break; case EmissionDriven: creditAccounting = new EmissionDrivenCreditAccountingStrategy(receiver, s, prefetch, logger); break; default: throw new IllegalArgumentException("Unknown CreditFlowMode " + creditFlowMode); } } } /** * CONTRACT: Never invoke from the outside of serialized drain-loop. * <p/> * Notify the latest view of the downstream request and messages emitted by the emitter-loop during * the last drain-loop iteration. * * @param request the latest view of the downstream request. 
* @param emitted the number of messages emitted by the latest emitter-loop run. */ void update(long request, long emitted) { if (ready && !done) { creditAccounting.update(request, emitted); } } /** * Invoked by the receiver's message publisher to deliver a message. * * @param message the message. */ @Override public void onNext(Message message) { if (done) { Operators.onNextDropped(message, parent.currentContext()); return; } if (s == Operators.cancelledSubscription()) { Operators.onDiscard(message, parent.currentContext()); return; } if (queue.offer(message)) { parent.drain(message); } else { Operators.onOperatorError(this, Exceptions.failWithOverflow(Exceptions.BACKPRESSURE_ERROR_QUEUE_FULL), parent.messageSubscriber.currentContext()); Operators.onDiscard(message, parent.messageSubscriber.currentContext()); done = true; parent.drain(message); } } @Override public void onError(Throwable e) { } /** * Invoked by the receiver's endpoint publisher to signal mediator termination with an error. * * @param e the error signaled. */ private void onLinkError(Throwable e) { if (done) { Operators.onErrorDropped(e, parent.messageSubscriber.currentContext()); return; } if (ERROR.compareAndSet(this, null, e)) { done = true; parent.drain(null); } else { done = true; Operators.onErrorDropped(e, parent.messageSubscriber.currentContext()); } } @Override public void onComplete() { } /** * Invoked by the receiver's endpoint publisher to signal mediator termination with completion. */ private void onLinkComplete() { if (done) { return; } done = true; parent.drain(null); } @Override public void request(long n) { throw new IllegalStateException("The request accounting must be through update(,)."); } @Override public void cancel() { if (Operators.terminate(S, this)) { Operators.onDiscardQueueWithClear(queue, parent.currentContext(), null); } endpointStateDisposable.dispose(); } /** * Close the mediator. 
Closing is triggered in the following cases - * <ul> * <li>When {@link RecoverableReactorReceiver} switches to a new (i.e., next) mediator, it closes the current mediator.</li> * <li>When {@link RecoverableReactorReceiver} terminates (hence {@link MessageFlux}) due to * <ul> * <li>downstream cancellation or</li> * <li>upstream termination with error or completion or</li> * <li>retry-exhaust-error or non-retriable-error or</li> * <li>termination of receiver with error or completion when NULL_RETRY_POLICY is set,</li> * </ul> * it closes the current (i.e., last) mediator. </li> * </ul> */ @Override public Mono<Void> closeAsync() { cancel(); return receiver.closeAsync(); } /** * Updates the disposition state of a message uniquely identified by the given delivery tag. * * @param deliveryTag delivery tag of message. * @param deliveryState Delivery state of message. * * @return A Mono that completes when the state is successfully updated and acknowledged by message broker. */ private Mono<Void> updateDisposition(String deliveryTag, DeliveryState deliveryState) { if (done || s == CANCELLED_SUBSCRIPTION) { final String state = String.format("[link.done:%b link.cancelled:%b parent.done:%b parent.cancelled:%b]", done, s == CANCELLED_SUBSCRIPTION, parent.done, parent.cancelled); final DeliveryNotOnLinkException dispositionError = DeliveryNotOnLinkException.linkClosed(deliveryTag, deliveryState); final Throwable receiverError = error; if (receiverError != null) { dispositionError.addSuppressed(receiverError); } final Throwable upstreamError = parent.error; if (upstreamError != null) { dispositionError.addSuppressed(upstreamError); } return monoError(logger.atError() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .addKeyValue(DELIVERY_STATE_KEY, deliveryState) .addKeyValue("messageFluxState", state), dispositionError); } return receiver.updateDisposition(deliveryTag, deliveryState); } private LoggingEventBuilder updateLogWithReceiverId(LoggingEventBuilder builder) { return builder 
.addKeyValue(CONNECTION_ID_KEY, receiver.getConnectionId()) .addKeyValue(LINK_NAME_KEY, receiver.getLinkName()) .addKeyValue(ENTITY_PATH_KEY, receiver.getEntityPath()); } }
class ReactorReceiverMediator implements AsyncCloseable, CoreSubscriber<Message>, Subscription { private static final Subscription CANCELLED_SUBSCRIPTION = Operators.cancelledSubscription(); private final RecoverableReactorReceiver parent; private final AmqpReceiveLink receiver; private final int prefetch; private final CreditFlowMode creditFlowMode; private final ClientLogger logger; private final Disposable.Composite endpointStateDisposable = Disposables.composite(); private CreditAccountingStrategy creditAccounting; private volatile boolean ready; private volatile Subscription s; @SuppressWarnings("rawtypes") private static final AtomicReferenceFieldUpdater<ReactorReceiverMediator, Subscription> S = AtomicReferenceFieldUpdater.newUpdater(ReactorReceiverMediator.class, Subscription.class, "s"); volatile Throwable error; @SuppressWarnings("rawtypes") static final AtomicReferenceFieldUpdater<ReactorReceiverMediator, Throwable> ERROR = AtomicReferenceFieldUpdater.newUpdater(ReactorReceiverMediator.class, Throwable.class, "error"); /** * The flag indicating if the mediator is terminated by completion or error. */ volatile boolean done; /** * The drain loop iteration that first identifies the mediator as terminated (done == true) and * and drained (queue.isEmpty() == true) will initiate a retry to obtain the next mediator. While that retry * completion is pending, any request for messages from downstream may lead to further drain loop iterations; * the 'isRetryInitiated' flag ensures those drain loop iterations (those also see the mediator as terminated * and drained) will not initiate duplicate retries. */ volatile boolean isRetryInitiated; /** * The queue holding messages from the backing receiver's message publisher, waiting to be drained by * the drain-loop iterations. */ final Queue<Message> queue; /** * Create a mediator to channel events (messages, termination) from a receiver to recoverable-receiver. * * @param parent the recoverable-receiver (a.k.a. parent). 
* @param receiver the receiver backing the mediator. * @param prefetch the number of messages to prefetch using the receiver (for a less chatty network * and faster message processing on the client). * @param creditFlowMode the mode indicating how to compute the credit and when to send it to the broker. */ ReactorReceiverMediator(RecoverableReactorReceiver parent, AmqpReceiveLink receiver, int prefetch, CreditFlowMode creditFlowMode, ClientLogger logger) { this.parent = parent; this.receiver = receiver; this.prefetch = prefetch; this.creditFlowMode = creditFlowMode; this.logger = logger; this.queue = Queues.<Message>get(Integer.MAX_VALUE).get(); } /** * Invoked by the parent {@link RecoverableReactorReceiver} when it is ready to use this new mediator * (The mediator facilitate communication between the parent and the new receiver ({@link AmqpReceiveLink}) that * mediator wraps). In response, this mediator notifies the parent about its readiness by invoking * {@link RecoverableReactorReceiver */ /** * Invoked in response to the subscription to the receiver's message publisher. * * @param s the subscription to request messages from the receiver's message publisher and terminate * that publisher through cancellation when it is no longer needed. */ @Override public void onSubscribe(Subscription s) { if (Operators.setOnce(S, this, s)) { switch (creditFlowMode) { case RequestDriven: creditAccounting = new RequestDrivenCreditAccountingStrategy(receiver, s, prefetch, logger); break; case EmissionDriven: creditAccounting = new EmissionDrivenCreditAccountingStrategy(receiver, s, prefetch, logger); break; default: throw new IllegalArgumentException("Unknown CreditFlowMode " + creditFlowMode); } } } /** * CONTRACT: Never invoke from the outside of serialized drain-loop. * <p> * Notify the latest view of the downstream request and messages emitted by the emitter-loop during * the last drain-loop iteration. * </p> * @param request the latest view of the downstream request. 
* @param emitted the number of messages emitted by the latest emitter-loop run. */ void update(long request, long emitted) { if (ready && !done) { creditAccounting.update(request, emitted); } } /** * Invoked by the receiver's message publisher to deliver a message. * * @param message the message. */ @Override public void onNext(Message message) { if (done) { Operators.onNextDropped(message, parent.currentContext()); return; } if (s == Operators.cancelledSubscription()) { Operators.onDiscard(message, parent.currentContext()); return; } if (queue.offer(message)) { parent.drain(message); } else { Operators.onOperatorError(this, Exceptions.failWithOverflow(Exceptions.BACKPRESSURE_ERROR_QUEUE_FULL), parent.messageSubscriber.currentContext()); Operators.onDiscard(message, parent.messageSubscriber.currentContext()); done = true; parent.drain(message); } } @Override public void onError(Throwable e) { } /** * Invoked by the receiver's endpoint publisher to signal mediator termination with an error. * * @param e the error signaled. */ private void onLinkError(Throwable e) { if (done) { Operators.onErrorDropped(e, parent.messageSubscriber.currentContext()); return; } if (ERROR.compareAndSet(this, null, e)) { done = true; parent.drain(null); } else { done = true; Operators.onErrorDropped(e, parent.messageSubscriber.currentContext()); } } @Override public void onComplete() { } /** * Invoked by the receiver's endpoint publisher to signal mediator termination with completion. */ private void onLinkComplete() { if (done) { return; } done = true; parent.drain(null); } @Override public void request(long n) { throw new IllegalStateException("The request accounting must be through update(,)."); } @Override public void cancel() { if (Operators.terminate(S, this)) { Operators.onDiscardQueueWithClear(queue, parent.currentContext(), null); } endpointStateDisposable.dispose(); } /** * Close the mediator. 
Closing is triggered in the following cases - * <ul> * <li>When {@link RecoverableReactorReceiver} switches to a new (i.e., next) mediator, it closes the current mediator.</li> * <li>When {@link RecoverableReactorReceiver} terminates (hence {@link MessageFlux}) due to * <ul> * <li>downstream cancellation or</li> * <li>upstream termination with error or completion or</li> * <li>retry-exhaust-error or non-retriable-error or</li> * <li>termination of receiver with error or completion when NULL_RETRY_POLICY is set,</li> * </ul> * it closes the current (i.e., last) mediator. </li> * </ul> */ @Override public Mono<Void> closeAsync() { cancel(); return receiver.closeAsync(); } /** * Updates the disposition state of a message uniquely identified by the given delivery tag. * * @param deliveryTag delivery tag of message. * @param deliveryState Delivery state of message. * * @return A Mono that completes when the state is successfully updated and acknowledged by message broker. */ private Mono<Void> updateDisposition(String deliveryTag, DeliveryState deliveryState) { if (done || s == CANCELLED_SUBSCRIPTION) { final String state = String.format("[link.done:%b link.cancelled:%b parent.done:%b parent.cancelled:%b]", done, s == CANCELLED_SUBSCRIPTION, parent.done, parent.cancelled); final DeliveryNotOnLinkException dispositionError = DeliveryNotOnLinkException.linkClosed(deliveryTag, deliveryState); final Throwable receiverError = error; if (receiverError != null) { dispositionError.addSuppressed(receiverError); } final Throwable upstreamError = parent.error; if (upstreamError != null) { dispositionError.addSuppressed(upstreamError); } return monoError(logger.atError() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .addKeyValue(DELIVERY_STATE_KEY, deliveryState) .addKeyValue("messageFluxState", state), dispositionError); } return receiver.updateDisposition(deliveryTag, deliveryState); } private LoggingEventBuilder updateLogWithReceiverId(LoggingEventBuilder builder) { return builder 
.addKeyValue(CONNECTION_ID_KEY, receiver.getConnectionId()) .addKeyValue(LINK_NAME_KEY, receiver.getLinkName()) .addKeyValue(ENTITY_PATH_KEY, receiver.getEntityPath()); } }
Do we need this method that takes an updateDispositionFunc? I only see it in one place and I feel like it makes the chain harder to follow. I tried tof ind other callees of this bifunction but could not.
private void drainLoop() { int missed = 1; CoreSubscriber<? super Message> downstream = messageSubscriber; for (; ;) { boolean d = done; ReactorReceiverMediator mediator = mediatorHolder.mediator; boolean hasMediator = mediator != null; if (terminateIfCancelled(downstream, null)) { return; } if (terminateIfErrorOrCompletionSignaled(d, downstream, null)) { return; } long r = requested; long emitted = 0L; boolean mediatorTerminatedAndDrained = false; if (r != 0L && hasMediator) { Queue<Message> q = mediator.queue; while (emitted != r) { Message message = q.poll(); if (terminateIfCancelled(downstream, message)) { return; } if (terminateIfErrorOrCompletionSignaled(done, downstream, message)) { return; } boolean empty = message == null; if (empty && mediator.done) { mediatorTerminatedAndDrained = true; break; } if (empty) { break; } messageSubscriber.onNext(message); emitted++; } if (emitted == r) { if (mediator.queue.isEmpty() && mediator.done) { mediatorTerminatedAndDrained = true; } } if (emitted != 0 && r != Long.MAX_VALUE) { r = REQUESTED.addAndGet(this, -emitted); } mediator.update(r, emitted); } if (r == 0L && hasMediator) { if (terminateIfCancelled(downstream, null)) { return; } if (terminateIfErrorOrCompletionSignaled(done, downstream, null)) { return; } if (mediator.queue.isEmpty() && mediator.done) { mediatorTerminatedAndDrained = true; } } if (mediatorTerminatedAndDrained && !mediator.isRetryInitiated) { mediator.isRetryInitiated = true; mediator.closeAsync().subscribe(); setTerminationSignalOrScheduleNextMediatorRequest(mediator.error, downstream, mediatorHolder); } missed = WIP.addAndGet(this, -missed); if (missed == 0) { break; } } }
boolean d = done;
private void drainLoop() { int missed = 1; CoreSubscriber<? super Message> downstream = messageSubscriber; for (; ;) { boolean d = done; ReactorReceiverMediator mediator = mediatorHolder.mediator; boolean hasMediator = mediator != null; if (terminateIfCancelled(downstream, null)) { return; } if (terminateIfErrorOrCompletionSignaled(d, downstream, null)) { return; } long r = requested; long emitted = 0L; boolean mediatorTerminatedAndDrained = false; if (r != 0L && hasMediator) { Queue<Message> q = mediator.queue; while (emitted != r) { Message message = q.poll(); if (terminateIfCancelled(downstream, message)) { return; } if (terminateIfErrorOrCompletionSignaled(done, downstream, message)) { return; } boolean empty = message == null; if (empty && mediator.done) { mediatorTerminatedAndDrained = true; break; } if (empty) { break; } messageSubscriber.onNext(message); emitted++; } if (emitted == r) { if (mediator.queue.isEmpty() && mediator.done) { mediatorTerminatedAndDrained = true; } } if (emitted != 0 && r != Long.MAX_VALUE) { r = REQUESTED.addAndGet(this, -emitted); } mediator.update(r, emitted); } if (r == 0L && hasMediator) { if (terminateIfCancelled(downstream, null)) { return; } if (terminateIfErrorOrCompletionSignaled(done, downstream, null)) { return; } if (mediator.queue.isEmpty() && mediator.done) { mediatorTerminatedAndDrained = true; } } if (mediatorTerminatedAndDrained && !mediator.isRetryInitiated) { mediator.isRetryInitiated = true; mediator.closeAsync().subscribe(); setTerminationSignalOrScheduleNextMediatorRequest(mediator.error, downstream, mediatorHolder); } missed = WIP.addAndGet(this, -missed); if (missed == 0) { break; } } }
class RecoverableReactorReceiver implements CoreSubscriber<AmqpReceiveLink>, Subscription { private final MediatorHolder mediatorHolder = new MediatorHolder(); private final MessageFlux parent; private final int prefetch; private final CreditFlowMode creditFlowMode; private final AmqpRetryPolicy retryPolicy; private final ClientLogger logger; private final AtomicInteger retryAttempts = new AtomicInteger(); private final CoreSubscriber<? super Message> messageSubscriber; private Subscription upstream; private volatile long requested; @SuppressWarnings("rawtypes") private static final AtomicLongFieldUpdater<RecoverableReactorReceiver> REQUESTED = AtomicLongFieldUpdater.newUpdater(RecoverableReactorReceiver.class, "requested"); private volatile int wip; @SuppressWarnings("rawtypes") private static final AtomicIntegerFieldUpdater<RecoverableReactorReceiver> WIP = AtomicIntegerFieldUpdater.newUpdater(RecoverableReactorReceiver.class, "wip"); private volatile boolean done; private volatile boolean cancelled; volatile Throwable error; @SuppressWarnings("rawtypes") static final AtomicReferenceFieldUpdater<RecoverableReactorReceiver, Throwable> ERROR = AtomicReferenceFieldUpdater.newUpdater(RecoverableReactorReceiver.class, Throwable.class, "error"); /** * Create a recoverable-receiver that supports the message-flux to stream messages from the receiver attached * to a messaging entity to the message-flux's downstream subscriber and recover from receiver termination. * * @param parent the parent message-flux. * @param messageSubscriber the message-flux's downstream subscriber to notify the events. * @param prefetch the number of messages that the operator should prefetch from the messaging entity * (for a less chatty network and faster message processing on the client). * @param creditFlowMode the mode indicating how to compute the credit and when to send it to the broker. * @param retryPolicy the retry policy to use to recover from receiver termination. 
*/ RecoverableReactorReceiver(MessageFlux parent, CoreSubscriber<? super Message> messageSubscriber, int prefetch, CreditFlowMode creditFlowMode, AmqpRetryPolicy retryPolicy) { this.parent = parent; this.messageSubscriber = messageSubscriber; this.prefetch = prefetch; this.creditFlowMode = creditFlowMode; this.retryPolicy = retryPolicy; this.logger = parent.logger; } /** * Invoked by the upstream in response to message-flux subscribing to it. * * @param s the subscription handle for requesting receivers from the upstream or terminating upstream * through cancellation when it is no longer needed. */ @Override public void onSubscribe(Subscription s) { if (Operators.validate(upstream, s)) { upstream = s; messageSubscriber.onSubscribe(this); s.request(1); } } /** * Invoked by the upstream to deliver new receiver. * * @param receiver the new receiver. */ @Override public void onNext(AmqpReceiveLink receiver) { if (done) { receiver.closeAsync().subscribe(); Operators.onNextDropped(receiver, messageSubscriber.currentContext()); return; } final ReactorReceiverMediator mediator = new ReactorReceiverMediator(this, receiver, prefetch, creditFlowMode, logger); if (mediatorHolder.trySet(mediator)) { mediator.onParentReady(); } else { logger.atWarning() .addKeyValue("oldLinkName", mediatorHolder.getLinkName()) .addKeyValue(LINK_NAME_KEY, receiver.getLinkName()) .addKeyValue(ENTITY_PATH_KEY, receiver.getEntityPath()) .log("Got a AmqpReceiveLink when the MessageFlux is already terminated."); receiver.closeAsync().subscribe(); Operators.onDiscard(receiver, messageSubscriber.currentContext()); } } /** * Signals operator termination with error. 
* <ul> * <li>Invoked by the upstream when it errors, or</li> * <li>Invoked by the drain-loop when it detects a 'non-retriable or retry exhaust' error, or </li> * <li>Invoked by the drain-loop when it detects the receiver signaled terminal completion and retry * is disabled (i.e., NULL_RETRY_POLICY is set).</li> * </ul> * * @param e the error signaled. */ @Override public void onError(Throwable e) { if (done) { Operators.onErrorDropped(e, messageSubscriber.currentContext()); return; } if (Exceptions.addThrowable(ERROR, this, e)) { done = true; final String logMessage; if (retryPolicy == NULL_RETRY_POLICY) { logMessage = "Terminal error signal from Upstream|Receiver arrived at MessageFlux."; } else { logMessage = "Terminal error signal from Upstream|RetryLoop arrived at MessageFlux."; } mediatorHolder.withReceiverInfo(logger.atWarning()).log(logMessage, e); drain(null); } else { Operators.onErrorDropped(e, messageSubscriber.currentContext()); } } /** * Signals operator termination with completion. * <ul> * <li>Invoked by the upstream when it completes, or</li> * <li>Invoked by the the drain-loop when it detects first receiver signaled terminal completion and retry * is disabled (i.e., NULL_RETRY_POLICY is set).</li> * </ul> */ @Override public void onComplete() { if (done) { return; } done = true; final String logMessage; if (retryPolicy == NULL_RETRY_POLICY) { logMessage = "Terminal completion signal from Upstream|Receiver arrived at MessageFlux."; } else { logMessage = "Terminal completion signal from Upstream arrived at MessageFlux."; } mediatorHolder.withReceiverInfo(logger.atWarning()).log(logMessage); drain(null); } /** * Invoked by the downstream message subscriber to signal the demand for messages. Whatever has been * requested can be sent downstream, so only signal the demand for what can be safely handled. * No messages will be sent downstream until the demand is signaled. * * @param n the number of messages to send to downstream. 
*/ @Override public void request(long n) { if (Operators.validate(n)) { Operators.addCap(REQUESTED, this, n); drain(null); } } /** * Invoked by downstream to signal termination by cancellation. */ @Override public void cancel() { if (cancelled) { return; } cancelled = true; mediatorHolder.withReceiverInfo(logger.atWarning()) .log("Downstream cancellation signal arrived at MessageFlux."); if (WIP.getAndIncrement(this) == 0) { upstream.cancel(); mediatorHolder.freeze(); } } /** * Invoked by the new mediator when it is ready to be used. A mediator constructed in 'onNext' moves * to ready state when its backing receiver is active. * * @param updateDispositionFunc the function to disposition messages from mediator's backing receiver. */ void onMediatorReady(BiFunction<String, DeliveryState, Mono<Void>> updateDispositionFunc) { retryAttempts.set(0); parent.onNextUpdateDispositionFunction(updateDispositionFunc); drain(null); } /** * The serialized entry point to drain-loop. * * @param dataSignal the message to drop if the operator is terminated by cancellation. */ void drain(Message dataSignal) { if (WIP.getAndIncrement(this) != 0) { if (dataSignal != null && cancelled) { Operators.onDiscard(dataSignal, messageSubscriber.currentContext()); } return; } drainLoop(); } /** * The serialized drain-loop (implementation patterns inspired from RxJava, Reactor Operators). * Reference: 'Operator Concurrency Primitives' series https: */ /** * CONTRACT: Never invoke from the outside of serialized drain-loop. * <p/> * See if downstream signaled cancellation to terminate the operator, if so, react to the cancellation. * * @param downstream the downstream. * @param messageDropped the message that gets dropped if cancellation was signaled. * @return true if canceled, false otherwise. */ private boolean terminateIfCancelled(CoreSubscriber<? 
super Message> downstream, Message messageDropped) { if (cancelled) { Operators.onDiscard(messageDropped, downstream.currentContext()); upstream.cancel(); mediatorHolder.freeze(); return true; } return false; } /** * CONTRACT: Never invoke from the outside of serialized drain-loop. * <p/> * See if there is a pending signal for the operator termination with error or completion, if so, react to it * by terminating downstream. * * @param d indicate if the operator termination was signaled. * @param downstream the downstream. * @param messageDropped the message that gets dropped if termination happened. * @return true if terminated, false otherwise. */ private boolean terminateIfErrorOrCompletionSignaled(boolean d, CoreSubscriber<? super Message> downstream, Message messageDropped) { if (d) { final LoggingEventBuilder logBuilder = mediatorHolder.withReceiverInfo(logger.atWarning()); Throwable e = error; if (e != null && e != Exceptions.TERMINATED) { e = Exceptions.terminate(ERROR, this); Operators.onDiscard(messageDropped, downstream.currentContext()); upstream.cancel(); mediatorHolder.freeze(); logBuilder.log("MessageFlux reached a terminal error-state, signaling it downstream.", e); downstream.onError(e); return true; } Operators.onDiscard(messageDropped, downstream.currentContext()); upstream.cancel(); mediatorHolder.freeze(); logBuilder.log("MessageFlux reached a terminal completion-state, signaling it downstream."); downstream.onComplete(); return true; } return false; } /** * CONTRACT: Never invoke from the outside of serialized drain-loop. * <p/> * 1. When retry is enabled (i.e., NULL_RETRY_POLICY is not set) then schedule request (retry) for the next mediator if * <ul> * <li>the operator is not in a termination signaled state,</li> * <li> and * <ul> * <li>there is no error Or</li> * <li>error is retriable and the retry is not exhausted.</li> * </ul> * </li> * </ul> * 2. 
If retry is enabled (i.e., NULL_RETRY_POLICY is not set) and there is 'non-retriable or retry exhaust' * error, then set an error signal for the drain-loop to terminate the operator. * <br/> * 3. If retry is disabled (i.e., NULL_RETRY_POLICY is set), then set an error signal (if first receiver error-ed) * or completion signal (if first receiver completed) for the drain-loop to terminate the operator. * <br/> * @param error the error that leads to error-ed termination of the last mediator or {@code null} * if terminated with completion. * @param downstream the downstream. * @param mediatorHolder the mediator holder. */ private void setTerminationSignalOrScheduleNextMediatorRequest(Throwable error, CoreSubscriber<? super Message> downstream, MediatorHolder mediatorHolder) { final LoggingEventBuilder logBuilder = mediatorHolder.withReceiverInfo(logger.atWarning()); if (cancelled || done) { logBuilder.log("MessageFlux reached terminal-state [done:{}, cancelled:{}].", done, cancelled); return; } if (retryPolicy == NULL_RETRY_POLICY) { if (error == null) { onComplete(); } else { onError(error); } return; } final Duration delay; if (error == null) { delay = Duration.ofSeconds(1); logBuilder.addKeyValue("retryAfter", delay.toMillis()) .log("Current mediator reached terminal completion-state (retriable:true)."); } else { final int attempt = retryAttempts.incrementAndGet(); delay = retryPolicy.calculateRetryDelay(error, attempt); if (delay != null) { logBuilder.addKeyValue("attempt", attempt) .addKeyValue("retryAfter", delay.toMillis()) .log("Current mediator reached terminal error-state (retriable:true).", error); } else { logBuilder.addKeyValue("attempt", attempt) .log("Current mediator reached terminal error-state (retriable:false) Or MessageFlux retries exhausted.", error); onError(error); return; } } try { scheduleNextMediatorRequest(delay, mediatorHolder); } catch (RejectedExecutionException ree) { final RuntimeException e = Operators.onRejectedExecution(ree, 
downstream.currentContext()); mediatorHolder.withReceiverInfo(logger.atWarning()) .log("Unable to schedule a request for a new mediator (retriable:false).", e); onError(e); } } /** * Schedule a task to request a new mediator. * * @param delay the backoff duration before requesting the next mediator. * @param mediatorHolder the mediator holder. * @throws RejectedExecutionException if the scheduler is unable to schedule the task. */ private void scheduleNextMediatorRequest(Duration delay, MediatorHolder mediatorHolder) { final Runnable task = () -> { final LoggingEventBuilder logBuilder = mediatorHolder.withReceiverInfo(logger.atWarning()); if (cancelled || done) { logBuilder.log("During the backoff, MessageFlux reached terminal-state [done:{}, cancelled:{}].", done, cancelled); return; } logBuilder.log("Requesting a new mediator."); upstream.request(1); }; mediatorHolder.nextMediatorRequestDisposable = Schedulers.parallel().schedule(task, delay.toMillis(), TimeUnit.MILLISECONDS); } }
class RecoverableReactorReceiver implements CoreSubscriber<AmqpReceiveLink>, Subscription { private final MediatorHolder mediatorHolder = new MediatorHolder(); private final MessageFlux parent; private final int prefetch; private final CreditFlowMode creditFlowMode; private final AmqpRetryPolicy retryPolicy; private final ClientLogger logger; private final AtomicInteger retryAttempts = new AtomicInteger(); private final CoreSubscriber<? super Message> messageSubscriber; private Subscription upstream; private volatile long requested; @SuppressWarnings("rawtypes") private static final AtomicLongFieldUpdater<RecoverableReactorReceiver> REQUESTED = AtomicLongFieldUpdater.newUpdater(RecoverableReactorReceiver.class, "requested"); private volatile int wip; @SuppressWarnings("rawtypes") private static final AtomicIntegerFieldUpdater<RecoverableReactorReceiver> WIP = AtomicIntegerFieldUpdater.newUpdater(RecoverableReactorReceiver.class, "wip"); private volatile boolean done; private volatile boolean cancelled; private volatile Throwable error; @SuppressWarnings("rawtypes") private static final AtomicReferenceFieldUpdater<RecoverableReactorReceiver, Throwable> ERROR = AtomicReferenceFieldUpdater.newUpdater(RecoverableReactorReceiver.class, Throwable.class, "error"); /** * Create a recoverable-receiver that supports the message-flux to stream messages from the receiver attached * to a messaging entity to the message-flux's downstream subscriber and recover from receiver termination. * * @param parent the parent message-flux. * @param messageSubscriber the message-flux's downstream subscriber to notify the events. * @param prefetch the number of messages that the operator should prefetch from the messaging entity * (for a less chatty network and faster message processing on the client). * @param creditFlowMode the mode indicating how to compute the credit and when to send it to the broker. * @param retryPolicy the retry policy to use to recover from receiver termination. 
*/ RecoverableReactorReceiver(MessageFlux parent, CoreSubscriber<? super Message> messageSubscriber, int prefetch, CreditFlowMode creditFlowMode, AmqpRetryPolicy retryPolicy) { this.parent = parent; this.messageSubscriber = messageSubscriber; this.prefetch = prefetch; this.creditFlowMode = creditFlowMode; this.retryPolicy = retryPolicy; this.logger = parent.logger; } /** * Invoked by the upstream in response to message-flux subscribing to it. * * @param s the subscription handle for requesting receivers from the upstream or terminating upstream * through cancellation when it is no longer needed. */ @Override public void onSubscribe(Subscription s) { if (Operators.validate(upstream, s)) { upstream = s; messageSubscriber.onSubscribe(this); s.request(1); } } /** * Invoked by the upstream to deliver new receiver. * * @param receiver the new receiver. */ @Override public void onNext(AmqpReceiveLink receiver) { if (done) { receiver.closeAsync().subscribe(); Operators.onNextDropped(receiver, messageSubscriber.currentContext()); return; } final ReactorReceiverMediator mediator = new ReactorReceiverMediator(this, receiver, prefetch, creditFlowMode, logger); if (mediatorHolder.trySet(mediator)) { mediator.onParentReady(); } else { logger.atWarning() .addKeyValue("oldLinkName", mediatorHolder.getLinkName()) .addKeyValue(LINK_NAME_KEY, receiver.getLinkName()) .addKeyValue(ENTITY_PATH_KEY, receiver.getEntityPath()) .log("Got a AmqpReceiveLink when the MessageFlux is already terminated."); receiver.closeAsync().subscribe(); Operators.onDiscard(receiver, messageSubscriber.currentContext()); } } /** * Signals operator termination with error. 
* <ul> * <li>Invoked by the upstream when it errors, or</li> * <li>Invoked by the drain-loop when it detects a 'non-retriable or retry exhaust' error, or </li> * <li>Invoked by the drain-loop when it detects the receiver signaled terminal completion and retry * is disabled (i.e., NULL_RETRY_POLICY is set).</li> * </ul> * * @param e the error signaled. */ @Override public void onError(Throwable e) { if (done) { Operators.onErrorDropped(e, messageSubscriber.currentContext()); return; } if (Exceptions.addThrowable(ERROR, this, e)) { done = true; final String logMessage; if (retryPolicy == NULL_RETRY_POLICY) { logMessage = "Terminal error signal from Upstream|Receiver arrived at MessageFlux."; } else { logMessage = "Terminal error signal from Upstream|RetryLoop arrived at MessageFlux."; } mediatorHolder.withReceiverInfo(logger.atWarning()).log(logMessage, e); drain(null); } else { Operators.onErrorDropped(e, messageSubscriber.currentContext()); } } /** * Signals operator termination with completion. * <ul> * <li>Invoked by the upstream when it completes, or</li> * <li>Invoked by the the drain-loop when it detects first receiver signaled terminal completion and retry * is disabled (i.e., NULL_RETRY_POLICY is set).</li> * </ul> */ @Override public void onComplete() { if (done) { return; } done = true; final String logMessage; if (retryPolicy == NULL_RETRY_POLICY) { logMessage = "Terminal completion signal from Upstream|Receiver arrived at MessageFlux."; } else { logMessage = "Terminal completion signal from Upstream arrived at MessageFlux."; } mediatorHolder.withReceiverInfo(logger.atWarning()).log(logMessage); drain(null); } /** * Invoked by the downstream message subscriber to signal the demand for messages. Whatever has been * requested can be sent downstream, so only signal the demand for what can be safely handled. * No messages will be sent downstream until the demand is signaled. * * @param n the number of messages to send to downstream. 
*/ @Override public void request(long n) { if (Operators.validate(n)) { Operators.addCap(REQUESTED, this, n); drain(null); } } /** * Invoked by downstream to signal termination by cancellation. */ @Override public void cancel() { if (cancelled) { return; } cancelled = true; mediatorHolder.withReceiverInfo(logger.atWarning()) .log("Downstream cancellation signal arrived at MessageFlux."); if (WIP.getAndIncrement(this) == 0) { upstream.cancel(); mediatorHolder.freeze(); } } /** * Invoked by the new mediator when it is ready to be used. A mediator constructed in 'onNext' moves * to ready state when its backing receiver is active. * * @param updateDispositionFunc the function to disposition messages from mediator's backing receiver. */ void onMediatorReady(DispositionFunction updateDispositionFunc) { retryAttempts.set(0); parent.onNextUpdateDispositionFunction(updateDispositionFunc); drain(null); } /** * The serialized entry point to drain-loop. * * @param dataSignal the message to drop if the operator is terminated by cancellation. */ void drain(Message dataSignal) { if (WIP.getAndIncrement(this) != 0) { if (dataSignal != null && cancelled) { Operators.onDiscard(dataSignal, messageSubscriber.currentContext()); } return; } drainLoop(); } /** * The serialized drain-loop (implementation patterns inspired from RxJava, Reactor Operators). * Reference: 'Operator Concurrency Primitives' series https: */ /** * CONTRACT: Never invoke from the outside of serialized drain-loop. * <p> * See if downstream signaled cancellation to terminate the operator, if so, react to the cancellation. * </p> * @param downstream the downstream. * @param messageDropped the message that gets dropped if cancellation was signaled. * @return true if canceled, false otherwise. */ private boolean terminateIfCancelled(CoreSubscriber<? 
super Message> downstream, Message messageDropped) { if (cancelled) { Operators.onDiscard(messageDropped, downstream.currentContext()); upstream.cancel(); mediatorHolder.freeze(); return true; } return false; } /** * CONTRACT: Never invoke from the outside of serialized drain-loop. * <p> * See if there is a pending signal for the operator termination with error or completion, if so, react to it * by terminating downstream. * </p> * * @param d indicate if the operator termination was signaled. * @param downstream the downstream. * @param messageDropped the message that gets dropped if termination happened. * @return true if terminated, false otherwise. */ private boolean terminateIfErrorOrCompletionSignaled(boolean d, CoreSubscriber<? super Message> downstream, Message messageDropped) { if (d) { final LoggingEventBuilder logBuilder = mediatorHolder.withReceiverInfo(logger.atWarning()); Throwable e = error; if (e != null && e != Exceptions.TERMINATED) { e = Exceptions.terminate(ERROR, this); Operators.onDiscard(messageDropped, downstream.currentContext()); upstream.cancel(); mediatorHolder.freeze(); logBuilder.log("MessageFlux reached a terminal error-state, signaling it downstream.", e); downstream.onError(e); return true; } Operators.onDiscard(messageDropped, downstream.currentContext()); upstream.cancel(); mediatorHolder.freeze(); logBuilder.log("MessageFlux reached a terminal completion-state, signaling it downstream."); downstream.onComplete(); return true; } return false; } /** * * CONTRACT: Never invoke from the outside of serialized drain-loop. * <p></p> * 1. When retry is enabled (i.e., NULL_RETRY_POLICY is not set) then schedule request (retry) for the next mediator if * <ul> * <li>the operator is not in a termination signaled state,</li> * <li> and * <ul> * <li>there is no error Or</li> * <li>error is retriable and the retry is not exhausted.</li> * </ul> * </li> * </ul> * 2. 
If retry is enabled (i.e., NULL_RETRY_POLICY is not set) and there is 'non-retriable or retry exhaust' * error, then set an error signal for the drain-loop to terminate the operator. * <br/> * 3. If retry is disabled (i.e., NULL_RETRY_POLICY is set), then set an error signal (if first receiver error-ed) * or completion signal (if first receiver completed) for the drain-loop to terminate the operator. * <p></p> * @param error the error that leads to error-ed termination of the last mediator or {@code null} * if terminated with completion. * @param downstream the downstream. * @param mediatorHolder the mediator holder. */ private void setTerminationSignalOrScheduleNextMediatorRequest(Throwable error, CoreSubscriber<? super Message> downstream, MediatorHolder mediatorHolder) { final LoggingEventBuilder logBuilder = mediatorHolder.withReceiverInfo(logger.atWarning()); if (cancelled || done) { logBuilder.log("MessageFlux reached terminal-state [done:{}, cancelled:{}].", done, cancelled); return; } if (retryPolicy == NULL_RETRY_POLICY) { if (error == null) { onComplete(); } else { onError(error); } return; } final Duration delay; if (error == null) { delay = Duration.ofSeconds(1); logBuilder.addKeyValue("retryAfter", delay.toMillis()) .log("Current mediator reached terminal completion-state (retriable:true)."); } else { final int attempt = retryAttempts.incrementAndGet(); delay = retryPolicy.calculateRetryDelay(error, attempt); if (delay != null) { logBuilder.addKeyValue("attempt", attempt) .addKeyValue("retryAfter", delay.toMillis()) .log("Current mediator reached terminal error-state (retriable:true).", error); } else { logBuilder.addKeyValue("attempt", attempt) .log("Current mediator reached terminal error-state (retriable:false) Or MessageFlux retries exhausted.", error); onError(error); return; } } try { scheduleNextMediatorRequest(delay, mediatorHolder); } catch (RejectedExecutionException ree) { final RuntimeException e = Operators.onRejectedExecution(ree, 
downstream.currentContext()); mediatorHolder.withReceiverInfo(logger.atWarning()) .log("Unable to schedule a request for a new mediator (retriable:false).", e); onError(e); } } /** * Schedule a task to request a new mediator. * * @param delay the backoff duration before requesting the next mediator. * @param mediatorHolder the mediator holder. * @throws RejectedExecutionException if the scheduler is unable to schedule the task. */ private void scheduleNextMediatorRequest(Duration delay, MediatorHolder mediatorHolder) { final Runnable task = () -> { final LoggingEventBuilder logBuilder = mediatorHolder.withReceiverInfo(logger.atWarning()); if (cancelled || done) { logBuilder.log("During the backoff, MessageFlux reached terminal-state [done:{}, cancelled:{}].", done, cancelled); return; } logBuilder.log("Requesting a new mediator."); upstream.request(1); }; mediatorHolder.nextMediatorRequestDisposable = Schedulers.parallel().schedule(task, delay.toMillis(), TimeUnit.MILLISECONDS); } }
I think this is odd because if it returns a mono, we should just chain it with the next operator. I looked at the current implementation and could we do something like this instead to make it more reactive? ```java Mono<Boolean> beginClose() { if (isV2) { return receiveLinkHandler2.preClose(); } else { return Mono.empty(); } } ``` calling code: ``` handler.beginClose().then(localCloseMono); ```
private Mono<Boolean> beginClose(ErrorCondition errorCondition) { final Runnable localClose = () -> { if (receiver.getLocalState() != EndpointState.CLOSED) { receiver.close(); if (receiver.getCondition() == null) { receiver.setCondition(errorCondition); } } }; final Mono<Boolean> localCloseMono = Mono.create(sink -> { boolean localCloseScheduled = false; try { dispatcher.invoke(localClose); localCloseScheduled = true; } catch (IOException e) { logger.warning("IO sink was closed when scheduling work. Manually invoking and completing close.", e); localClose.run(); terminateEndpointState(); completeClose(); } catch (RejectedExecutionException e) { logger.info("RejectedExecutionException when scheduling on ReactorDispatcher. Manually invoking and completing close."); localClose.run(); terminateEndpointState(); completeClose(); } finally { sink.success(localCloseScheduled); } }); return handler.beginClose(localCloseMono); }
return handler.beginClose(localCloseMono);
private Mono<Boolean> beginClose(ErrorCondition errorCondition) { final Runnable localClose = () -> { if (receiver.getLocalState() != EndpointState.CLOSED) { receiver.close(); if (receiver.getCondition() == null) { receiver.setCondition(errorCondition); } } }; final Mono<Boolean> localCloseMono = Mono.create(sink -> { boolean localCloseScheduled = false; try { dispatcher.invoke(localClose); localCloseScheduled = true; } catch (IOException e) { logger.warning("IO sink was closed when scheduling work. Manually invoking and completing close.", e); localClose.run(); terminateEndpointState(); completeClose(); } catch (RejectedExecutionException e) { logger.info("RejectedExecutionException when scheduling on ReactorDispatcher. Manually invoking and completing close."); localClose.run(); terminateEndpointState(); completeClose(); } finally { sink.success(localCloseScheduled); } }); return handler.beginClose().then(localCloseMono); }
class ReactorReceiver implements AmqpReceiveLink, AsyncCloseable, AutoCloseable { private static final Symbol SEQUENCE_NUMBER_ANNOTATION = Symbol.valueOf(SEQUENCE_NUMBER_ANNOTATION_NAME.getValue()); private final String entityPath; private final Receiver receiver; private final ReceiveLinkHandlerWrapper handler; private final TokenManager tokenManager; private final ReactorDispatcher dispatcher; private final Disposable subscriptions; private final AtomicBoolean isDisposed = new AtomicBoolean(); private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final AtomicBoolean isCompleteCloseCalled = new AtomicBoolean(); private final Flux<Message> messagesProcessor; private final AmqpRetryOptions retryOptions; private final ClientLogger logger; private final boolean isV2; private final Flux<AmqpEndpointState> endpointStates; private final Sinks.Empty<AmqpEndpointState> terminateEndpointStates = Sinks.empty(); private final AtomicReference<Supplier<Integer>> creditSupplier = new AtomicReference<>(); private final AmqpMetricsProvider metricsProvider; private final AtomicLong lastSequenceNumber = new AtomicLong(); private final AutoCloseable trackPrefetchSeqNoSubscription; protected ReactorReceiver(AmqpConnection amqpConnection, String entityPath, Receiver receiver, ReceiveLinkHandlerWrapper handler, TokenManager tokenManager, ReactorDispatcher dispatcher, AmqpRetryOptions retryOptions, AmqpMetricsProvider metricsProvider) { this.entityPath = entityPath; this.receiver = receiver; this.handler = handler; this.tokenManager = tokenManager; this.dispatcher = dispatcher; this.metricsProvider = metricsProvider; this.trackPrefetchSeqNoSubscription = this.metricsProvider.trackPrefetchSequenceNumber(lastSequenceNumber::get); Map<String, Object> loggingContext = createContextWithConnectionId(handler.getConnectionId()); loggingContext.put(LINK_NAME_KEY, this.handler.getLinkName()); loggingContext.put(ENTITY_PATH_KEY, entityPath); this.logger = new 
ClientLogger(ReactorReceiver.class, loggingContext); handler.setLogger(this.logger); this.isV2 = handler.isV2(); if (!this.isV2) { this.messagesProcessor = this.handler.getDeliveredMessagesV1() .flatMap(delivery -> { return Mono.create(sink -> { try { this.dispatcher.invoke(() -> { if (isDisposed()) { sink.error(new IllegalStateException( "Cannot decode delivery when ReactorReceiver instance is closed.")); return; } final Message message = decodeDelivery(delivery); if (metricsProvider.isPrefetchedSequenceNumberEnabled()) { Long seqNo = getSequenceNumber(message); if (seqNo != null) { lastSequenceNumber.set(seqNo); } } final int creditsLeft = receiver.getRemoteCredit(); if (creditsLeft > 0) { sink.success(message); return; } final Supplier<Integer> supplier = creditSupplier.get(); final Integer credits = supplier.get(); if (credits != null && credits > 0) { logger.atVerbose() .addKeyValue("credits", credits) .log("Adding credits."); receiver.flow(credits); } else { logger.atVerbose() .addKeyValue("credits", credits) .log("There are no credits to add."); } metricsProvider.recordAddCredits(credits == null ? 0 : credits); sink.success(message); }); } catch (IOException | RejectedExecutionException e) { sink.error(e); } }); }, 1); } else { this.messagesProcessor = this.handler.getDeliveredMessagesV2() .map(message -> { if (metricsProvider.isPrefetchedSequenceNumberEnabled()) { Long seqNo = getSequenceNumber(message); if (seqNo != null) { lastSequenceNumber.set(seqNo); } } return message; }); } this.retryOptions = retryOptions; this.endpointStates = this.handler.getEndpointStates() .map(state -> { logger.atVerbose() .log("State {}", state); return AmqpEndpointStateUtil.getConnectionState(state); }) .doOnError(error -> { final String message = isDisposed.getAndSet(true) ? "This was already disposed. Dropping error." 
: "Freeing resources due to error."; logger.atInfo() .log(message); completeClose(); }) .doOnComplete(() -> { final String message = isDisposed.getAndSet(true) ? "This was already disposed." : "Freeing resources."; logger.atVerbose() .log(message); completeClose(); }) .cache(1); this.subscriptions = Disposables.composite( this.endpointStates.subscribe(), this.tokenManager.getAuthorizationResults() .onErrorResume(error -> { final Mono<Void> operation = closeAsync("Token renewal failure. Disposing receive link.", new ErrorCondition(Symbol.getSymbol(AmqpErrorCondition.NOT_ALLOWED.getErrorCondition()), error.getMessage())); return operation.then(Mono.empty()); }).subscribe(response -> logger.atVerbose() .addKeyValue("response", response) .log("Token refreshed."), error -> { }, () -> { logger.atVerbose() .log("Authorization completed."); closeAsync("Authorization completed. Disposing.", null).subscribe(); }), amqpConnection.getShutdownSignals().flatMap(signal -> { logger.verbose("Shutdown signal received."); return closeAsync("Connection shutdown.", null); }).subscribe()); } @Override public Flux<AmqpEndpointState> getEndpointStates() { return endpointStates .distinctUntilChanged() .takeUntilOther(terminateEndpointStates.asMono()); } @Override public String getConnectionId() { return handler.getConnectionId(); } @Override public Flux<Message> receive() { return messagesProcessor; } @Override public Mono<Void> updateDisposition(String deliveryTag, DeliveryState deliveryState) { return handler.sendDisposition(deliveryTag, deliveryState); } @Override public Mono<Void> addCredits(int credits) { if (isDisposed()) { return monoError(logger, new IllegalStateException("Cannot add credits to closed link: " + getLinkName())); } return Mono.create(sink -> { try { dispatcher.invoke(() -> { receiver.flow(credits); metricsProvider.recordAddCredits(credits); sink.success(); }); } catch (IOException e) { sink.error(new UncheckedIOException(String.format( "connectionId[%s] linkName[%s] 
Unable to schedule work to add more credits.", handler.getConnectionId(), getLinkName()), e)); } catch (RejectedExecutionException e) { sink.error(e); } }); } @Override public void addCredit(Supplier<Long> creditSupplier) { assert isV2; if (isDisposed()) { throw new RejectedExecutionException("Cannot schedule credit flow when the link is disposed."); } try { dispatcher.invoke(() -> { final long credit = creditSupplier.get(); receiver.flow((int) credit); metricsProvider.recordAddCredits((int) credit); }); } catch (IOException e) { throw new UncheckedIOException("Unable to schedule credit flow.", e); } } @Override public int getCredits() { return receiver.getRemoteCredit(); } @Override public void setEmptyCreditListener(Supplier<Integer> creditSupplier) { assert !isV2; Objects.requireNonNull(creditSupplier, "'creditSupplier' cannot be null."); this.creditSupplier.set(creditSupplier); } @Override public String getLinkName() { return handler.getLinkName(); } @Override public String getEntityPath() { return entityPath; } @Override public String getHostname() { return handler.getHostname(); } @Override public boolean isDisposed() { return isDisposed.get(); } @Override public void dispose() { close(); } @Override public void close() { closeAsync().block(retryOptions.getTryTimeout()); } @Override public Mono<Void> closeAsync() { return closeAsync("User invoked close operation.", null); } protected Message decodeDelivery(Delivery delivery) { assert !isV2; final int messageSize = delivery.pending(); final byte[] buffer = new byte[messageSize]; final int read = receiver.recv(buffer, 0, messageSize); receiver.advance(); final Message message = Proton.message(); message.decode(buffer, 0, read); delivery.settle(); return message; } /** * Disposes of the receiver when an exception is encountered. * <p> * While {@link ReactorReceiver * contract, this API performs the same disposal with additional * contextual information. 
For example, the context may indicate if the resource needs to be disposed of * internally when there is an error in the link, session or connection. * </p> * * <p> * Closing ReactorReceiver involves 3 stages, running in following order - * <ul> * <li>local-close (client to broker) via beginClose() </li> * <li>remote-close ack (broker to client)</li> * <li>disposal of ReactorReceiver resources via completeClose()</li> * </ul> * @link <a href="https: * * @param message Message to log. * @param errorCondition Error condition associated with close operation. */ protected Mono<Void> closeAsync(String message, ErrorCondition errorCondition) { if (isDisposed.getAndSet(true)) { return getIsClosedMono(); } addErrorCondition(logger.atVerbose(), errorCondition) .log("Setting error condition and disposing. {}", message); return beginClose(errorCondition) .flatMap(localCloseScheduled -> { if (localCloseScheduled) { return timeoutRemoteCloseAck(); } else { return Mono.empty(); } }) .publishOn(Schedulers.boundedElastic()); } /** * Gets the Mono that signals completion when the disposal/closing of ReactorReceiver is completed. * * @return the disposal/closing completion Mono. */ protected Mono<Void> getIsClosedMono() { return isClosedMono.asMono().publishOn(Schedulers.boundedElastic()); } protected void onHandlerClose() { assert !isV2; } /** * Begins the client side close by requesting receive link handler for any graceful resource * cleanup, then initiating local-close on underlying receiver. * * @param errorCondition Error condition associated with close operation. * @return a {@link Mono} when subscribed attempt to initiate local-close, emitting {@code true} * if local-close is scheduled on the dispatcher, emits {@code false} if unable to schedule * local-close that lead to manual close. */ /** * Apply timeout on remote-close ack. 
If timeout happens, i.e., if remote-close ack doesn't arrive within * the timeout duration, then terminate the Flux returned by getEndpointStates() and complete close. * * a {@link Mono} that registers remote-close ack timeout based close cleanup. */ private Mono<Void> timeoutRemoteCloseAck() { return isClosedMono.asMono() .timeout(retryOptions.getTryTimeout()) .onErrorResume(error -> { if (error instanceof TimeoutException) { logger.info("Timeout waiting for RemoteClose. Manually terminating EndpointStates and completing close."); terminateEndpointState(); completeClose(); } return Mono.empty(); }) .subscribeOn(Schedulers.boundedElastic()); } /** * Terminate the Flux returned by the getEndpointStates() API. * * <p> * The termination of Flux returned by getEndpointStates() is the signal that "AmqpReceiveLinkProcessor" * uses to either terminate its downstream or obtain a new ReactorReceiver to continue delivering events * downstream. * </p> */ private void terminateEndpointState() { terminateEndpointStates.emitEmpty((signalType, emitResult) -> { addSignalTypeAndResult(logger.atVerbose(), signalType, emitResult) .log("Could not emit EndpointStates termination."); return false; }); } /** * Completes the closing of the underlying receiver, which includes disposing of subscriptions, * closing of token manager, and releasing of protonJ resources. * <p> * The completeClose invoked in 3 cases - when the broker ack for beginClose (i.e. ack via * remote-close frame), if the broker ack for beginClose never comes through within timeout, * if the client fails to run beginClose. 
* </p> */ private void completeClose() { if (isCompleteCloseCalled.getAndSet(true)) { return; } isClosedMono.emitEmpty((signalType, result) -> { addSignalTypeAndResult(logger.atWarning(), signalType, result) .log("Unable to emit shutdown signal."); return false; }); subscriptions.dispose(); if (tokenManager != null) { tokenManager.close(); } handler.close(); if (!isV2) { onHandlerClose(); } receiver.free(); try { trackPrefetchSeqNoSubscription.close(); } catch (Exception e) { logger.verbose("Error closing metrics subscription.", e); } } private Long getSequenceNumber(Message message) { if (message == null || message.getMessageAnnotations() == null || message.getBody() == null) { return null; } Map<Symbol, Object> properties = message.getMessageAnnotations().getValue(); Object seqNo = properties != null ? properties.get(SEQUENCE_NUMBER_ANNOTATION) : null; if (seqNo instanceof Integer) { return ((Integer) seqNo).longValue(); } else if (seqNo instanceof Long) { return (Long) seqNo; } else if (seqNo != null) { logger.verbose("Received message has unexpected `x-opt-sequence-number` annotation value - `{}`. Ignoring it.", seqNo); } return null; } @Override public String toString() { return String.format("connectionId: [%s] entity path: [%s] linkName: [%s]", receiver.getName(), entityPath, getLinkName()); } }
class ReactorReceiver implements AmqpReceiveLink, AsyncCloseable, AutoCloseable { private static final Symbol SEQUENCE_NUMBER_ANNOTATION = Symbol.valueOf(SEQUENCE_NUMBER_ANNOTATION_NAME.getValue()); private final String entityPath; private final Receiver receiver; private final ReceiveLinkHandlerWrapper handler; private final TokenManager tokenManager; private final ReactorDispatcher dispatcher; private final Disposable subscriptions; private final AtomicBoolean isDisposed = new AtomicBoolean(); private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final AtomicBoolean isCompleteCloseCalled = new AtomicBoolean(); private final Flux<Message> messagesProcessor; private final AmqpRetryOptions retryOptions; private final ClientLogger logger; private final boolean isV2; private final Flux<AmqpEndpointState> endpointStates; private final Sinks.Empty<AmqpEndpointState> terminateEndpointStates = Sinks.empty(); private final AtomicReference<Supplier<Integer>> creditSupplier = new AtomicReference<>(); private final AmqpMetricsProvider metricsProvider; private final AtomicLong lastSequenceNumber = new AtomicLong(); private final AutoCloseable trackPrefetchSeqNoSubscription; protected ReactorReceiver(AmqpConnection amqpConnection, String entityPath, Receiver receiver, ReceiveLinkHandlerWrapper handler, TokenManager tokenManager, ReactorDispatcher dispatcher, AmqpRetryOptions retryOptions, AmqpMetricsProvider metricsProvider) { this.entityPath = entityPath; this.receiver = receiver; this.handler = handler; this.tokenManager = tokenManager; this.dispatcher = dispatcher; this.metricsProvider = metricsProvider; this.trackPrefetchSeqNoSubscription = this.metricsProvider.trackPrefetchSequenceNumber(lastSequenceNumber::get); Map<String, Object> loggingContext = createContextWithConnectionId(handler.getConnectionId()); loggingContext.put(LINK_NAME_KEY, this.handler.getLinkName()); loggingContext.put(ENTITY_PATH_KEY, entityPath); this.logger = new 
ClientLogger(ReactorReceiver.class, loggingContext); handler.setLogger(this.logger); this.isV2 = handler.isV2(); if (!this.isV2) { this.messagesProcessor = this.handler.getDeliveredMessagesV1() .flatMap(delivery -> { return Mono.create(sink -> { try { this.dispatcher.invoke(() -> { if (isDisposed()) { sink.error(new IllegalStateException( "Cannot decode delivery when ReactorReceiver instance is closed.")); return; } final Message message = decodeDelivery(delivery); if (metricsProvider.isPrefetchedSequenceNumberEnabled()) { Long seqNo = getSequenceNumber(message); if (seqNo != null) { lastSequenceNumber.set(seqNo); } } final int creditsLeft = receiver.getRemoteCredit(); if (creditsLeft > 0) { sink.success(message); return; } final Supplier<Integer> supplier = creditSupplier.get(); final Integer credits = supplier.get(); if (credits != null && credits > 0) { logger.atVerbose() .addKeyValue("credits", credits) .log("Adding credits."); receiver.flow(credits); } metricsProvider.recordAddCredits(credits == null ? 0 : credits); sink.success(message); }); } catch (IOException | RejectedExecutionException e) { sink.error(e); } }); }, 1); } else { if (metricsProvider.isPrefetchedSequenceNumberEnabled()) { this.messagesProcessor = this.handler.getDeliveredMessagesV2() .map(message -> { final Long seqNo = getSequenceNumber(message); if (seqNo != null) { lastSequenceNumber.set(seqNo); } return message; }); } else { this.messagesProcessor = this.handler.getDeliveredMessagesV2(); } } this.retryOptions = retryOptions; this.endpointStates = this.handler.getEndpointStates() .map(state -> { logger.atVerbose() .log("State {}", state); return AmqpEndpointStateUtil.getConnectionState(state); }) .doOnError(error -> { final String message = isDisposed.getAndSet(true) ? "This was already disposed. Dropping error." : "Freeing resources due to error."; logger.atInfo() .log(message); completeClose(); }) .doOnComplete(() -> { final String message = isDisposed.getAndSet(true) ? 
"This was already disposed." : "Freeing resources."; logger.atVerbose() .log(message); completeClose(); }) .cache(1); this.subscriptions = Disposables.composite( this.endpointStates.subscribe(), this.tokenManager.getAuthorizationResults() .onErrorResume(error -> { final Mono<Void> operation = closeAsync("Token renewal failure. Disposing receive link.", new ErrorCondition(Symbol.getSymbol(AmqpErrorCondition.NOT_ALLOWED.getErrorCondition()), error.getMessage())); return operation.then(Mono.empty()); }).subscribe(response -> logger.atVerbose() .addKeyValue("response", response) .log("Token refreshed."), error -> { }, () -> { logger.atVerbose() .log("Authorization completed."); closeAsync("Authorization completed. Disposing.", null).subscribe(); }), amqpConnection.getShutdownSignals().flatMap(signal -> { logger.verbose("Shutdown signal received."); return closeAsync("Connection shutdown.", null); }).subscribe()); } @Override public Flux<AmqpEndpointState> getEndpointStates() { return endpointStates .distinctUntilChanged() .takeUntilOther(terminateEndpointStates.asMono()); } @Override public String getConnectionId() { return handler.getConnectionId(); } @Override public Flux<Message> receive() { return messagesProcessor; } @Override public Mono<Void> updateDisposition(String deliveryTag, DeliveryState deliveryState) { return handler.sendDisposition(deliveryTag, deliveryState); } @Override public Mono<Void> addCredits(int credits) { if (isDisposed()) { return monoError(logger, new IllegalStateException("Cannot add credits to closed link: " + getLinkName())); } return Mono.create(sink -> { try { dispatcher.invoke(() -> { receiver.flow(credits); metricsProvider.recordAddCredits(credits); sink.success(); }); } catch (IOException e) { sink.error(new UncheckedIOException(String.format( "connectionId[%s] linkName[%s] Unable to schedule work to add more credits.", handler.getConnectionId(), getLinkName()), e)); } catch (RejectedExecutionException e) { sink.error(e); } }); } 
@Override public void addCredit(Supplier<Long> creditSupplier) { assert isV2; if (isDisposed()) { throw new RejectedExecutionException("Cannot schedule credit flow when the link is disposed."); } try { dispatcher.invoke(() -> { final long credit = creditSupplier.get(); receiver.flow((int) credit); metricsProvider.recordAddCredits((int) credit); }); } catch (IOException e) { throw new UncheckedIOException("Unable to schedule credit flow.", e); } } @Override public int getCredits() { return receiver.getRemoteCredit(); } @Override public void setEmptyCreditListener(Supplier<Integer> creditSupplier) { assert !isV2; Objects.requireNonNull(creditSupplier, "'creditSupplier' cannot be null."); this.creditSupplier.set(creditSupplier); } @Override public String getLinkName() { return handler.getLinkName(); } @Override public String getEntityPath() { return entityPath; } @Override public String getHostname() { return handler.getHostname(); } @Override public boolean isDisposed() { return isDisposed.get(); } @Override public void dispose() { close(); } @Override public void close() { closeAsync().block(retryOptions.getTryTimeout()); } @Override public Mono<Void> closeAsync() { return closeAsync("User invoked close operation.", null); } protected Message decodeDelivery(Delivery delivery) { assert !isV2; final int messageSize = delivery.pending(); final byte[] buffer = new byte[messageSize]; final int read = receiver.recv(buffer, 0, messageSize); receiver.advance(); final Message message = Proton.message(); message.decode(buffer, 0, read); delivery.settle(); return message; } /** * Disposes of the receiver when an exception is encountered. * <p> * While {@link ReactorReceiver * contract, this API performs the same disposal with additional * contextual information. For example, the context may indicate if the resource needs to be disposed of * internally when there is an error in the link, session or connection. 
* </p> * * <p> * Closing ReactorReceiver involves 3 stages, running in following order - * <ul> * <li>local-close (client to broker) via beginClose() </li> * <li>remote-close ack (broker to client)</li> * <li>disposal of ReactorReceiver resources via completeClose()</li> * </ul> * @link <a href="https: * * @param message Message to log. * @param errorCondition Error condition associated with close operation. */ protected Mono<Void> closeAsync(String message, ErrorCondition errorCondition) { if (isDisposed.getAndSet(true)) { return getIsClosedMono(); } addErrorCondition(logger.atVerbose(), errorCondition) .log("Setting error condition and disposing. {}", message); return beginClose(errorCondition) .flatMap(localCloseScheduled -> { if (localCloseScheduled) { return timeoutRemoteCloseAck(); } else { return Mono.empty(); } }) .publishOn(Schedulers.boundedElastic()); } /** * Gets the Mono that signals completion when the disposal/closing of ReactorReceiver is completed. * * @return the disposal/closing completion Mono. */ protected Mono<Void> getIsClosedMono() { return isClosedMono.asMono().publishOn(Schedulers.boundedElastic()); } protected void onHandlerClose() { assert !isV2; } /** * Begins the client side close by requesting receive link handler for any graceful resource * cleanup, then initiating local-close on underlying receiver. * * @param errorCondition Error condition associated with close operation. * @return a {@link Mono} when subscribed attempt to initiate local-close, emitting {@code true} * if local-close is scheduled on the dispatcher, emits {@code false} if unable to schedule * local-close that lead to manual close. */ /** * Apply timeout on remote-close ack. If timeout happens, i.e., if remote-close ack doesn't arrive within * the timeout duration, then terminate the Flux returned by getEndpointStates() and complete close. * * a {@link Mono} that registers remote-close ack timeout based close cleanup. 
*/ private Mono<Void> timeoutRemoteCloseAck() { return isClosedMono.asMono() .timeout(retryOptions.getTryTimeout()) .onErrorResume(error -> { if (error instanceof TimeoutException) { logger.info("Timeout waiting for RemoteClose. Manually terminating EndpointStates and completing close."); terminateEndpointState(); completeClose(); } return Mono.empty(); }) .subscribeOn(Schedulers.boundedElastic()); } /** * Terminate the Flux returned by the getEndpointStates() API. * * <p> * The termination of Flux returned by getEndpointStates() is the signal that "AmqpReceiveLinkProcessor" * uses to either terminate its downstream or obtain a new ReactorReceiver to continue delivering events * downstream. * </p> */ private void terminateEndpointState() { terminateEndpointStates.emitEmpty((signalType, emitResult) -> { addSignalTypeAndResult(logger.atVerbose(), signalType, emitResult) .log("Could not emit EndpointStates termination."); return false; }); } /** * Completes the closing of the underlying receiver, which includes disposing of subscriptions, * closing of token manager, and releasing of protonJ resources. * <p> * The completeClose invoked in 3 cases - when the broker ack for beginClose (i.e. ack via * remote-close frame), if the broker ack for beginClose never comes through within timeout, * if the client fails to run beginClose. 
* </p> */ private void completeClose() { if (isCompleteCloseCalled.getAndSet(true)) { return; } isClosedMono.emitEmpty((signalType, result) -> { addSignalTypeAndResult(logger.atWarning(), signalType, result) .log("Unable to emit shutdown signal."); return false; }); subscriptions.dispose(); if (tokenManager != null) { tokenManager.close(); } handler.close(); if (!isV2) { onHandlerClose(); } receiver.free(); try { trackPrefetchSeqNoSubscription.close(); } catch (Exception e) { logger.verbose("Error closing metrics subscription.", e); } } private Long getSequenceNumber(Message message) { if (message == null || message.getMessageAnnotations() == null || message.getBody() == null) { return null; } Map<Symbol, Object> properties = message.getMessageAnnotations().getValue(); Object seqNo = properties != null ? properties.get(SEQUENCE_NUMBER_ANNOTATION) : null; if (seqNo instanceof Integer) { return ((Integer) seqNo).longValue(); } else if (seqNo instanceof Long) { return (Long) seqNo; } else if (seqNo != null) { logger.verbose("Received message has unexpected `x-opt-sequence-number` annotation value - `{}`. Ignoring it.", seqNo); } return null; } @Override public String toString() { return String.format("connectionId: [%s] entity path: [%s] linkName: [%s]", receiver.getName(), entityPath, getLinkName()); } }
Ha... there are a few "nop" texts in this class; I will rename them to "no-op". **[DONE]**
public void cancel() { if (cancelled) { return; } cancelled = true; mediatorHolder.withReceiverInfo(logger.atWarning()) .log("Downstream cancellation signal arrived at MessageFlux."); if (WIP.getAndIncrement(this) == 0) { upstream.cancel(); mediatorHolder.freeze(); } }
public void cancel() { if (cancelled) { return; } cancelled = true; mediatorHolder.withReceiverInfo(logger.atWarning()) .log("Downstream cancellation signal arrived at MessageFlux."); if (WIP.getAndIncrement(this) == 0) { upstream.cancel(); mediatorHolder.freeze(); } }
class RecoverableReactorReceiver implements CoreSubscriber<AmqpReceiveLink>, Subscription { private final MediatorHolder mediatorHolder = new MediatorHolder(); private final MessageFlux parent; private final int prefetch; private final CreditFlowMode creditFlowMode; private final AmqpRetryPolicy retryPolicy; private final ClientLogger logger; private final AtomicInteger retryAttempts = new AtomicInteger(); private final CoreSubscriber<? super Message> messageSubscriber; private Subscription upstream; private volatile long requested; @SuppressWarnings("rawtypes") private static final AtomicLongFieldUpdater<RecoverableReactorReceiver> REQUESTED = AtomicLongFieldUpdater.newUpdater(RecoverableReactorReceiver.class, "requested"); private volatile int wip; @SuppressWarnings("rawtypes") private static final AtomicIntegerFieldUpdater<RecoverableReactorReceiver> WIP = AtomicIntegerFieldUpdater.newUpdater(RecoverableReactorReceiver.class, "wip"); private volatile boolean done; private volatile boolean cancelled; volatile Throwable error; @SuppressWarnings("rawtypes") static final AtomicReferenceFieldUpdater<RecoverableReactorReceiver, Throwable> ERROR = AtomicReferenceFieldUpdater.newUpdater(RecoverableReactorReceiver.class, Throwable.class, "error"); /** * Create a recoverable-receiver that supports the message-flux to stream messages from the receiver attached * to a messaging entity to the message-flux's downstream subscriber and recover from receiver termination. * * @param parent the parent message-flux. * @param messageSubscriber the message-flux's downstream subscriber to notify the events. * @param prefetch the number of messages that the operator should prefetch from the messaging entity * (for a less chatty network and faster message processing on the client). * @param creditFlowMode the mode indicating how to compute the credit and when to send it to the broker. * @param retryPolicy the retry policy to use to recover from receiver termination. 
*/ RecoverableReactorReceiver(MessageFlux parent, CoreSubscriber<? super Message> messageSubscriber, int prefetch, CreditFlowMode creditFlowMode, AmqpRetryPolicy retryPolicy) { this.parent = parent; this.messageSubscriber = messageSubscriber; this.prefetch = prefetch; this.creditFlowMode = creditFlowMode; this.retryPolicy = retryPolicy; this.logger = parent.logger; } /** * Invoked by the upstream in response to message-flux subscribing to it. * * @param s the subscription handle for requesting receivers from the upstream or terminating upstream * through cancellation when it is no longer needed. */ @Override public void onSubscribe(Subscription s) { if (Operators.validate(upstream, s)) { upstream = s; messageSubscriber.onSubscribe(this); s.request(1); } } /** * Invoked by the upstream to deliver new receiver. * * @param receiver the new receiver. */ @Override public void onNext(AmqpReceiveLink receiver) { if (done) { receiver.closeAsync().subscribe(); Operators.onNextDropped(receiver, messageSubscriber.currentContext()); return; } final ReactorReceiverMediator mediator = new ReactorReceiverMediator(this, receiver, prefetch, creditFlowMode, logger); if (mediatorHolder.trySet(mediator)) { mediator.onParentReady(); } else { logger.atWarning() .addKeyValue("oldLinkName", mediatorHolder.getLinkName()) .addKeyValue(LINK_NAME_KEY, receiver.getLinkName()) .addKeyValue(ENTITY_PATH_KEY, receiver.getEntityPath()) .log("Got a AmqpReceiveLink when the MessageFlux is already terminated."); receiver.closeAsync().subscribe(); Operators.onDiscard(receiver, messageSubscriber.currentContext()); } } /** * Signals operator termination with error. 
* <ul> * <li>Invoked by the upstream when it errors, or</li> * <li>Invoked by the drain-loop when it detects a 'non-retriable or retry exhaust' error, or </li> * <li>Invoked by the drain-loop when it detects the receiver signaled terminal completion and retry * is disabled (i.e., NULL_RETRY_POLICY is set).</li> * </ul> * * @param e the error signaled. */ @Override public void onError(Throwable e) { if (done) { Operators.onErrorDropped(e, messageSubscriber.currentContext()); return; } if (Exceptions.addThrowable(ERROR, this, e)) { done = true; final String logMessage; if (retryPolicy == NULL_RETRY_POLICY) { logMessage = "Terminal error signal from Upstream|Receiver arrived at MessageFlux."; } else { logMessage = "Terminal error signal from Upstream|RetryLoop arrived at MessageFlux."; } mediatorHolder.withReceiverInfo(logger.atWarning()).log(logMessage, e); drain(null); } else { Operators.onErrorDropped(e, messageSubscriber.currentContext()); } } /** * Signals operator termination with completion. * <ul> * <li>Invoked by the upstream when it completes, or</li> * <li>Invoked by the the drain-loop when it detects first receiver signaled terminal completion and retry * is disabled (i.e., NULL_RETRY_POLICY is set).</li> * </ul> */ @Override public void onComplete() { if (done) { return; } done = true; final String logMessage; if (retryPolicy == NULL_RETRY_POLICY) { logMessage = "Terminal completion signal from Upstream|Receiver arrived at MessageFlux."; } else { logMessage = "Terminal completion signal from Upstream arrived at MessageFlux."; } mediatorHolder.withReceiverInfo(logger.atWarning()).log(logMessage); drain(null); } /** * Invoked by the downstream message subscriber to signal the demand for messages. Whatever has been * requested can be sent downstream, so only signal the demand for what can be safely handled. * No messages will be sent downstream until the demand is signaled. * * @param n the number of messages to send to downstream. 
*/ @Override public void request(long n) { if (Operators.validate(n)) { Operators.addCap(REQUESTED, this, n); drain(null); } } /** * Invoked by downstream to signal termination by cancellation. */ @Override /** * Invoked by the new mediator when it is ready to be used. A mediator constructed in 'onNext' moves * to ready state when its backing receiver is active. * * @param updateDispositionFunc the function to disposition messages from mediator's backing receiver. */ void onMediatorReady(BiFunction<String, DeliveryState, Mono<Void>> updateDispositionFunc) { retryAttempts.set(0); parent.onNextUpdateDispositionFunction(updateDispositionFunc); drain(null); } /** * The serialized entry point to drain-loop. * * @param dataSignal the message to drop if the operator is terminated by cancellation. */ void drain(Message dataSignal) { if (WIP.getAndIncrement(this) != 0) { if (dataSignal != null && cancelled) { Operators.onDiscard(dataSignal, messageSubscriber.currentContext()); } return; } drainLoop(); } /** * The serialized drain-loop (implementation patterns inspired from RxJava, Reactor Operators). * Reference: 'Operator Concurrency Primitives' series https: */ private void drainLoop() { int missed = 1; CoreSubscriber<? 
super Message> downstream = messageSubscriber; for (; ;) { boolean d = done; ReactorReceiverMediator mediator = mediatorHolder.mediator; boolean hasMediator = mediator != null; if (terminateIfCancelled(downstream, null)) { return; } if (terminateIfErrorOrCompletionSignaled(d, downstream, null)) { return; } long r = requested; long emitted = 0L; boolean mediatorTerminatedAndDrained = false; if (r != 0L && hasMediator) { Queue<Message> q = mediator.queue; while (emitted != r) { Message message = q.poll(); if (terminateIfCancelled(downstream, message)) { return; } if (terminateIfErrorOrCompletionSignaled(done, downstream, message)) { return; } boolean empty = message == null; if (empty && mediator.done) { mediatorTerminatedAndDrained = true; break; } if (empty) { break; } messageSubscriber.onNext(message); emitted++; } if (emitted == r) { if (mediator.queue.isEmpty() && mediator.done) { mediatorTerminatedAndDrained = true; } } if (emitted != 0 && r != Long.MAX_VALUE) { r = REQUESTED.addAndGet(this, -emitted); } mediator.update(r, emitted); } if (r == 0L && hasMediator) { if (terminateIfCancelled(downstream, null)) { return; } if (terminateIfErrorOrCompletionSignaled(done, downstream, null)) { return; } if (mediator.queue.isEmpty() && mediator.done) { mediatorTerminatedAndDrained = true; } } if (mediatorTerminatedAndDrained && !mediator.isRetryInitiated) { mediator.isRetryInitiated = true; mediator.closeAsync().subscribe(); setTerminationSignalOrScheduleNextMediatorRequest(mediator.error, downstream, mediatorHolder); } missed = WIP.addAndGet(this, -missed); if (missed == 0) { break; } } } /** * CONTRACT: Never invoke from the outside of serialized drain-loop. * <p/> * See if downstream signaled cancellation to terminate the operator, if so, react to the cancellation. * * @param downstream the downstream. * @param messageDropped the message that gets dropped if cancellation was signaled. * @return true if canceled, false otherwise. 
*/ private boolean terminateIfCancelled(CoreSubscriber<? super Message> downstream, Message messageDropped) { if (cancelled) { Operators.onDiscard(messageDropped, downstream.currentContext()); upstream.cancel(); mediatorHolder.freeze(); return true; } return false; } /** * CONTRACT: Never invoke from the outside of serialized drain-loop. * <p/> * See if there is a pending signal for the operator termination with error or completion, if so, react to it * by terminating downstream. * * @param d indicate if the operator termination was signaled. * @param downstream the downstream. * @param messageDropped the message that gets dropped if termination happened. * @return true if terminated, false otherwise. */ private boolean terminateIfErrorOrCompletionSignaled(boolean d, CoreSubscriber<? super Message> downstream, Message messageDropped) { if (d) { final LoggingEventBuilder logBuilder = mediatorHolder.withReceiverInfo(logger.atWarning()); Throwable e = error; if (e != null && e != Exceptions.TERMINATED) { e = Exceptions.terminate(ERROR, this); Operators.onDiscard(messageDropped, downstream.currentContext()); upstream.cancel(); mediatorHolder.freeze(); logBuilder.log("MessageFlux reached a terminal error-state, signaling it downstream.", e); downstream.onError(e); return true; } Operators.onDiscard(messageDropped, downstream.currentContext()); upstream.cancel(); mediatorHolder.freeze(); logBuilder.log("MessageFlux reached a terminal completion-state, signaling it downstream."); downstream.onComplete(); return true; } return false; } /** * CONTRACT: Never invoke from the outside of serialized drain-loop. * <p/> * 1. When retry is enabled (i.e., NULL_RETRY_POLICY is not set) then schedule request (retry) for the next mediator if * <ul> * <li>the operator is not in a termination signaled state,</li> * <li> and * <ul> * <li>there is no error Or</li> * <li>error is retriable and the retry is not exhausted.</li> * </ul> * </li> * </ul> * 2. 
If retry is enabled (i.e., NULL_RETRY_POLICY is not set) and there is 'non-retriable or retry exhaust' * error, then set an error signal for the drain-loop to terminate the operator. * <br/> * 3. If retry is disabled (i.e., NULL_RETRY_POLICY is set), then set an error signal (if first receiver error-ed) * or completion signal (if first receiver completed) for the drain-loop to terminate the operator. * <br/> * @param error the error that leads to error-ed termination of the last mediator or {@code null} * if terminated with completion. * @param downstream the downstream. * @param mediatorHolder the mediator holder. */ private void setTerminationSignalOrScheduleNextMediatorRequest(Throwable error, CoreSubscriber<? super Message> downstream, MediatorHolder mediatorHolder) { final LoggingEventBuilder logBuilder = mediatorHolder.withReceiverInfo(logger.atWarning()); if (cancelled || done) { logBuilder.log("MessageFlux reached terminal-state [done:{}, cancelled:{}].", done, cancelled); return; } if (retryPolicy == NULL_RETRY_POLICY) { if (error == null) { onComplete(); } else { onError(error); } return; } final Duration delay; if (error == null) { delay = Duration.ofSeconds(1); logBuilder.addKeyValue("retryAfter", delay.toMillis()) .log("Current mediator reached terminal completion-state (retriable:true)."); } else { final int attempt = retryAttempts.incrementAndGet(); delay = retryPolicy.calculateRetryDelay(error, attempt); if (delay != null) { logBuilder.addKeyValue("attempt", attempt) .addKeyValue("retryAfter", delay.toMillis()) .log("Current mediator reached terminal error-state (retriable:true).", error); } else { logBuilder.addKeyValue("attempt", attempt) .log("Current mediator reached terminal error-state (retriable:false) Or MessageFlux retries exhausted.", error); onError(error); return; } } try { scheduleNextMediatorRequest(delay, mediatorHolder); } catch (RejectedExecutionException ree) { final RuntimeException e = Operators.onRejectedExecution(ree, 
downstream.currentContext()); mediatorHolder.withReceiverInfo(logger.atWarning()) .log("Unable to schedule a request for a new mediator (retriable:false).", e); onError(e); } } /** * Schedule a task to request a new mediator. * * @param delay the backoff duration before requesting the next mediator. * @param mediatorHolder the mediator holder. * @throws RejectedExecutionException if the scheduler is unable to schedule the task. */ private void scheduleNextMediatorRequest(Duration delay, MediatorHolder mediatorHolder) { final Runnable task = () -> { final LoggingEventBuilder logBuilder = mediatorHolder.withReceiverInfo(logger.atWarning()); if (cancelled || done) { logBuilder.log("During the backoff, MessageFlux reached terminal-state [done:{}, cancelled:{}].", done, cancelled); return; } logBuilder.log("Requesting a new mediator."); upstream.request(1); }; mediatorHolder.nextMediatorRequestDisposable = Schedulers.parallel().schedule(task, delay.toMillis(), TimeUnit.MILLISECONDS); } }
class RecoverableReactorReceiver implements CoreSubscriber<AmqpReceiveLink>, Subscription { private final MediatorHolder mediatorHolder = new MediatorHolder(); private final MessageFlux parent; private final int prefetch; private final CreditFlowMode creditFlowMode; private final AmqpRetryPolicy retryPolicy; private final ClientLogger logger; private final AtomicInteger retryAttempts = new AtomicInteger(); private final CoreSubscriber<? super Message> messageSubscriber; private Subscription upstream; private volatile long requested; @SuppressWarnings("rawtypes") private static final AtomicLongFieldUpdater<RecoverableReactorReceiver> REQUESTED = AtomicLongFieldUpdater.newUpdater(RecoverableReactorReceiver.class, "requested"); private volatile int wip; @SuppressWarnings("rawtypes") private static final AtomicIntegerFieldUpdater<RecoverableReactorReceiver> WIP = AtomicIntegerFieldUpdater.newUpdater(RecoverableReactorReceiver.class, "wip"); private volatile boolean done; private volatile boolean cancelled; private volatile Throwable error; @SuppressWarnings("rawtypes") private static final AtomicReferenceFieldUpdater<RecoverableReactorReceiver, Throwable> ERROR = AtomicReferenceFieldUpdater.newUpdater(RecoverableReactorReceiver.class, Throwable.class, "error"); /** * Create a recoverable-receiver that supports the message-flux to stream messages from the receiver attached * to a messaging entity to the message-flux's downstream subscriber and recover from receiver termination. * * @param parent the parent message-flux. * @param messageSubscriber the message-flux's downstream subscriber to notify the events. * @param prefetch the number of messages that the operator should prefetch from the messaging entity * (for a less chatty network and faster message processing on the client). * @param creditFlowMode the mode indicating how to compute the credit and when to send it to the broker. * @param retryPolicy the retry policy to use to recover from receiver termination. 
*/ RecoverableReactorReceiver(MessageFlux parent, CoreSubscriber<? super Message> messageSubscriber, int prefetch, CreditFlowMode creditFlowMode, AmqpRetryPolicy retryPolicy) { this.parent = parent; this.messageSubscriber = messageSubscriber; this.prefetch = prefetch; this.creditFlowMode = creditFlowMode; this.retryPolicy = retryPolicy; this.logger = parent.logger; } /** * Invoked by the upstream in response to message-flux subscribing to it. * * @param s the subscription handle for requesting receivers from the upstream or terminating upstream * through cancellation when it is no longer needed. */ @Override public void onSubscribe(Subscription s) { if (Operators.validate(upstream, s)) { upstream = s; messageSubscriber.onSubscribe(this); s.request(1); } } /** * Invoked by the upstream to deliver new receiver. * * @param receiver the new receiver. */ @Override public void onNext(AmqpReceiveLink receiver) { if (done) { receiver.closeAsync().subscribe(); Operators.onNextDropped(receiver, messageSubscriber.currentContext()); return; } final ReactorReceiverMediator mediator = new ReactorReceiverMediator(this, receiver, prefetch, creditFlowMode, logger); if (mediatorHolder.trySet(mediator)) { mediator.onParentReady(); } else { logger.atWarning() .addKeyValue("oldLinkName", mediatorHolder.getLinkName()) .addKeyValue(LINK_NAME_KEY, receiver.getLinkName()) .addKeyValue(ENTITY_PATH_KEY, receiver.getEntityPath()) .log("Got a AmqpReceiveLink when the MessageFlux is already terminated."); receiver.closeAsync().subscribe(); Operators.onDiscard(receiver, messageSubscriber.currentContext()); } } /** * Signals operator termination with error. 
* <ul> * <li>Invoked by the upstream when it errors, or</li> * <li>Invoked by the drain-loop when it detects a 'non-retriable or retry exhaust' error, or </li> * <li>Invoked by the drain-loop when it detects the receiver signaled terminal completion and retry * is disabled (i.e., NULL_RETRY_POLICY is set).</li> * </ul> * * @param e the error signaled. */ @Override public void onError(Throwable e) { if (done) { Operators.onErrorDropped(e, messageSubscriber.currentContext()); return; } if (Exceptions.addThrowable(ERROR, this, e)) { done = true; final String logMessage; if (retryPolicy == NULL_RETRY_POLICY) { logMessage = "Terminal error signal from Upstream|Receiver arrived at MessageFlux."; } else { logMessage = "Terminal error signal from Upstream|RetryLoop arrived at MessageFlux."; } mediatorHolder.withReceiverInfo(logger.atWarning()).log(logMessage, e); drain(null); } else { Operators.onErrorDropped(e, messageSubscriber.currentContext()); } } /** * Signals operator termination with completion. * <ul> * <li>Invoked by the upstream when it completes, or</li> * <li>Invoked by the the drain-loop when it detects first receiver signaled terminal completion and retry * is disabled (i.e., NULL_RETRY_POLICY is set).</li> * </ul> */ @Override public void onComplete() { if (done) { return; } done = true; final String logMessage; if (retryPolicy == NULL_RETRY_POLICY) { logMessage = "Terminal completion signal from Upstream|Receiver arrived at MessageFlux."; } else { logMessage = "Terminal completion signal from Upstream arrived at MessageFlux."; } mediatorHolder.withReceiverInfo(logger.atWarning()).log(logMessage); drain(null); } /** * Invoked by the downstream message subscriber to signal the demand for messages. Whatever has been * requested can be sent downstream, so only signal the demand for what can be safely handled. * No messages will be sent downstream until the demand is signaled. * * @param n the number of messages to send to downstream. 
*/ @Override public void request(long n) { if (Operators.validate(n)) { Operators.addCap(REQUESTED, this, n); drain(null); } } /** * Invoked by downstream to signal termination by cancellation. */ @Override /** * Invoked by the new mediator when it is ready to be used. A mediator constructed in 'onNext' moves * to ready state when its backing receiver is active. * * @param updateDispositionFunc the function to disposition messages from mediator's backing receiver. */ void onMediatorReady(DispositionFunction updateDispositionFunc) { retryAttempts.set(0); parent.onNextUpdateDispositionFunction(updateDispositionFunc); drain(null); } /** * The serialized entry point to drain-loop. * * @param dataSignal the message to drop if the operator is terminated by cancellation. */ void drain(Message dataSignal) { if (WIP.getAndIncrement(this) != 0) { if (dataSignal != null && cancelled) { Operators.onDiscard(dataSignal, messageSubscriber.currentContext()); } return; } drainLoop(); } /** * The serialized drain-loop (implementation patterns inspired from RxJava, Reactor Operators). * Reference: 'Operator Concurrency Primitives' series https: */ private void drainLoop() { int missed = 1; CoreSubscriber<? 
super Message> downstream = messageSubscriber; for (; ;) { boolean d = done; ReactorReceiverMediator mediator = mediatorHolder.mediator; boolean hasMediator = mediator != null; if (terminateIfCancelled(downstream, null)) { return; } if (terminateIfErrorOrCompletionSignaled(d, downstream, null)) { return; } long r = requested; long emitted = 0L; boolean mediatorTerminatedAndDrained = false; if (r != 0L && hasMediator) { Queue<Message> q = mediator.queue; while (emitted != r) { Message message = q.poll(); if (terminateIfCancelled(downstream, message)) { return; } if (terminateIfErrorOrCompletionSignaled(done, downstream, message)) { return; } boolean empty = message == null; if (empty && mediator.done) { mediatorTerminatedAndDrained = true; break; } if (empty) { break; } messageSubscriber.onNext(message); emitted++; } if (emitted == r) { if (mediator.queue.isEmpty() && mediator.done) { mediatorTerminatedAndDrained = true; } } if (emitted != 0 && r != Long.MAX_VALUE) { r = REQUESTED.addAndGet(this, -emitted); } mediator.update(r, emitted); } if (r == 0L && hasMediator) { if (terminateIfCancelled(downstream, null)) { return; } if (terminateIfErrorOrCompletionSignaled(done, downstream, null)) { return; } if (mediator.queue.isEmpty() && mediator.done) { mediatorTerminatedAndDrained = true; } } if (mediatorTerminatedAndDrained && !mediator.isRetryInitiated) { mediator.isRetryInitiated = true; mediator.closeAsync().subscribe(); setTerminationSignalOrScheduleNextMediatorRequest(mediator.error, downstream, mediatorHolder); } missed = WIP.addAndGet(this, -missed); if (missed == 0) { break; } } } /** * CONTRACT: Never invoke from the outside of serialized drain-loop. * <p> * See if downstream signaled cancellation to terminate the operator, if so, react to the cancellation. * </p> * @param downstream the downstream. * @param messageDropped the message that gets dropped if cancellation was signaled. * @return true if canceled, false otherwise. 
*/ private boolean terminateIfCancelled(CoreSubscriber<? super Message> downstream, Message messageDropped) { if (cancelled) { Operators.onDiscard(messageDropped, downstream.currentContext()); upstream.cancel(); mediatorHolder.freeze(); return true; } return false; } /** * CONTRACT: Never invoke from the outside of serialized drain-loop. * <p> * See if there is a pending signal for the operator termination with error or completion, if so, react to it * by terminating downstream. * </p> * * @param d indicate if the operator termination was signaled. * @param downstream the downstream. * @param messageDropped the message that gets dropped if termination happened. * @return true if terminated, false otherwise. */ private boolean terminateIfErrorOrCompletionSignaled(boolean d, CoreSubscriber<? super Message> downstream, Message messageDropped) { if (d) { final LoggingEventBuilder logBuilder = mediatorHolder.withReceiverInfo(logger.atWarning()); Throwable e = error; if (e != null && e != Exceptions.TERMINATED) { e = Exceptions.terminate(ERROR, this); Operators.onDiscard(messageDropped, downstream.currentContext()); upstream.cancel(); mediatorHolder.freeze(); logBuilder.log("MessageFlux reached a terminal error-state, signaling it downstream.", e); downstream.onError(e); return true; } Operators.onDiscard(messageDropped, downstream.currentContext()); upstream.cancel(); mediatorHolder.freeze(); logBuilder.log("MessageFlux reached a terminal completion-state, signaling it downstream."); downstream.onComplete(); return true; } return false; } /** * * CONTRACT: Never invoke from the outside of serialized drain-loop. * <p></p> * 1. When retry is enabled (i.e., NULL_RETRY_POLICY is not set) then schedule request (retry) for the next mediator if * <ul> * <li>the operator is not in a termination signaled state,</li> * <li> and * <ul> * <li>there is no error Or</li> * <li>error is retriable and the retry is not exhausted.</li> * </ul> * </li> * </ul> * 2. 
If retry is enabled (i.e., NULL_RETRY_POLICY is not set) and there is 'non-retriable or retry exhaust' * error, then set an error signal for the drain-loop to terminate the operator. * <br/> * 3. If retry is disabled (i.e., NULL_RETRY_POLICY is set), then set an error signal (if first receiver error-ed) * or completion signal (if first receiver completed) for the drain-loop to terminate the operator. * <p></p> * @param error the error that leads to error-ed termination of the last mediator or {@code null} * if terminated with completion. * @param downstream the downstream. * @param mediatorHolder the mediator holder. */ private void setTerminationSignalOrScheduleNextMediatorRequest(Throwable error, CoreSubscriber<? super Message> downstream, MediatorHolder mediatorHolder) { final LoggingEventBuilder logBuilder = mediatorHolder.withReceiverInfo(logger.atWarning()); if (cancelled || done) { logBuilder.log("MessageFlux reached terminal-state [done:{}, cancelled:{}].", done, cancelled); return; } if (retryPolicy == NULL_RETRY_POLICY) { if (error == null) { onComplete(); } else { onError(error); } return; } final Duration delay; if (error == null) { delay = Duration.ofSeconds(1); logBuilder.addKeyValue("retryAfter", delay.toMillis()) .log("Current mediator reached terminal completion-state (retriable:true)."); } else { final int attempt = retryAttempts.incrementAndGet(); delay = retryPolicy.calculateRetryDelay(error, attempt); if (delay != null) { logBuilder.addKeyValue("attempt", attempt) .addKeyValue("retryAfter", delay.toMillis()) .log("Current mediator reached terminal error-state (retriable:true).", error); } else { logBuilder.addKeyValue("attempt", attempt) .log("Current mediator reached terminal error-state (retriable:false) Or MessageFlux retries exhausted.", error); onError(error); return; } } try { scheduleNextMediatorRequest(delay, mediatorHolder); } catch (RejectedExecutionException ree) { final RuntimeException e = Operators.onRejectedExecution(ree, 
downstream.currentContext()); mediatorHolder.withReceiverInfo(logger.atWarning()) .log("Unable to schedule a request for a new mediator (retriable:false).", e); onError(e); } } /** * Schedule a task to request a new mediator. * * @param delay the backoff duration before requesting the next mediator. * @param mediatorHolder the mediator holder. * @throws RejectedExecutionException if the scheduler is unable to schedule the task. */ private void scheduleNextMediatorRequest(Duration delay, MediatorHolder mediatorHolder) { final Runnable task = () -> { final LoggingEventBuilder logBuilder = mediatorHolder.withReceiverInfo(logger.atWarning()); if (cancelled || done) { logBuilder.log("During the backoff, MessageFlux reached terminal-state [done:{}, cancelled:{}].", done, cancelled); return; } logBuilder.log("Requesting a new mediator."); upstream.request(1); }; mediatorHolder.nextMediatorRequestDisposable = Schedulers.parallel().schedule(task, delay.toMillis(), TimeUnit.MILLISECONDS); } }
How do you choose when to use the ternary operator, the `if/else`, or `if`? (I see a mix of them here, so I was curious.)
Flux<EndpointState> getEndpointStates() { if (isV2) { return receiveLinkHandler2.getEndpointStates(); } else { return receiveLinkHandler.getEndpointStates(); } }
}
Flux<EndpointState> getEndpointStates() { if (isV2) { return receiveLinkHandler2.getEndpointStates(); } else { return receiveLinkHandler.getEndpointStates(); } }
class ReceiveLinkHandlerWrapper { private final boolean isV2; private final ReceiveLinkHandler receiveLinkHandler; private final ReceiveLinkHandler2 receiveLinkHandler2; private ClientLogger logger; public ReceiveLinkHandlerWrapper(ReceiveLinkHandler receiveLinkHandler) { this.isV2 = false; this.receiveLinkHandler = receiveLinkHandler; this.receiveLinkHandler2 = null; } public ReceiveLinkHandlerWrapper(ReceiveLinkHandler2 receiveLinkHandler2) { this.isV2 = true; this.receiveLinkHandler = null; this.receiveLinkHandler2 = receiveLinkHandler2; } public void setLogger(ClientLogger logger) { this.logger = logger; } public boolean isV2() { return this.isV2; } String getConnectionId() { return isV2 ? receiveLinkHandler2.getConnectionId() : receiveLinkHandler.getConnectionId(); } public String getLinkName() { return isV2 ? receiveLinkHandler2.getLinkName() : receiveLinkHandler.getLinkName(); } public String getHostname() { return isV2 ? receiveLinkHandler2.getHostname() : receiveLinkHandler.getHostname(); } Flux<Delivery> getDeliveredMessagesV1() { if (isV2) { return fluxError(logger, unsupportedOperation("getDeliveredMessagesV1", "V2")); } return receiveLinkHandler.getDeliveredMessages(); } Flux<Message> getDeliveredMessagesV2() { if (!isV2) { return fluxError(logger, unsupportedOperation("getDeliveredMessagesV2", "V1")); } return receiveLinkHandler2.getMessages(); } Mono<Void> sendDisposition(String deliveryTag, DeliveryState deliveryState) { if (!isV2) { return monoError(logger, unsupportedOperation("updateDisposition", "V1")); } return receiveLinkHandler2.sendDisposition(deliveryTag, deliveryState); } Mono<Boolean> beginClose(Mono<Boolean> thenMono) { if (isV2) { return receiveLinkHandler2.preClose().then(thenMono); } else { return thenMono; } } void close() { if (isV2) { receiveLinkHandler2.close(); } else { receiveLinkHandler.close(); } } private static RuntimeException unsupportedOperation(String operation, String unsupportedStack) { return new 
UnsupportedOperationException("The " + operation + " is not needed or supported in " + unsupportedStack + "."); } }
class ReceiveLinkHandlerWrapper { private final boolean isV2; private final ReceiveLinkHandler receiveLinkHandler; private final ReceiveLinkHandler2 receiveLinkHandler2; private ClientLogger logger; public ReceiveLinkHandlerWrapper(ReceiveLinkHandler receiveLinkHandler) { this.isV2 = false; this.receiveLinkHandler = receiveLinkHandler; this.receiveLinkHandler2 = null; } public ReceiveLinkHandlerWrapper(ReceiveLinkHandler2 receiveLinkHandler2) { this.isV2 = true; this.receiveLinkHandler = null; this.receiveLinkHandler2 = receiveLinkHandler2; } public void setLogger(ClientLogger logger) { this.logger = logger; } public boolean isV2() { return this.isV2; } String getConnectionId() { return isV2 ? receiveLinkHandler2.getConnectionId() : receiveLinkHandler.getConnectionId(); } public String getLinkName() { return isV2 ? receiveLinkHandler2.getLinkName() : receiveLinkHandler.getLinkName(); } public String getHostname() { return isV2 ? receiveLinkHandler2.getHostname() : receiveLinkHandler.getHostname(); } Flux<Delivery> getDeliveredMessagesV1() { if (isV2) { return fluxError(logger, unsupportedOperation("getDeliveredMessagesV1", "V2")); } return receiveLinkHandler.getDeliveredMessages(); } Flux<Message> getDeliveredMessagesV2() { if (!isV2) { return fluxError(logger, unsupportedOperation("getDeliveredMessagesV2", "V1")); } return receiveLinkHandler2.getMessages(); } Mono<Void> sendDisposition(String deliveryTag, DeliveryState deliveryState) { if (!isV2) { return monoError(logger, unsupportedOperation("updateDisposition", "V1")); } return receiveLinkHandler2.sendDisposition(deliveryTag, deliveryState); } Mono<Void> beginClose() { if (isV2) { return receiveLinkHandler2.preClose(); } else { return Mono.empty(); } } void close() { if (isV2) { receiveLinkHandler2.close(); } else { receiveLinkHandler.close(); } } private static RuntimeException unsupportedOperation(String operation, String unsupportedStack) { return new UnsupportedOperationException("The " + operation + " is 
not needed or supported in " + unsupportedStack + "."); } }
We could add another KeyValue for Call site and remove it from the log message/ Easier to grep.
private RecoveryTerminatedException checkRecoveryTerminated(String callSite) { final boolean isCacheTerminated = terminated; final boolean isConnectionTerminated = connection.isDisposed(); if (isCacheTerminated || isConnectionTerminated) { logger.atInfo() .addKeyValue(IS_CACHE_TERMINATED_KEY, isCacheTerminated) .addKeyValue(IS_CONNECTION_TERMINATED_KEY, isConnectionTerminated) .log("Channel recovery support is terminated. call-site:{}", callSite); return new RecoveryTerminatedException(connection.getId(), isCacheTerminated, isConnectionTerminated); } return null; }
.log("Channel recovery support is terminated. call-site:{}", callSite);
private RecoveryTerminatedException checkRecoveryTerminated(String callSite) { final boolean isCacheTerminated = terminated; final boolean isConnectionTerminated = connection.isDisposed(); if (isCacheTerminated || isConnectionTerminated) { logger.atInfo() .addKeyValue(IS_CACHE_TERMINATED_KEY, isCacheTerminated) .addKeyValue(IS_CONNECTION_TERMINATED_KEY, isConnectionTerminated) .addKeyValue(CALL_SITE_KEY, callSite) .log("Channel recovery support is terminated."); return new RecoveryTerminatedException(connection.getId(), isCacheTerminated, isConnectionTerminated); } return null; }
class RequestResponseChannelCache implements Disposable { private static final String IS_CACHE_TERMINATED_KEY = "isCacheTerminated"; private static final String IS_CONNECTION_TERMINATED_KEY = "isConnectionTerminated"; private static final String TRY_COUNT_KEY = "tryCount"; private final ClientLogger logger; private final ReactorConnection connection; private final Duration activationTimeout; private final Mono<RequestResponseChannel> createOrGetCachedChannel; private final Object lock = new Object(); private volatile boolean terminated; private volatile RequestResponseChannel currentChannel; RequestResponseChannelCache(ReactorConnection connection, String entityPath, String sessionName, String linksName, AmqpRetryPolicy retryPolicy) { Objects.requireNonNull(connection, "'connection' cannot be null."); Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); Objects.requireNonNull(sessionName, "'sessionName' cannot be null."); Objects.requireNonNull(linksName, "'linksName' cannot be null."); Objects.requireNonNull(retryPolicy, "'retryPolicy' cannot be null."); final Map<String, Object> loggingContext = new HashMap<>(2); loggingContext.put(CONNECTION_ID_KEY, connection.getId()); loggingContext.put(LINK_NAME_KEY, linksName); this.logger = new ClientLogger(RequestResponseChannelCache.class, loggingContext); this.connection = connection; this.activationTimeout = retryPolicy.getRetryOptions().getTryTimeout(); final Mono<RequestResponseChannel> newChannel = Mono.defer(() -> { final RecoveryTerminatedException terminatedError = checkRecoveryTerminated("new-channel"); if (terminatedError != null) { return Mono.error(terminatedError); } return connection.newRequestResponseChannel(sessionName, linksName, entityPath); }); this.createOrGetCachedChannel = newChannel .flatMap(c -> { logger.atInfo() .log("Waiting for channel to active."); final Mono<RequestResponseChannel> awaitToActive = c.getEndpointStates() .filter(s -> s == AmqpEndpointState.ACTIVE) .next() 
.switchIfEmpty(Mono.error(() -> new AmqpException(true, "Channel completed without being active.", null))) .then(Mono.just(c)) .timeout(activationTimeout, Mono.defer(() -> { final String timeoutMessage = String.format("The channel activation wait timed-out (%s).", activationTimeout); logger.atInfo().log(timeoutMessage + " Closing channel."); return c.closeAsync().then(Mono.error(new AmqpException(true, timeoutMessage, null))); })); return awaitToActive .doOnCancel(() -> { logger.atInfo() .log("The channel request was canceled while waiting to active."); if (!c.isDisposed()) { c.closeAsync().subscribe(); } }); }) .retryWhen(retryWhenSpec(retryPolicy)) .<RequestResponseChannel>handle((c, sink) -> { final RequestResponseChannel channel = c; final RecoveryTerminatedException terminatedError; synchronized (lock) { terminatedError = checkRecoveryTerminated("cache-refresh"); this.currentChannel = channel; } if (terminatedError != null) { if (!channel.isDisposed()) { channel.closeAsync().subscribe(); } sink.error(terminatedError.propagate()); } else { logger.atInfo().log("Emitting the new active channel."); sink.next(channel); } }).cacheInvalidateIf(c -> { if (c.isDisposedOrDisposalInInProgress()) { logger.atInfo().log("The channel is closed, requesting a new channel."); return true; } else { return false; } }); } /** * Get the Mono that, when subscribed, emits the cached RequestResponseChannel if it is active or creates and * emits a new RequestResponseChannel if the cache is empty or the current cached RequestResponseChannel is in * closed state. * * @return a Mono that emits active RequestResponseChannel. */ public Mono<RequestResponseChannel> get() { return createOrGetCachedChannel; } /** * Terminate the cache such that it is no longer possible to obtain RequestResponseChannel using {@link this * If there is a current (cached) RequestResponseChannel then it will be closed. 
*/ @Override public void dispose() { final RequestResponseChannel channel; synchronized (lock) { if (terminated) { return; } terminated = true; channel = currentChannel; } if (channel != null && !channel.isDisposed()) { logger.atInfo().log("Closing the cached channel and Terminating the channel recovery support."); channel.closeAsync().subscribe(); } else { logger.atInfo().log("Terminating the channel recovery support."); } } @Override public boolean isDisposed() { return terminated; } private Retry retryWhenSpec(AmqpRetryPolicy retryPolicy) { return Retry.from(retrySignals -> retrySignals .concatMap(retrySignal -> { final Retry.RetrySignal signal = retrySignal.copy(); final Throwable error = signal.failure(); final long iteration = signal.totalRetriesInARow(); if (error == null) { return Mono.error(new IllegalStateException("RetrySignal::failure() not expected to be null.")); } final boolean shouldRetry = error instanceof TimeoutException || (error instanceof AmqpException && ((AmqpException) error).isTransient() || (error instanceof IllegalStateException) || (error instanceof RejectedExecutionException)); if (!shouldRetry) { logger.atWarning() .addKeyValue(TRY_COUNT_KEY, iteration) .log("Exception is non-retriable, not retrying for a new channel.", error); if (error instanceof RecoveryTerminatedException) { return Mono.error(((RecoveryTerminatedException) error).propagate()); } else { return Mono.error(error); } } final Throwable errorToUse = error instanceof AmqpException ? 
error : new AmqpException(true, "Non-AmqpException occurred upstream.", error, null); final long attempts = Math.min(iteration, retryPolicy.getMaxRetries()); final Duration backoff = retryPolicy.calculateRetryDelay(errorToUse, (int) attempts); if (backoff == null) { logger.atWarning() .addKeyValue(TRY_COUNT_KEY, iteration) .log("Retry is disabled, not retrying for a new channel.", error); return Mono.error(error); } logger.atInfo() .addKeyValue(TRY_COUNT_KEY, iteration) .addKeyValue(INTERVAL_KEY, backoff.toMillis()) .log("Transient error occurred. Retrying.", error); return Mono.delay(backoff); })); } /** * Check if this cache is in a state where the cache refresh (i.e. recovery of RequestResponseChannel) is no longer * possible. * <p> * The recovery mechanism is terminated once the cache is terminated due to {@link RequestResponseChannelCache * call or the parent {@link ReactorConnection} is in terminated state. * Since the parent {@link ReactorConnection} hosts any RequestResponseChannel object that RequestResponseChannelCache * caches, recovery (scoped to the Connection) is impossible once the Connection is terminated * (i.e. connection.isDisposed() == true). Which also means RequestResponseChannelCache cannot outlive the Connection. * * @param callSite the call site checking the recovery termination (for logging). * @return {@link RecoveryTerminatedException} if the recovery is terminated, {@code null} otherwise. */ /** * The error type (internal to the cache) representing the termination of recovery support, which means cache cannot * be refreshed any longer. 
* @See {@link RequestResponseChannelCache */ private static final class RecoveryTerminatedException extends RuntimeException { private final String connectionId; private final String message; RecoveryTerminatedException(String connectionId, boolean isCacheTerminated, boolean isConnectionTerminated) { this.connectionId = connectionId; this.message = String.format("%s:%b %s:%b", IS_CACHE_TERMINATED_KEY, isCacheTerminated, IS_CONNECTION_TERMINATED_KEY, isConnectionTerminated); } /** * Translate this recovery terminated error to {@link RequestResponseChannelClosedException} to propagate * to the downstream of the {@link RequestResponseChannelCache}. * <p> * Termination of the recovery (due to Cache or Connection termination) means any cached RequestResponseChannel * is terminated or no new RequestResponseChannel can host on the Connection. In this case, we intentionally * propagate 'RequestResponseChannelClosedException' to downstream. If the downstream is a part async chain with * the {@link ReactorConnectionCache} as upstream, then the chain may retry on this specific error type to obtain * a new Connection and a new RequestResponseChannelCache which provides RequestResponseChannel hosted on this * new Connection. Examples of such async chains are those that enable Producer and Consumer recovery. * * @return the {@link RequestResponseChannelClosedException}. */ RequestResponseChannelClosedException propagate() { return new RequestResponseChannelClosedException(connectionId, message); } } }
class RequestResponseChannelCache implements Disposable { private static final String IS_CACHE_TERMINATED_KEY = "isCacheTerminated"; private static final String IS_CONNECTION_TERMINATED_KEY = "isConnectionTerminated"; private static final String TRY_COUNT_KEY = "tryCount"; private final ClientLogger logger; private final ReactorConnection connection; private final Duration activationTimeout; private final Mono<RequestResponseChannel> createOrGetCachedChannel; private final Object lock = new Object(); private volatile boolean terminated; private volatile RequestResponseChannel currentChannel; RequestResponseChannelCache(ReactorConnection connection, String entityPath, String sessionName, String linksName, AmqpRetryPolicy retryPolicy) { Objects.requireNonNull(connection, "'connection' cannot be null."); Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); Objects.requireNonNull(sessionName, "'sessionName' cannot be null."); Objects.requireNonNull(linksName, "'linksName' cannot be null."); Objects.requireNonNull(retryPolicy, "'retryPolicy' cannot be null."); final Map<String, Object> loggingContext = new HashMap<>(2); loggingContext.put(CONNECTION_ID_KEY, connection.getId()); loggingContext.put(LINK_NAME_KEY, linksName); this.logger = new ClientLogger(RequestResponseChannelCache.class, loggingContext); this.connection = connection; this.activationTimeout = retryPolicy.getRetryOptions().getTryTimeout(); final Mono<RequestResponseChannel> newChannel = Mono.defer(() -> { final RecoveryTerminatedException terminatedError = checkRecoveryTerminated("new-channel"); if (terminatedError != null) { return Mono.error(terminatedError); } return connection.newRequestResponseChannel(sessionName, linksName, entityPath); }); this.createOrGetCachedChannel = newChannel .flatMap(c -> { logger.atInfo() .log("Waiting for channel to active."); final Mono<RequestResponseChannel> awaitToActive = c.getEndpointStates() .filter(s -> s == AmqpEndpointState.ACTIVE) .next() 
.switchIfEmpty(Mono.error(() -> new AmqpException(true, "Channel completed without being active.", null))) .then(Mono.just(c)) .timeout(activationTimeout, Mono.defer(() -> { final String timeoutMessage = String.format("The channel activation wait timed-out (%s).", activationTimeout); logger.atInfo().log(timeoutMessage + " Closing channel."); return c.closeAsync().then(Mono.error(new AmqpException(true, timeoutMessage, null))); })); return awaitToActive .doOnCancel(() -> { logger.atInfo() .log("The channel request was canceled while waiting to active."); if (!c.isDisposed()) { c.closeAsync().subscribe(); } }); }) .retryWhen(retryWhenSpec(retryPolicy)) .<RequestResponseChannel>handle((c, sink) -> { final RequestResponseChannel channel = c; final RecoveryTerminatedException terminatedError; synchronized (lock) { terminatedError = checkRecoveryTerminated("cache-refresh"); this.currentChannel = channel; } if (terminatedError != null) { if (!channel.isDisposed()) { channel.closeAsync().subscribe(); } sink.error(terminatedError.propagate()); } else { logger.atInfo().log("Emitting the new active channel."); sink.next(channel); } }).cacheInvalidateIf(c -> { if (c.isDisposedOrDisposalInInProgress()) { logger.atInfo().log("The channel is closed, requesting a new channel."); return true; } else { return false; } }); } /** * Get the Mono that, when subscribed, emits the cached RequestResponseChannel if it is active or creates and * emits a new RequestResponseChannel if the cache is empty or the current cached RequestResponseChannel is in * closed state. * * @return a Mono that emits active RequestResponseChannel. */ public Mono<RequestResponseChannel> get() { return createOrGetCachedChannel; } /** * Terminate the cache such that it is no longer possible to obtain RequestResponseChannel using {@link this * If there is a current (cached) RequestResponseChannel then it will be closed. 
*/ @Override public void dispose() { final RequestResponseChannel channel; synchronized (lock) { if (terminated) { return; } terminated = true; channel = currentChannel; } if (channel != null && !channel.isDisposed()) { logger.atInfo().log("Closing the cached channel and Terminating the channel recovery support."); channel.closeAsync().subscribe(); } else { logger.atInfo().log("Terminating the channel recovery support."); } } @Override public boolean isDisposed() { return terminated; } private Retry retryWhenSpec(AmqpRetryPolicy retryPolicy) { return Retry.from(retrySignals -> retrySignals .concatMap(retrySignal -> { final Retry.RetrySignal signal = retrySignal.copy(); final Throwable error = signal.failure(); final long iteration = signal.totalRetriesInARow(); if (error == null) { return Mono.error(new IllegalStateException("RetrySignal::failure() not expected to be null.")); } final boolean shouldRetry = error instanceof TimeoutException || (error instanceof AmqpException && ((AmqpException) error).isTransient() || (error instanceof IllegalStateException) || (error instanceof RejectedExecutionException)); if (!shouldRetry) { logger.atWarning() .addKeyValue(TRY_COUNT_KEY, iteration) .log("Exception is non-retriable, not retrying for a new channel.", error); if (error instanceof RecoveryTerminatedException) { return Mono.error(((RecoveryTerminatedException) error).propagate()); } else { return Mono.error(error); } } final Throwable errorToUse = error instanceof AmqpException ? 
error : new AmqpException(true, "Non-AmqpException occurred upstream.", error, null); final long attempts = Math.min(iteration, retryPolicy.getMaxRetries()); final Duration backoff = retryPolicy.calculateRetryDelay(errorToUse, (int) attempts); if (backoff == null) { logger.atWarning() .addKeyValue(TRY_COUNT_KEY, iteration) .log("Retry is disabled, not retrying for a new channel.", error); return Mono.error(error); } logger.atInfo() .addKeyValue(TRY_COUNT_KEY, iteration) .addKeyValue(INTERVAL_KEY, backoff.toMillis()) .log("Transient error occurred. Retrying.", error); return Mono.delay(backoff); })); } /** * Check if this cache is in a state where the cache refresh (i.e. recovery of RequestResponseChannel) is no longer * possible. * <p> * The recovery mechanism is terminated once the cache is terminated due to {@link RequestResponseChannelCache * call or the parent {@link ReactorConnection} is in terminated state. * Since the parent {@link ReactorConnection} hosts any RequestResponseChannel object that RequestResponseChannelCache * caches, recovery (scoped to the Connection) is impossible once the Connection is terminated * (i.e. connection.isDisposed() == true). Which also means RequestResponseChannelCache cannot outlive the Connection. * * @param callSite the call site checking the recovery termination (for logging). * @return {@link RecoveryTerminatedException} if the recovery is terminated, {@code null} otherwise. */ /** * The error type (internal to the cache) representing the termination of recovery support, which means cache cannot * be refreshed any longer. 
* @See {@link RequestResponseChannelCache */ private static final class RecoveryTerminatedException extends RuntimeException { private final String connectionId; private final String message; RecoveryTerminatedException(String connectionId, boolean isCacheTerminated, boolean isConnectionTerminated) { this.connectionId = connectionId; this.message = String.format("%s:%b %s:%b", IS_CACHE_TERMINATED_KEY, isCacheTerminated, IS_CONNECTION_TERMINATED_KEY, isConnectionTerminated); } /** * Translate this recovery terminated error to {@link RequestResponseChannelClosedException} to propagate * to the downstream of the {@link RequestResponseChannelCache}. * <p> * Termination of the recovery (due to Cache or Connection termination) means any cached RequestResponseChannel * is terminated or no new RequestResponseChannel can host on the Connection. In this case, we intentionally * propagate 'RequestResponseChannelClosedException' to downstream. If the downstream is a part async chain with * the {@link ReactorConnectionCache} as upstream, then the chain may retry on this specific error type to obtain * a new Connection and a new RequestResponseChannelCache which provides RequestResponseChannel hosted on this * new Connection. Examples of such async chains are those that enable Producer and Consumer recovery. * * @return the {@link RequestResponseChannelClosedException}. */ RequestResponseChannelClosedException propagate() { return new RequestResponseChannelClosedException(connectionId, message); } } }
Let me update the doc to make it more readable. **[DONE]**
public void onNext(AmqpReceiveLink receiver) { if (done) { receiver.closeAsync().subscribe(); Operators.onNextDropped(receiver, messageSubscriber.currentContext()); return; } final ReactorReceiverMediator mediator = new ReactorReceiverMediator(this, receiver, prefetch, creditFlowMode, logger); if (mediatorHolder.trySet(mediator)) { mediator.onParentReady(); } else { logger.atWarning() .addKeyValue("oldLinkName", mediatorHolder.getLinkName()) .addKeyValue(LINK_NAME_KEY, receiver.getLinkName()) .addKeyValue(ENTITY_PATH_KEY, receiver.getEntityPath()) .log("Got a AmqpReceiveLink when the MessageFlux is already terminated."); receiver.closeAsync().subscribe(); Operators.onDiscard(receiver, messageSubscriber.currentContext()); } }
public void onNext(AmqpReceiveLink receiver) { if (done) { receiver.closeAsync().subscribe(); Operators.onNextDropped(receiver, messageSubscriber.currentContext()); return; } final ReactorReceiverMediator mediator = new ReactorReceiverMediator(this, receiver, prefetch, creditFlowMode, logger); if (mediatorHolder.trySet(mediator)) { mediator.onParentReady(); } else { logger.atWarning() .addKeyValue("oldLinkName", mediatorHolder.getLinkName()) .addKeyValue(LINK_NAME_KEY, receiver.getLinkName()) .addKeyValue(ENTITY_PATH_KEY, receiver.getEntityPath()) .log("Got a AmqpReceiveLink when the MessageFlux is already terminated."); receiver.closeAsync().subscribe(); Operators.onDiscard(receiver, messageSubscriber.currentContext()); } }
class RecoverableReactorReceiver implements CoreSubscriber<AmqpReceiveLink>, Subscription { private final MediatorHolder mediatorHolder = new MediatorHolder(); private final MessageFlux parent; private final int prefetch; private final CreditFlowMode creditFlowMode; private final AmqpRetryPolicy retryPolicy; private final ClientLogger logger; private final AtomicInteger retryAttempts = new AtomicInteger(); private final CoreSubscriber<? super Message> messageSubscriber; private Subscription upstream; private volatile long requested; @SuppressWarnings("rawtypes") private static final AtomicLongFieldUpdater<RecoverableReactorReceiver> REQUESTED = AtomicLongFieldUpdater.newUpdater(RecoverableReactorReceiver.class, "requested"); private volatile int wip; @SuppressWarnings("rawtypes") private static final AtomicIntegerFieldUpdater<RecoverableReactorReceiver> WIP = AtomicIntegerFieldUpdater.newUpdater(RecoverableReactorReceiver.class, "wip"); private volatile boolean done; private volatile boolean cancelled; volatile Throwable error; @SuppressWarnings("rawtypes") static final AtomicReferenceFieldUpdater<RecoverableReactorReceiver, Throwable> ERROR = AtomicReferenceFieldUpdater.newUpdater(RecoverableReactorReceiver.class, Throwable.class, "error"); /** * Create a recoverable-receiver that supports the message-flux to stream messages from the receiver attached * to a messaging entity to the message-flux's downstream subscriber and recover from receiver termination. * * @param parent the parent message-flux. * @param messageSubscriber the message-flux's downstream subscriber to notify the events. * @param prefetch the number of messages that the operator should prefetch from the messaging entity * (for a less chatty network and faster message processing on the client). * @param creditFlowMode the mode indicating how to compute the credit and when to send it to the broker. * @param retryPolicy the retry policy to use to recover from receiver termination. 
*/ RecoverableReactorReceiver(MessageFlux parent, CoreSubscriber<? super Message> messageSubscriber, int prefetch, CreditFlowMode creditFlowMode, AmqpRetryPolicy retryPolicy) { this.parent = parent; this.messageSubscriber = messageSubscriber; this.prefetch = prefetch; this.creditFlowMode = creditFlowMode; this.retryPolicy = retryPolicy; this.logger = parent.logger; } /** * Invoked by the upstream in response to message-flux subscribing to it. * * @param s the subscription handle for requesting receivers from the upstream or terminating upstream * through cancellation when it is no longer needed. */ @Override public void onSubscribe(Subscription s) { if (Operators.validate(upstream, s)) { upstream = s; messageSubscriber.onSubscribe(this); s.request(1); } } /** * Invoked by the upstream to deliver new receiver. * * @param receiver the new receiver. */ @Override /** * Signals operator termination with error. * <ul> * <li>Invoked by the upstream when it errors, or</li> * <li>Invoked by the drain-loop when it detects a 'non-retriable or retry exhaust' error, or </li> * <li>Invoked by the drain-loop when it detects the receiver signaled terminal completion and retry * is disabled (i.e., NULL_RETRY_POLICY is set).</li> * </ul> * * @param e the error signaled. */ @Override public void onError(Throwable e) { if (done) { Operators.onErrorDropped(e, messageSubscriber.currentContext()); return; } if (Exceptions.addThrowable(ERROR, this, e)) { done = true; final String logMessage; if (retryPolicy == NULL_RETRY_POLICY) { logMessage = "Terminal error signal from Upstream|Receiver arrived at MessageFlux."; } else { logMessage = "Terminal error signal from Upstream|RetryLoop arrived at MessageFlux."; } mediatorHolder.withReceiverInfo(logger.atWarning()).log(logMessage, e); drain(null); } else { Operators.onErrorDropped(e, messageSubscriber.currentContext()); } } /** * Signals operator termination with completion. 
* <ul> * <li>Invoked by the upstream when it completes, or</li> * <li>Invoked by the the drain-loop when it detects first receiver signaled terminal completion and retry * is disabled (i.e., NULL_RETRY_POLICY is set).</li> * </ul> */ @Override public void onComplete() { if (done) { return; } done = true; final String logMessage; if (retryPolicy == NULL_RETRY_POLICY) { logMessage = "Terminal completion signal from Upstream|Receiver arrived at MessageFlux."; } else { logMessage = "Terminal completion signal from Upstream arrived at MessageFlux."; } mediatorHolder.withReceiverInfo(logger.atWarning()).log(logMessage); drain(null); } /** * Invoked by the downstream message subscriber to signal the demand for messages. Whatever has been * requested can be sent downstream, so only signal the demand for what can be safely handled. * No messages will be sent downstream until the demand is signaled. * * @param n the number of messages to send to downstream. */ @Override public void request(long n) { if (Operators.validate(n)) { Operators.addCap(REQUESTED, this, n); drain(null); } } /** * Invoked by downstream to signal termination by cancellation. */ @Override public void cancel() { if (cancelled) { return; } cancelled = true; mediatorHolder.withReceiverInfo(logger.atWarning()) .log("Downstream cancellation signal arrived at MessageFlux."); if (WIP.getAndIncrement(this) == 0) { upstream.cancel(); mediatorHolder.freeze(); } } /** * Invoked by the new mediator when it is ready to be used. A mediator constructed in 'onNext' moves * to ready state when its backing receiver is active. * * @param updateDispositionFunc the function to disposition messages from mediator's backing receiver. */ void onMediatorReady(BiFunction<String, DeliveryState, Mono<Void>> updateDispositionFunc) { retryAttempts.set(0); parent.onNextUpdateDispositionFunction(updateDispositionFunc); drain(null); } /** * The serialized entry point to drain-loop. 
* * @param dataSignal the message to drop if the operator is terminated by cancellation. */ void drain(Message dataSignal) { if (WIP.getAndIncrement(this) != 0) { if (dataSignal != null && cancelled) { Operators.onDiscard(dataSignal, messageSubscriber.currentContext()); } return; } drainLoop(); } /** * The serialized drain-loop (implementation patterns inspired from RxJava, Reactor Operators). * Reference: 'Operator Concurrency Primitives' series https: */ private void drainLoop() { int missed = 1; CoreSubscriber<? super Message> downstream = messageSubscriber; for (; ;) { boolean d = done; ReactorReceiverMediator mediator = mediatorHolder.mediator; boolean hasMediator = mediator != null; if (terminateIfCancelled(downstream, null)) { return; } if (terminateIfErrorOrCompletionSignaled(d, downstream, null)) { return; } long r = requested; long emitted = 0L; boolean mediatorTerminatedAndDrained = false; if (r != 0L && hasMediator) { Queue<Message> q = mediator.queue; while (emitted != r) { Message message = q.poll(); if (terminateIfCancelled(downstream, message)) { return; } if (terminateIfErrorOrCompletionSignaled(done, downstream, message)) { return; } boolean empty = message == null; if (empty && mediator.done) { mediatorTerminatedAndDrained = true; break; } if (empty) { break; } messageSubscriber.onNext(message); emitted++; } if (emitted == r) { if (mediator.queue.isEmpty() && mediator.done) { mediatorTerminatedAndDrained = true; } } if (emitted != 0 && r != Long.MAX_VALUE) { r = REQUESTED.addAndGet(this, -emitted); } mediator.update(r, emitted); } if (r == 0L && hasMediator) { if (terminateIfCancelled(downstream, null)) { return; } if (terminateIfErrorOrCompletionSignaled(done, downstream, null)) { return; } if (mediator.queue.isEmpty() && mediator.done) { mediatorTerminatedAndDrained = true; } } if (mediatorTerminatedAndDrained && !mediator.isRetryInitiated) { mediator.isRetryInitiated = true; mediator.closeAsync().subscribe(); 
setTerminationSignalOrScheduleNextMediatorRequest(mediator.error, downstream, mediatorHolder); } missed = WIP.addAndGet(this, -missed); if (missed == 0) { break; } } } /** * CONTRACT: Never invoke from the outside of serialized drain-loop. * <p/> * See if downstream signaled cancellation to terminate the operator, if so, react to the cancellation. * * @param downstream the downstream. * @param messageDropped the message that gets dropped if cancellation was signaled. * @return true if canceled, false otherwise. */ private boolean terminateIfCancelled(CoreSubscriber<? super Message> downstream, Message messageDropped) { if (cancelled) { Operators.onDiscard(messageDropped, downstream.currentContext()); upstream.cancel(); mediatorHolder.freeze(); return true; } return false; } /** * CONTRACT: Never invoke from the outside of serialized drain-loop. * <p/> * See if there is a pending signal for the operator termination with error or completion, if so, react to it * by terminating downstream. * * @param d indicate if the operator termination was signaled. * @param downstream the downstream. * @param messageDropped the message that gets dropped if termination happened. * @return true if terminated, false otherwise. */ private boolean terminateIfErrorOrCompletionSignaled(boolean d, CoreSubscriber<? 
super Message> downstream, Message messageDropped) { if (d) { final LoggingEventBuilder logBuilder = mediatorHolder.withReceiverInfo(logger.atWarning()); Throwable e = error; if (e != null && e != Exceptions.TERMINATED) { e = Exceptions.terminate(ERROR, this); Operators.onDiscard(messageDropped, downstream.currentContext()); upstream.cancel(); mediatorHolder.freeze(); logBuilder.log("MessageFlux reached a terminal error-state, signaling it downstream.", e); downstream.onError(e); return true; } Operators.onDiscard(messageDropped, downstream.currentContext()); upstream.cancel(); mediatorHolder.freeze(); logBuilder.log("MessageFlux reached a terminal completion-state, signaling it downstream."); downstream.onComplete(); return true; } return false; } /** * CONTRACT: Never invoke from the outside of serialized drain-loop. * <p/> * 1. When retry is enabled (i.e., NULL_RETRY_POLICY is not set) then schedule request (retry) for the next mediator if * <ul> * <li>the operator is not in a termination signaled state,</li> * <li> and * <ul> * <li>there is no error Or</li> * <li>error is retriable and the retry is not exhausted.</li> * </ul> * </li> * </ul> * 2. If retry is enabled (i.e., NULL_RETRY_POLICY is not set) and there is 'non-retriable or retry exhaust' * error, then set an error signal for the drain-loop to terminate the operator. * <br/> * 3. If retry is disabled (i.e., NULL_RETRY_POLICY is set), then set an error signal (if first receiver error-ed) * or completion signal (if first receiver completed) for the drain-loop to terminate the operator. * <br/> * @param error the error that leads to error-ed termination of the last mediator or {@code null} * if terminated with completion. * @param downstream the downstream. * @param mediatorHolder the mediator holder. */ private void setTerminationSignalOrScheduleNextMediatorRequest(Throwable error, CoreSubscriber<? 
super Message> downstream, MediatorHolder mediatorHolder) { final LoggingEventBuilder logBuilder = mediatorHolder.withReceiverInfo(logger.atWarning()); if (cancelled || done) { logBuilder.log("MessageFlux reached terminal-state [done:{}, cancelled:{}].", done, cancelled); return; } if (retryPolicy == NULL_RETRY_POLICY) { if (error == null) { onComplete(); } else { onError(error); } return; } final Duration delay; if (error == null) { delay = Duration.ofSeconds(1); logBuilder.addKeyValue("retryAfter", delay.toMillis()) .log("Current mediator reached terminal completion-state (retriable:true)."); } else { final int attempt = retryAttempts.incrementAndGet(); delay = retryPolicy.calculateRetryDelay(error, attempt); if (delay != null) { logBuilder.addKeyValue("attempt", attempt) .addKeyValue("retryAfter", delay.toMillis()) .log("Current mediator reached terminal error-state (retriable:true).", error); } else { logBuilder.addKeyValue("attempt", attempt) .log("Current mediator reached terminal error-state (retriable:false) Or MessageFlux retries exhausted.", error); onError(error); return; } } try { scheduleNextMediatorRequest(delay, mediatorHolder); } catch (RejectedExecutionException ree) { final RuntimeException e = Operators.onRejectedExecution(ree, downstream.currentContext()); mediatorHolder.withReceiverInfo(logger.atWarning()) .log("Unable to schedule a request for a new mediator (retriable:false).", e); onError(e); } } /** * Schedule a task to request a new mediator. * * @param delay the backoff duration before requesting the next mediator. * @param mediatorHolder the mediator holder. * @throws RejectedExecutionException if the scheduler is unable to schedule the task. 
*/ private void scheduleNextMediatorRequest(Duration delay, MediatorHolder mediatorHolder) { final Runnable task = () -> { final LoggingEventBuilder logBuilder = mediatorHolder.withReceiverInfo(logger.atWarning()); if (cancelled || done) { logBuilder.log("During the backoff, MessageFlux reached terminal-state [done:{}, cancelled:{}].", done, cancelled); return; } logBuilder.log("Requesting a new mediator."); upstream.request(1); }; mediatorHolder.nextMediatorRequestDisposable = Schedulers.parallel().schedule(task, delay.toMillis(), TimeUnit.MILLISECONDS); } }
class RecoverableReactorReceiver implements CoreSubscriber<AmqpReceiveLink>, Subscription { private final MediatorHolder mediatorHolder = new MediatorHolder(); private final MessageFlux parent; private final int prefetch; private final CreditFlowMode creditFlowMode; private final AmqpRetryPolicy retryPolicy; private final ClientLogger logger; private final AtomicInteger retryAttempts = new AtomicInteger(); private final CoreSubscriber<? super Message> messageSubscriber; private Subscription upstream; private volatile long requested; @SuppressWarnings("rawtypes") private static final AtomicLongFieldUpdater<RecoverableReactorReceiver> REQUESTED = AtomicLongFieldUpdater.newUpdater(RecoverableReactorReceiver.class, "requested"); private volatile int wip; @SuppressWarnings("rawtypes") private static final AtomicIntegerFieldUpdater<RecoverableReactorReceiver> WIP = AtomicIntegerFieldUpdater.newUpdater(RecoverableReactorReceiver.class, "wip"); private volatile boolean done; private volatile boolean cancelled; private volatile Throwable error; @SuppressWarnings("rawtypes") private static final AtomicReferenceFieldUpdater<RecoverableReactorReceiver, Throwable> ERROR = AtomicReferenceFieldUpdater.newUpdater(RecoverableReactorReceiver.class, Throwable.class, "error"); /** * Create a recoverable-receiver that supports the message-flux to stream messages from the receiver attached * to a messaging entity to the message-flux's downstream subscriber and recover from receiver termination. * * @param parent the parent message-flux. * @param messageSubscriber the message-flux's downstream subscriber to notify the events. * @param prefetch the number of messages that the operator should prefetch from the messaging entity * (for a less chatty network and faster message processing on the client). * @param creditFlowMode the mode indicating how to compute the credit and when to send it to the broker. * @param retryPolicy the retry policy to use to recover from receiver termination. 
*/ RecoverableReactorReceiver(MessageFlux parent, CoreSubscriber<? super Message> messageSubscriber, int prefetch, CreditFlowMode creditFlowMode, AmqpRetryPolicy retryPolicy) { this.parent = parent; this.messageSubscriber = messageSubscriber; this.prefetch = prefetch; this.creditFlowMode = creditFlowMode; this.retryPolicy = retryPolicy; this.logger = parent.logger; } /** * Invoked by the upstream in response to message-flux subscribing to it. * * @param s the subscription handle for requesting receivers from the upstream or terminating upstream * through cancellation when it is no longer needed. */ @Override public void onSubscribe(Subscription s) { if (Operators.validate(upstream, s)) { upstream = s; messageSubscriber.onSubscribe(this); s.request(1); } } /** * Invoked by the upstream to deliver new receiver. * * @param receiver the new receiver. */ @Override /** * Signals operator termination with error. * <ul> * <li>Invoked by the upstream when it errors, or</li> * <li>Invoked by the drain-loop when it detects a 'non-retriable or retry exhaust' error, or </li> * <li>Invoked by the drain-loop when it detects the receiver signaled terminal completion and retry * is disabled (i.e., NULL_RETRY_POLICY is set).</li> * </ul> * * @param e the error signaled. */ @Override public void onError(Throwable e) { if (done) { Operators.onErrorDropped(e, messageSubscriber.currentContext()); return; } if (Exceptions.addThrowable(ERROR, this, e)) { done = true; final String logMessage; if (retryPolicy == NULL_RETRY_POLICY) { logMessage = "Terminal error signal from Upstream|Receiver arrived at MessageFlux."; } else { logMessage = "Terminal error signal from Upstream|RetryLoop arrived at MessageFlux."; } mediatorHolder.withReceiverInfo(logger.atWarning()).log(logMessage, e); drain(null); } else { Operators.onErrorDropped(e, messageSubscriber.currentContext()); } } /** * Signals operator termination with completion. 
* <ul> * <li>Invoked by the upstream when it completes, or</li> * <li>Invoked by the the drain-loop when it detects first receiver signaled terminal completion and retry * is disabled (i.e., NULL_RETRY_POLICY is set).</li> * </ul> */ @Override public void onComplete() { if (done) { return; } done = true; final String logMessage; if (retryPolicy == NULL_RETRY_POLICY) { logMessage = "Terminal completion signal from Upstream|Receiver arrived at MessageFlux."; } else { logMessage = "Terminal completion signal from Upstream arrived at MessageFlux."; } mediatorHolder.withReceiverInfo(logger.atWarning()).log(logMessage); drain(null); } /** * Invoked by the downstream message subscriber to signal the demand for messages. Whatever has been * requested can be sent downstream, so only signal the demand for what can be safely handled. * No messages will be sent downstream until the demand is signaled. * * @param n the number of messages to send to downstream. */ @Override public void request(long n) { if (Operators.validate(n)) { Operators.addCap(REQUESTED, this, n); drain(null); } } /** * Invoked by downstream to signal termination by cancellation. */ @Override public void cancel() { if (cancelled) { return; } cancelled = true; mediatorHolder.withReceiverInfo(logger.atWarning()) .log("Downstream cancellation signal arrived at MessageFlux."); if (WIP.getAndIncrement(this) == 0) { upstream.cancel(); mediatorHolder.freeze(); } } /** * Invoked by the new mediator when it is ready to be used. A mediator constructed in 'onNext' moves * to ready state when its backing receiver is active. * * @param updateDispositionFunc the function to disposition messages from mediator's backing receiver. */ void onMediatorReady(DispositionFunction updateDispositionFunc) { retryAttempts.set(0); parent.onNextUpdateDispositionFunction(updateDispositionFunc); drain(null); } /** * The serialized entry point to drain-loop. 
* * @param dataSignal the message to drop if the operator is terminated by cancellation. */ void drain(Message dataSignal) { if (WIP.getAndIncrement(this) != 0) { if (dataSignal != null && cancelled) { Operators.onDiscard(dataSignal, messageSubscriber.currentContext()); } return; } drainLoop(); } /** * The serialized drain-loop (implementation patterns inspired from RxJava, Reactor Operators). * Reference: 'Operator Concurrency Primitives' series https: */ private void drainLoop() { int missed = 1; CoreSubscriber<? super Message> downstream = messageSubscriber; for (; ;) { boolean d = done; ReactorReceiverMediator mediator = mediatorHolder.mediator; boolean hasMediator = mediator != null; if (terminateIfCancelled(downstream, null)) { return; } if (terminateIfErrorOrCompletionSignaled(d, downstream, null)) { return; } long r = requested; long emitted = 0L; boolean mediatorTerminatedAndDrained = false; if (r != 0L && hasMediator) { Queue<Message> q = mediator.queue; while (emitted != r) { Message message = q.poll(); if (terminateIfCancelled(downstream, message)) { return; } if (terminateIfErrorOrCompletionSignaled(done, downstream, message)) { return; } boolean empty = message == null; if (empty && mediator.done) { mediatorTerminatedAndDrained = true; break; } if (empty) { break; } messageSubscriber.onNext(message); emitted++; } if (emitted == r) { if (mediator.queue.isEmpty() && mediator.done) { mediatorTerminatedAndDrained = true; } } if (emitted != 0 && r != Long.MAX_VALUE) { r = REQUESTED.addAndGet(this, -emitted); } mediator.update(r, emitted); } if (r == 0L && hasMediator) { if (terminateIfCancelled(downstream, null)) { return; } if (terminateIfErrorOrCompletionSignaled(done, downstream, null)) { return; } if (mediator.queue.isEmpty() && mediator.done) { mediatorTerminatedAndDrained = true; } } if (mediatorTerminatedAndDrained && !mediator.isRetryInitiated) { mediator.isRetryInitiated = true; mediator.closeAsync().subscribe(); 
setTerminationSignalOrScheduleNextMediatorRequest(mediator.error, downstream, mediatorHolder); } missed = WIP.addAndGet(this, -missed); if (missed == 0) { break; } } } /** * CONTRACT: Never invoke from the outside of serialized drain-loop. * <p> * See if downstream signaled cancellation to terminate the operator, if so, react to the cancellation. * </p> * @param downstream the downstream. * @param messageDropped the message that gets dropped if cancellation was signaled. * @return true if canceled, false otherwise. */ private boolean terminateIfCancelled(CoreSubscriber<? super Message> downstream, Message messageDropped) { if (cancelled) { Operators.onDiscard(messageDropped, downstream.currentContext()); upstream.cancel(); mediatorHolder.freeze(); return true; } return false; } /** * CONTRACT: Never invoke from the outside of serialized drain-loop. * <p> * See if there is a pending signal for the operator termination with error or completion, if so, react to it * by terminating downstream. * </p> * * @param d indicate if the operator termination was signaled. * @param downstream the downstream. * @param messageDropped the message that gets dropped if termination happened. * @return true if terminated, false otherwise. */ private boolean terminateIfErrorOrCompletionSignaled(boolean d, CoreSubscriber<? 
super Message> downstream, Message messageDropped) { if (d) { final LoggingEventBuilder logBuilder = mediatorHolder.withReceiverInfo(logger.atWarning()); Throwable e = error; if (e != null && e != Exceptions.TERMINATED) { e = Exceptions.terminate(ERROR, this); Operators.onDiscard(messageDropped, downstream.currentContext()); upstream.cancel(); mediatorHolder.freeze(); logBuilder.log("MessageFlux reached a terminal error-state, signaling it downstream.", e); downstream.onError(e); return true; } Operators.onDiscard(messageDropped, downstream.currentContext()); upstream.cancel(); mediatorHolder.freeze(); logBuilder.log("MessageFlux reached a terminal completion-state, signaling it downstream."); downstream.onComplete(); return true; } return false; } /** * * CONTRACT: Never invoke from the outside of serialized drain-loop. * <p></p> * 1. When retry is enabled (i.e., NULL_RETRY_POLICY is not set) then schedule request (retry) for the next mediator if * <ul> * <li>the operator is not in a termination signaled state,</li> * <li> and * <ul> * <li>there is no error Or</li> * <li>error is retriable and the retry is not exhausted.</li> * </ul> * </li> * </ul> * 2. If retry is enabled (i.e., NULL_RETRY_POLICY is not set) and there is 'non-retriable or retry exhaust' * error, then set an error signal for the drain-loop to terminate the operator. * <br/> * 3. If retry is disabled (i.e., NULL_RETRY_POLICY is set), then set an error signal (if first receiver error-ed) * or completion signal (if first receiver completed) for the drain-loop to terminate the operator. * <p></p> * @param error the error that leads to error-ed termination of the last mediator or {@code null} * if terminated with completion. * @param downstream the downstream. * @param mediatorHolder the mediator holder. */ private void setTerminationSignalOrScheduleNextMediatorRequest(Throwable error, CoreSubscriber<? 
super Message> downstream, MediatorHolder mediatorHolder) { final LoggingEventBuilder logBuilder = mediatorHolder.withReceiverInfo(logger.atWarning()); if (cancelled || done) { logBuilder.log("MessageFlux reached terminal-state [done:{}, cancelled:{}].", done, cancelled); return; } if (retryPolicy == NULL_RETRY_POLICY) { if (error == null) { onComplete(); } else { onError(error); } return; } final Duration delay; if (error == null) { delay = Duration.ofSeconds(1); logBuilder.addKeyValue("retryAfter", delay.toMillis()) .log("Current mediator reached terminal completion-state (retriable:true)."); } else { final int attempt = retryAttempts.incrementAndGet(); delay = retryPolicy.calculateRetryDelay(error, attempt); if (delay != null) { logBuilder.addKeyValue("attempt", attempt) .addKeyValue("retryAfter", delay.toMillis()) .log("Current mediator reached terminal error-state (retriable:true).", error); } else { logBuilder.addKeyValue("attempt", attempt) .log("Current mediator reached terminal error-state (retriable:false) Or MessageFlux retries exhausted.", error); onError(error); return; } } try { scheduleNextMediatorRequest(delay, mediatorHolder); } catch (RejectedExecutionException ree) { final RuntimeException e = Operators.onRejectedExecution(ree, downstream.currentContext()); mediatorHolder.withReceiverInfo(logger.atWarning()) .log("Unable to schedule a request for a new mediator (retriable:false).", e); onError(e); } } /** * Schedule a task to request a new mediator. * * @param delay the backoff duration before requesting the next mediator. * @param mediatorHolder the mediator holder. * @throws RejectedExecutionException if the scheduler is unable to schedule the task. 
*/ private void scheduleNextMediatorRequest(Duration delay, MediatorHolder mediatorHolder) { final Runnable task = () -> { final LoggingEventBuilder logBuilder = mediatorHolder.withReceiverInfo(logger.atWarning()); if (cancelled || done) { logBuilder.log("During the backoff, MessageFlux reached terminal-state [done:{}, cancelled:{}].", done, cancelled); return; } logBuilder.log("Requesting a new mediator."); upstream.request(1); }; mediatorHolder.nextMediatorRequestDisposable = Schedulers.parallel().schedule(task, delay.toMillis(), TimeUnit.MILLISECONDS); } }
Reviewing the code again, it looks like we can simply remove `doOnEach(..)` and have `subscribe(...)` take care of the signal reactions. I’ll update the code. **[DONE]**
/**
 * Invoked by the parent {@link RecoverableReactorReceiver} when it is ready to use this mediator.
 * Starts pumping messages from the backing receiver and watches the receiver's endpoint states to
 * detect activation and terminal (error or completion) signals.
 */
void onParentReady() {
    updateLogWithReceiverId(logger.atWarning()).log("Setting next mediator and waiting for activation.");
    // Begin streaming messages from the receiver into this mediator (it is the CoreSubscriber<Message>).
    receiver.receive().subscribe(this);
    // React to endpoint signals in subscribe(onNext, onError, onComplete) directly rather than via
    // doOnEach(..) followed by an empty subscribe(..) — the per-signal branching belongs in the subscriber.
    final Disposable endpointDisposable = receiver.getEndpointStates()
        .filter(s -> s == AmqpEndpointState.ACTIVE)
        .publishOn(ReceiversPumpingScheduler.instance())
        .subscribe(state -> {
            // Only ACTIVE states pass the filter above.
            assert state == AmqpEndpointState.ACTIVE;
            if (!ready) {
                updateLogWithReceiverId(logger.atWarning()).log("The mediator is active.");
                ready = true;
                // Hand the parent the function it uses to disposition (ack/nack) messages.
                parent.onMediatorReady(this::updateDisposition);
            }
        }, e -> {
            updateLogWithReceiverId(logger.atWarning()).log("Receiver emitted terminal error.", e);
            onLinkError(e);
        }, () -> {
            updateLogWithReceiverId(logger.atWarning()).log("Receiver emitted terminal completion.");
            onLinkComplete();
        });
    endpointStateDisposable.add(endpointDisposable);
}
.doOnEach(event -> {
/**
 * Invoked by the parent {@link RecoverableReactorReceiver} once it is ready to use this mediator;
 * begins streaming messages from the backing receiver and monitors the receiver's endpoint states
 * for activation and for terminal (error or completion) signals.
 */
void onParentReady() {
    updateLogWithReceiverId(logger.atWarning()).log("Setting next mediator and waiting for activation.");
    // This mediator is itself the CoreSubscriber<Message> consuming the receiver's messages.
    receiver.receive().subscribe(this);
    final Disposable endpointDisposable = receiver.getEndpointStates()
        .filter(endpointState -> endpointState == AmqpEndpointState.ACTIVE)
        .publishOn(ReceiversPumpingScheduler.instance())
        .subscribe(endpointState -> {
            // The filter above lets only ACTIVE states through.
            assert endpointState == AmqpEndpointState.ACTIVE;
            if (ready) {
                // Activation already observed; nothing further to do for repeat signals.
                return;
            }
            updateLogWithReceiverId(logger.atWarning()).log("The mediator is active.");
            ready = true;
            parent.onMediatorReady(this::updateDisposition);
        }, error -> {
            updateLogWithReceiverId(logger.atWarning()).log("Receiver emitted terminal error.", error);
            onLinkError(error);
        }, () -> {
            updateLogWithReceiverId(logger.atWarning()).log("Receiver emitted terminal completion.");
            onLinkComplete();
        });
    endpointStateDisposable.add(endpointDisposable);
}
class ReactorReceiverMediator implements AsyncCloseable, CoreSubscriber<Message>, Subscription { private static final Subscription CANCELLED_SUBSCRIPTION = Operators.cancelledSubscription(); private final RecoverableReactorReceiver parent; private final AmqpReceiveLink receiver; private final int prefetch; private final CreditFlowMode creditFlowMode; private final ClientLogger logger; private final Disposable.Composite endpointStateDisposable = Disposables.composite(); private CreditAccountingStrategy creditAccounting; private volatile boolean ready; private volatile Subscription s; @SuppressWarnings("rawtypes") private static final AtomicReferenceFieldUpdater<ReactorReceiverMediator, Subscription> S = AtomicReferenceFieldUpdater.newUpdater(ReactorReceiverMediator.class, Subscription.class, "s"); volatile Throwable error; @SuppressWarnings("rawtypes") static final AtomicReferenceFieldUpdater<ReactorReceiverMediator, Throwable> ERROR = AtomicReferenceFieldUpdater.newUpdater(ReactorReceiverMediator.class, Throwable.class, "error"); /** * The flag indicating if the mediator is terminated by completion or error. */ volatile boolean done; /** * The drain loop iteration that first identifies the mediator as terminated (done == true) and * and drained (queue.isEmpty() == true) will initiate a retry to obtain the next mediator. While that retry * completion is pending, any request for messages from downstream may lead to further drain loop iterations; * the 'isRetryInitiated' flag ensures those drain loop iterations (those also see the mediator as terminated * and drained) will not initiate duplicate retries. */ volatile boolean isRetryInitiated; /** * The queue holding messages from the backing receiver's message publisher, waiting to be drained by * the drain-loop iterations. */ final Queue<Message> queue; /** * Create a mediator to channel events (messages, termination) from a receiver to recoverable-receiver. * * @param parent the recoverable-receiver (a.k.a. parent). 
* @param receiver the receiver backing the mediator. * @param prefetch the number of messages to prefetch using the receiver (for a less chatty network * and faster message processing on the client). * @param creditFlowMode the mode indicating how to compute the credit and when to send it to the broker. */ ReactorReceiverMediator(RecoverableReactorReceiver parent, AmqpReceiveLink receiver, int prefetch, CreditFlowMode creditFlowMode, ClientLogger logger) { this.parent = parent; this.receiver = receiver; this.prefetch = prefetch; this.creditFlowMode = creditFlowMode; this.logger = logger; this.queue = Queues.<Message>get(Integer.MAX_VALUE).get(); } /** * Invoked by the parent {@link RecoverableReactorReceiver} when it is ready to use this new mediator * (The mediator facilitate communication between the parent and the new receiver ({@link AmqpReceiveLink}) that * mediator wraps). In response, this mediator notifies the parent about its readiness by invoking * {@link RecoverableReactorReceiver */ /** * Invoked in response to the subscription to the receiver's message publisher. * * @param s the subscription to request messages from the receiver's message publisher and terminate * that publisher through cancellation when it is no longer needed. */ @Override public void onSubscribe(Subscription s) { if (Operators.setOnce(S, this, s)) { switch (creditFlowMode) { case RequestDriven: creditAccounting = new RequestDrivenCreditAccountingStrategy(receiver, s, prefetch, logger); break; case EmissionDriven: creditAccounting = new EmissionDrivenCreditAccountingStrategy(receiver, s, prefetch, logger); break; default: throw new IllegalArgumentException("Unknown CreditFlowMode " + creditFlowMode); } } } /** * CONTRACT: Never invoke from the outside of serialized drain-loop. * <p/> * Notify the latest view of the downstream request and messages emitted by the emitter-loop during * the last drain-loop iteration. * * @param request the latest view of the downstream request. 
* @param emitted the number of messages emitted by the latest emitter-loop run. */ void update(long request, long emitted) { if (ready && !done) { creditAccounting.update(request, emitted); } } /** * Invoked by the receiver's message publisher to deliver a message. * * @param message the message. */ @Override public void onNext(Message message) { if (done) { Operators.onNextDropped(message, parent.currentContext()); return; } if (s == Operators.cancelledSubscription()) { Operators.onDiscard(message, parent.currentContext()); return; } if (queue.offer(message)) { parent.drain(message); } else { Operators.onOperatorError(this, Exceptions.failWithOverflow(Exceptions.BACKPRESSURE_ERROR_QUEUE_FULL), parent.messageSubscriber.currentContext()); Operators.onDiscard(message, parent.messageSubscriber.currentContext()); done = true; parent.drain(message); } } @Override public void onError(Throwable e) { } /** * Invoked by the receiver's endpoint publisher to signal mediator termination with an error. * * @param e the error signaled. */ private void onLinkError(Throwable e) { if (done) { Operators.onErrorDropped(e, parent.messageSubscriber.currentContext()); return; } if (ERROR.compareAndSet(this, null, e)) { done = true; parent.drain(null); } else { done = true; Operators.onErrorDropped(e, parent.messageSubscriber.currentContext()); } } @Override public void onComplete() { } /** * Invoked by the receiver's endpoint publisher to signal mediator termination with completion. */ private void onLinkComplete() { if (done) { return; } done = true; parent.drain(null); } @Override public void request(long n) { throw new IllegalStateException("The request accounting must be through update(,)."); } @Override public void cancel() { if (Operators.terminate(S, this)) { Operators.onDiscardQueueWithClear(queue, parent.currentContext(), null); } endpointStateDisposable.dispose(); } /** * Close the mediator. 
Closing is triggered in the following cases - * <ul> * <li>When {@link RecoverableReactorReceiver} switches to a new (i.e., next) mediator, it closes the current mediator.</li> * <li>When {@link RecoverableReactorReceiver} terminates (hence {@link MessageFlux}) due to * <ul> * <li>downstream cancellation or</li> * <li>upstream termination with error or completion or</li> * <li>retry-exhaust-error or non-retriable-error or</li> * <li>termination of receiver with error or completion when NULL_RETRY_POLICY is set,</li> * </ul> * it closes the current (i.e., last) mediator. </li> * </ul> */ @Override public Mono<Void> closeAsync() { cancel(); return receiver.closeAsync(); } /** * Updates the disposition state of a message uniquely identified by the given delivery tag. * * @param deliveryTag delivery tag of message. * @param deliveryState Delivery state of message. * * @return A Mono that completes when the state is successfully updated and acknowledged by message broker. */ private Mono<Void> updateDisposition(String deliveryTag, DeliveryState deliveryState) { if (done || s == CANCELLED_SUBSCRIPTION) { final String state = String.format("[link.done:%b link.cancelled:%b parent.done:%b parent.cancelled:%b]", done, s == CANCELLED_SUBSCRIPTION, parent.done, parent.cancelled); final DeliveryNotOnLinkException dispositionError = DeliveryNotOnLinkException.linkClosed(deliveryTag, deliveryState); final Throwable receiverError = error; if (receiverError != null) { dispositionError.addSuppressed(receiverError); } final Throwable upstreamError = parent.error; if (upstreamError != null) { dispositionError.addSuppressed(upstreamError); } return monoError(logger.atError() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .addKeyValue(DELIVERY_STATE_KEY, deliveryState) .addKeyValue("messageFluxState", state), dispositionError); } return receiver.updateDisposition(deliveryTag, deliveryState); } private LoggingEventBuilder updateLogWithReceiverId(LoggingEventBuilder builder) { return builder 
.addKeyValue(CONNECTION_ID_KEY, receiver.getConnectionId()) .addKeyValue(LINK_NAME_KEY, receiver.getLinkName()) .addKeyValue(ENTITY_PATH_KEY, receiver.getEntityPath()); } }
class ReactorReceiverMediator implements AsyncCloseable, CoreSubscriber<Message>, Subscription { private static final Subscription CANCELLED_SUBSCRIPTION = Operators.cancelledSubscription(); private final RecoverableReactorReceiver parent; private final AmqpReceiveLink receiver; private final int prefetch; private final CreditFlowMode creditFlowMode; private final ClientLogger logger; private final Disposable.Composite endpointStateDisposable = Disposables.composite(); private CreditAccountingStrategy creditAccounting; private volatile boolean ready; private volatile Subscription s; @SuppressWarnings("rawtypes") private static final AtomicReferenceFieldUpdater<ReactorReceiverMediator, Subscription> S = AtomicReferenceFieldUpdater.newUpdater(ReactorReceiverMediator.class, Subscription.class, "s"); volatile Throwable error; @SuppressWarnings("rawtypes") static final AtomicReferenceFieldUpdater<ReactorReceiverMediator, Throwable> ERROR = AtomicReferenceFieldUpdater.newUpdater(ReactorReceiverMediator.class, Throwable.class, "error"); /** * The flag indicating if the mediator is terminated by completion or error. */ volatile boolean done; /** * The drain loop iteration that first identifies the mediator as terminated (done == true) and * and drained (queue.isEmpty() == true) will initiate a retry to obtain the next mediator. While that retry * completion is pending, any request for messages from downstream may lead to further drain loop iterations; * the 'isRetryInitiated' flag ensures those drain loop iterations (those also see the mediator as terminated * and drained) will not initiate duplicate retries. */ volatile boolean isRetryInitiated; /** * The queue holding messages from the backing receiver's message publisher, waiting to be drained by * the drain-loop iterations. */ final Queue<Message> queue; /** * Create a mediator to channel events (messages, termination) from a receiver to recoverable-receiver. * * @param parent the recoverable-receiver (a.k.a. parent). 
* @param receiver the receiver backing the mediator. * @param prefetch the number of messages to prefetch using the receiver (for a less chatty network * and faster message processing on the client). * @param creditFlowMode the mode indicating how to compute the credit and when to send it to the broker. */ ReactorReceiverMediator(RecoverableReactorReceiver parent, AmqpReceiveLink receiver, int prefetch, CreditFlowMode creditFlowMode, ClientLogger logger) { this.parent = parent; this.receiver = receiver; this.prefetch = prefetch; this.creditFlowMode = creditFlowMode; this.logger = logger; this.queue = Queues.<Message>get(Integer.MAX_VALUE).get(); } /** * Invoked by the parent {@link RecoverableReactorReceiver} when it is ready to use this new mediator * (The mediator facilitate communication between the parent and the new receiver ({@link AmqpReceiveLink}) that * mediator wraps). In response, this mediator notifies the parent about its readiness by invoking * {@link RecoverableReactorReceiver */ /** * Invoked in response to the subscription to the receiver's message publisher. * * @param s the subscription to request messages from the receiver's message publisher and terminate * that publisher through cancellation when it is no longer needed. */ @Override public void onSubscribe(Subscription s) { if (Operators.setOnce(S, this, s)) { switch (creditFlowMode) { case RequestDriven: creditAccounting = new RequestDrivenCreditAccountingStrategy(receiver, s, prefetch, logger); break; case EmissionDriven: creditAccounting = new EmissionDrivenCreditAccountingStrategy(receiver, s, prefetch, logger); break; default: throw new IllegalArgumentException("Unknown CreditFlowMode " + creditFlowMode); } } } /** * CONTRACT: Never invoke from the outside of serialized drain-loop. * <p> * Notify the latest view of the downstream request and messages emitted by the emitter-loop during * the last drain-loop iteration. * </p> * @param request the latest view of the downstream request. 
* @param emitted the number of messages emitted by the latest emitter-loop run. */ void update(long request, long emitted) { if (ready && !done) { creditAccounting.update(request, emitted); } } /** * Invoked by the receiver's message publisher to deliver a message. * * @param message the message. */ @Override public void onNext(Message message) { if (done) { Operators.onNextDropped(message, parent.currentContext()); return; } if (s == Operators.cancelledSubscription()) { Operators.onDiscard(message, parent.currentContext()); return; } if (queue.offer(message)) { parent.drain(message); } else { Operators.onOperatorError(this, Exceptions.failWithOverflow(Exceptions.BACKPRESSURE_ERROR_QUEUE_FULL), parent.messageSubscriber.currentContext()); Operators.onDiscard(message, parent.messageSubscriber.currentContext()); done = true; parent.drain(message); } } @Override public void onError(Throwable e) { } /** * Invoked by the receiver's endpoint publisher to signal mediator termination with an error. * * @param e the error signaled. */ private void onLinkError(Throwable e) { if (done) { Operators.onErrorDropped(e, parent.messageSubscriber.currentContext()); return; } if (ERROR.compareAndSet(this, null, e)) { done = true; parent.drain(null); } else { done = true; Operators.onErrorDropped(e, parent.messageSubscriber.currentContext()); } } @Override public void onComplete() { } /** * Invoked by the receiver's endpoint publisher to signal mediator termination with completion. */ private void onLinkComplete() { if (done) { return; } done = true; parent.drain(null); } @Override public void request(long n) { throw new IllegalStateException("The request accounting must be through update(,)."); } @Override public void cancel() { if (Operators.terminate(S, this)) { Operators.onDiscardQueueWithClear(queue, parent.currentContext(), null); } endpointStateDisposable.dispose(); } /** * Close the mediator. 
Closing is triggered in the following cases - * <ul> * <li>When {@link RecoverableReactorReceiver} switches to a new (i.e., next) mediator, it closes the current mediator.</li> * <li>When {@link RecoverableReactorReceiver} terminates (hence {@link MessageFlux}) due to * <ul> * <li>downstream cancellation or</li> * <li>upstream termination with error or completion or</li> * <li>retry-exhaust-error or non-retriable-error or</li> * <li>termination of receiver with error or completion when NULL_RETRY_POLICY is set,</li> * </ul> * it closes the current (i.e., last) mediator. </li> * </ul> */ @Override public Mono<Void> closeAsync() { cancel(); return receiver.closeAsync(); } /** * Updates the disposition state of a message uniquely identified by the given delivery tag. * * @param deliveryTag delivery tag of message. * @param deliveryState Delivery state of message. * * @return A Mono that completes when the state is successfully updated and acknowledged by message broker. */ private Mono<Void> updateDisposition(String deliveryTag, DeliveryState deliveryState) { if (done || s == CANCELLED_SUBSCRIPTION) { final String state = String.format("[link.done:%b link.cancelled:%b parent.done:%b parent.cancelled:%b]", done, s == CANCELLED_SUBSCRIPTION, parent.done, parent.cancelled); final DeliveryNotOnLinkException dispositionError = DeliveryNotOnLinkException.linkClosed(deliveryTag, deliveryState); final Throwable receiverError = error; if (receiverError != null) { dispositionError.addSuppressed(receiverError); } final Throwable upstreamError = parent.error; if (upstreamError != null) { dispositionError.addSuppressed(upstreamError); } return monoError(logger.atError() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .addKeyValue(DELIVERY_STATE_KEY, deliveryState) .addKeyValue("messageFluxState", state), dispositionError); } return receiver.updateDisposition(deliveryTag, deliveryState); } private LoggingEventBuilder updateLogWithReceiverId(LoggingEventBuilder builder) { return builder 
.addKeyValue(CONNECTION_ID_KEY, receiver.getConnectionId()) .addKeyValue(LINK_NAME_KEY, receiver.getLinkName()) .addKeyValue(ENTITY_PATH_KEY, receiver.getEntityPath()); } }
You’re right — let's make it a static function; there is no need to allocate this lambda for each MessageFlux instance. **[DONE]**
public MessageFlux(Flux<? extends AmqpReceiveLink> source, int prefetch, CreditFlowMode creditFlowMode, AmqpRetryPolicy retryPolicy) { super(source); final Map<String, Object> loggingContext = new HashMap<>(1); loggingContext.put(MESSAGE_FLUX_KEY, StringUtil.getRandomString("mf")); this.logger = new ClientLogger(MessageFlux.class, loggingContext); if (prefetch < 0) { throw new IllegalArgumentException("prefetch >= 0 required but it was " + prefetch); } this.prefetch = prefetch; this.creditFlowMode = creditFlowMode; this.retryPolicy = Objects.requireNonNull(retryPolicy, "'retryPolicy' cannot be null."); this.updateDispositionFunc = (t, s) -> Mono.error(new IllegalStateException("Cannot update disposition as no receive-link is established.")); }
this.updateDispositionFunc = (t, s) -> Mono.error(new IllegalStateException("Cannot update disposition as no receive-link is established."));
public MessageFlux(Flux<? extends AmqpReceiveLink> source, int prefetch, CreditFlowMode creditFlowMode, AmqpRetryPolicy retryPolicy) { super(source); final Map<String, Object> loggingContext = new HashMap<>(1); loggingContext.put(MESSAGE_FLUX_KEY, StringUtil.getRandomString("mf")); this.logger = new ClientLogger(MessageFlux.class, loggingContext); if (prefetch < 0) { throw new IllegalArgumentException("prefetch >= 0 required but it was " + prefetch); } this.prefetch = prefetch; this.creditFlowMode = creditFlowMode; this.retryPolicy = Objects.requireNonNull(retryPolicy, "'retryPolicy' cannot be null."); this.updateDispositionFunc = DispositionFunction.NO_DISPOSITION; }
class MessageFlux extends FluxOperator<AmqpReceiveLink, Message> { /** An AmqpRetryPolicy const indicates that MessageFlux should terminate when the first receiver terminates * (i.e., disables the retry action to obtain next receiver from the upstream). **/ public static final AmqpRetryPolicy NULL_RETRY_POLICY = new FixedAmqpRetryPolicy(new AmqpRetryOptions()); private static final String MESSAGE_FLUX_KEY = "messageFlux"; private final ClientLogger logger; private final int prefetch; private final CreditFlowMode creditFlowMode; private final AmqpRetryPolicy retryPolicy; private volatile BiFunction<String, DeliveryState, Mono<Void>> updateDispositionFunc; /** * Create a message-flux to stream messages from a messaging entity to downstream subscriber. * * @param source the upstream source that, upon a request, provide a new receiver connected to the messaging entity. * @param prefetch the number of messages that the operator should prefetch from the messaging entity (for a * less chatty network and faster message processing on the client). * @param creditFlowMode the mode indicating how to compute the credit and when to send it to the broker. * @param retryPolicy the retry policy to use to obtain a new receiver upon current receiver termination. * @throws IllegalStateException if the {@code prefetch} is a negative value. * @throws NullPointerException if the {@code retryPolicy} is {@code null}. */ /** * Register the downstream subscriber. * * @param actual the downstream subscriber interested in the published messages and termination. */ @Override public void subscribe(CoreSubscriber<? super Message> actual) { source.subscribe(new RecoverableReactorReceiver(this, actual, prefetch, creditFlowMode, retryPolicy)); } /** * Updates the disposition state of a message uniquely identified by the given delivery tag. * * @param deliveryTag delivery tag of message. * @param deliveryState Delivery state of message. 
* * @return A Mono that completes when the state is successfully updated and acknowledged by message broker. */ public Mono<Void> updateDisposition(String deliveryTag, DeliveryState deliveryState) { final BiFunction<String, DeliveryState, Mono<Void>> updateDispositionFunc = this.updateDispositionFunc; return updateDispositionFunc.apply(deliveryTag, deliveryState); } /** * The callback invoked when next receiver is attached to the messaging entity from which this message-flux * instance stream messages. There will be only one receiver at a time, and this callback delivers the reference * to the function to disposition messages that arrives in the new receiver. * * @param updateDispositionFunc the function to disposition messages delivered by the current backing receiver. */ void onNextUpdateDispositionFunction(BiFunction<String, DeliveryState, Mono<Void>> updateDispositionFunc) { this.updateDispositionFunc = updateDispositionFunc; } /** * The underlying consumer and producer extension of the message-flux operator. The consuming side processes events * (about new receiver, terminal signals) from the upstream and events (messages, terminal signals) from * the current receiver. The producing side publishes the messages to message-flux's downstream. The type has * a recovery mechanism to obtain a new receiver from upstream upon the current receiver's termination. * Recoveries happen underneath while the messages flow transparently downstream. The type can terminate downstream * if the upstream terminates, the recovery path encounters a non-retriable error (i.e., the current receiver * terminated with a non-retriable error), or recovery retries exhaust. 
*/ private static final class RecoverableReactorReceiver implements CoreSubscriber<AmqpReceiveLink>, Subscription { private final MediatorHolder mediatorHolder = new MediatorHolder(); private final MessageFlux parent; private final int prefetch; private final CreditFlowMode creditFlowMode; private final AmqpRetryPolicy retryPolicy; private final ClientLogger logger; private final AtomicInteger retryAttempts = new AtomicInteger(); private final CoreSubscriber<? super Message> messageSubscriber; private Subscription upstream; private volatile long requested; @SuppressWarnings("rawtypes") private static final AtomicLongFieldUpdater<RecoverableReactorReceiver> REQUESTED = AtomicLongFieldUpdater.newUpdater(RecoverableReactorReceiver.class, "requested"); private volatile int wip; @SuppressWarnings("rawtypes") private static final AtomicIntegerFieldUpdater<RecoverableReactorReceiver> WIP = AtomicIntegerFieldUpdater.newUpdater(RecoverableReactorReceiver.class, "wip"); private volatile boolean done; private volatile boolean cancelled; volatile Throwable error; @SuppressWarnings("rawtypes") static final AtomicReferenceFieldUpdater<RecoverableReactorReceiver, Throwable> ERROR = AtomicReferenceFieldUpdater.newUpdater(RecoverableReactorReceiver.class, Throwable.class, "error"); /** * Create a recoverable-receiver that supports the message-flux to stream messages from the receiver attached * to a messaging entity to the message-flux's downstream subscriber and recover from receiver termination. * * @param parent the parent message-flux. * @param messageSubscriber the message-flux's downstream subscriber to notify the events. * @param prefetch the number of messages that the operator should prefetch from the messaging entity * (for a less chatty network and faster message processing on the client). * @param creditFlowMode the mode indicating how to compute the credit and when to send it to the broker. 
* @param retryPolicy the retry policy to use to recover from receiver termination. */ RecoverableReactorReceiver(MessageFlux parent, CoreSubscriber<? super Message> messageSubscriber, int prefetch, CreditFlowMode creditFlowMode, AmqpRetryPolicy retryPolicy) { this.parent = parent; this.messageSubscriber = messageSubscriber; this.prefetch = prefetch; this.creditFlowMode = creditFlowMode; this.retryPolicy = retryPolicy; this.logger = parent.logger; } /** * Invoked by the upstream in response to message-flux subscribing to it. * * @param s the subscription handle for requesting receivers from the upstream or terminating upstream * through cancellation when it is no longer needed. */ @Override public void onSubscribe(Subscription s) { if (Operators.validate(upstream, s)) { upstream = s; messageSubscriber.onSubscribe(this); s.request(1); } } /** * Invoked by the upstream to deliver new receiver. * * @param receiver the new receiver. */ @Override public void onNext(AmqpReceiveLink receiver) { if (done) { receiver.closeAsync().subscribe(); Operators.onNextDropped(receiver, messageSubscriber.currentContext()); return; } final ReactorReceiverMediator mediator = new ReactorReceiverMediator(this, receiver, prefetch, creditFlowMode, logger); if (mediatorHolder.trySet(mediator)) { mediator.onParentReady(); } else { logger.atWarning() .addKeyValue("oldLinkName", mediatorHolder.getLinkName()) .addKeyValue(LINK_NAME_KEY, receiver.getLinkName()) .addKeyValue(ENTITY_PATH_KEY, receiver.getEntityPath()) .log("Got a AmqpReceiveLink when the MessageFlux is already terminated."); receiver.closeAsync().subscribe(); Operators.onDiscard(receiver, messageSubscriber.currentContext()); } } /** * Signals operator termination with error. 
* <ul> * <li>Invoked by the upstream when it errors, or</li> * <li>Invoked by the drain-loop when it detects a 'non-retriable or retry exhaust' error, or </li> * <li>Invoked by the drain-loop when it detects the receiver signaled terminal completion and retry * is disabled (i.e., NULL_RETRY_POLICY is set).</li> * </ul> * * @param e the error signaled. */ @Override public void onError(Throwable e) { if (done) { Operators.onErrorDropped(e, messageSubscriber.currentContext()); return; } if (Exceptions.addThrowable(ERROR, this, e)) { done = true; final String logMessage; if (retryPolicy == NULL_RETRY_POLICY) { logMessage = "Terminal error signal from Upstream|Receiver arrived at MessageFlux."; } else { logMessage = "Terminal error signal from Upstream|RetryLoop arrived at MessageFlux."; } mediatorHolder.withReceiverInfo(logger.atWarning()).log(logMessage, e); drain(null); } else { Operators.onErrorDropped(e, messageSubscriber.currentContext()); } } /** * Signals operator termination with completion. * <ul> * <li>Invoked by the upstream when it completes, or</li> * <li>Invoked by the the drain-loop when it detects first receiver signaled terminal completion and retry * is disabled (i.e., NULL_RETRY_POLICY is set).</li> * </ul> */ @Override public void onComplete() { if (done) { return; } done = true; final String logMessage; if (retryPolicy == NULL_RETRY_POLICY) { logMessage = "Terminal completion signal from Upstream|Receiver arrived at MessageFlux."; } else { logMessage = "Terminal completion signal from Upstream arrived at MessageFlux."; } mediatorHolder.withReceiverInfo(logger.atWarning()).log(logMessage); drain(null); } /** * Invoked by the downstream message subscriber to signal the demand for messages. Whatever has been * requested can be sent downstream, so only signal the demand for what can be safely handled. * No messages will be sent downstream until the demand is signaled. * * @param n the number of messages to send to downstream. 
*/ @Override public void request(long n) { if (Operators.validate(n)) { Operators.addCap(REQUESTED, this, n); drain(null); } } /** * Invoked by downstream to signal termination by cancellation. */ @Override public void cancel() { if (cancelled) { return; } cancelled = true; mediatorHolder.withReceiverInfo(logger.atWarning()) .log("Downstream cancellation signal arrived at MessageFlux."); if (WIP.getAndIncrement(this) == 0) { upstream.cancel(); mediatorHolder.freeze(); } } /** * Invoked by the new mediator when it is ready to be used. A mediator constructed in 'onNext' moves * to ready state when its backing receiver is active. * * @param updateDispositionFunc the function to disposition messages from mediator's backing receiver. */ void onMediatorReady(BiFunction<String, DeliveryState, Mono<Void>> updateDispositionFunc) { retryAttempts.set(0); parent.onNextUpdateDispositionFunction(updateDispositionFunc); drain(null); } /** * The serialized entry point to drain-loop. * * @param dataSignal the message to drop if the operator is terminated by cancellation. */ void drain(Message dataSignal) { if (WIP.getAndIncrement(this) != 0) { if (dataSignal != null && cancelled) { Operators.onDiscard(dataSignal, messageSubscriber.currentContext()); } return; } drainLoop(); } /** * The serialized drain-loop (implementation patterns inspired from RxJava, Reactor Operators). * Reference: 'Operator Concurrency Primitives' series https: */ private void drainLoop() { int missed = 1; CoreSubscriber<? 
super Message> downstream = messageSubscriber; for (; ;) { boolean d = done; ReactorReceiverMediator mediator = mediatorHolder.mediator; boolean hasMediator = mediator != null; if (terminateIfCancelled(downstream, null)) { return; } if (terminateIfErrorOrCompletionSignaled(d, downstream, null)) { return; } long r = requested; long emitted = 0L; boolean mediatorTerminatedAndDrained = false; if (r != 0L && hasMediator) { Queue<Message> q = mediator.queue; while (emitted != r) { Message message = q.poll(); if (terminateIfCancelled(downstream, message)) { return; } if (terminateIfErrorOrCompletionSignaled(done, downstream, message)) { return; } boolean empty = message == null; if (empty && mediator.done) { mediatorTerminatedAndDrained = true; break; } if (empty) { break; } messageSubscriber.onNext(message); emitted++; } if (emitted == r) { if (mediator.queue.isEmpty() && mediator.done) { mediatorTerminatedAndDrained = true; } } if (emitted != 0 && r != Long.MAX_VALUE) { r = REQUESTED.addAndGet(this, -emitted); } mediator.update(r, emitted); } if (r == 0L && hasMediator) { if (terminateIfCancelled(downstream, null)) { return; } if (terminateIfErrorOrCompletionSignaled(done, downstream, null)) { return; } if (mediator.queue.isEmpty() && mediator.done) { mediatorTerminatedAndDrained = true; } } if (mediatorTerminatedAndDrained && !mediator.isRetryInitiated) { mediator.isRetryInitiated = true; mediator.closeAsync().subscribe(); setTerminationSignalOrScheduleNextMediatorRequest(mediator.error, downstream, mediatorHolder); } missed = WIP.addAndGet(this, -missed); if (missed == 0) { break; } } } /** * CONTRACT: Never invoke from the outside of serialized drain-loop. * <p/> * See if downstream signaled cancellation to terminate the operator, if so, react to the cancellation. * * @param downstream the downstream. * @param messageDropped the message that gets dropped if cancellation was signaled. * @return true if canceled, false otherwise. 
*/ private boolean terminateIfCancelled(CoreSubscriber<? super Message> downstream, Message messageDropped) { if (cancelled) { Operators.onDiscard(messageDropped, downstream.currentContext()); upstream.cancel(); mediatorHolder.freeze(); return true; } return false; } /** * CONTRACT: Never invoke from the outside of serialized drain-loop. * <p/> * See if there is a pending signal for the operator termination with error or completion, if so, react to it * by terminating downstream. * * @param d indicate if the operator termination was signaled. * @param downstream the downstream. * @param messageDropped the message that gets dropped if termination happened. * @return true if terminated, false otherwise. */ private boolean terminateIfErrorOrCompletionSignaled(boolean d, CoreSubscriber<? super Message> downstream, Message messageDropped) { if (d) { final LoggingEventBuilder logBuilder = mediatorHolder.withReceiverInfo(logger.atWarning()); Throwable e = error; if (e != null && e != Exceptions.TERMINATED) { e = Exceptions.terminate(ERROR, this); Operators.onDiscard(messageDropped, downstream.currentContext()); upstream.cancel(); mediatorHolder.freeze(); logBuilder.log("MessageFlux reached a terminal error-state, signaling it downstream.", e); downstream.onError(e); return true; } Operators.onDiscard(messageDropped, downstream.currentContext()); upstream.cancel(); mediatorHolder.freeze(); logBuilder.log("MessageFlux reached a terminal completion-state, signaling it downstream."); downstream.onComplete(); return true; } return false; } /** * CONTRACT: Never invoke from the outside of serialized drain-loop. * <p/> * 1. When retry is enabled (i.e., NULL_RETRY_POLICY is not set) then schedule request (retry) for the next mediator if * <ul> * <li>the operator is not in a termination signaled state,</li> * <li> and * <ul> * <li>there is no error Or</li> * <li>error is retriable and the retry is not exhausted.</li> * </ul> * </li> * </ul> * 2. 
If retry is enabled (i.e., NULL_RETRY_POLICY is not set) and there is 'non-retriable or retry exhaust' * error, then set an error signal for the drain-loop to terminate the operator. * <br/> * 3. If retry is disabled (i.e., NULL_RETRY_POLICY is set), then set an error signal (if first receiver error-ed) * or completion signal (if first receiver completed) for the drain-loop to terminate the operator. * <br/> * @param error the error that leads to error-ed termination of the last mediator or {@code null} * if terminated with completion. * @param downstream the downstream. * @param mediatorHolder the mediator holder. */ private void setTerminationSignalOrScheduleNextMediatorRequest(Throwable error, CoreSubscriber<? super Message> downstream, MediatorHolder mediatorHolder) { final LoggingEventBuilder logBuilder = mediatorHolder.withReceiverInfo(logger.atWarning()); if (cancelled || done) { logBuilder.log("MessageFlux reached terminal-state [done:{}, cancelled:{}].", done, cancelled); return; } if (retryPolicy == NULL_RETRY_POLICY) { if (error == null) { onComplete(); } else { onError(error); } return; } final Duration delay; if (error == null) { delay = Duration.ofSeconds(1); logBuilder.addKeyValue("retryAfter", delay.toMillis()) .log("Current mediator reached terminal completion-state (retriable:true)."); } else { final int attempt = retryAttempts.incrementAndGet(); delay = retryPolicy.calculateRetryDelay(error, attempt); if (delay != null) { logBuilder.addKeyValue("attempt", attempt) .addKeyValue("retryAfter", delay.toMillis()) .log("Current mediator reached terminal error-state (retriable:true).", error); } else { logBuilder.addKeyValue("attempt", attempt) .log("Current mediator reached terminal error-state (retriable:false) Or MessageFlux retries exhausted.", error); onError(error); return; } } try { scheduleNextMediatorRequest(delay, mediatorHolder); } catch (RejectedExecutionException ree) { final RuntimeException e = Operators.onRejectedExecution(ree, 
downstream.currentContext()); mediatorHolder.withReceiverInfo(logger.atWarning()) .log("Unable to schedule a request for a new mediator (retriable:false).", e); onError(e); } } /** * Schedule a task to request a new mediator. * * @param delay the backoff duration before requesting the next mediator. * @param mediatorHolder the mediator holder. * @throws RejectedExecutionException if the scheduler is unable to schedule the task. */ private void scheduleNextMediatorRequest(Duration delay, MediatorHolder mediatorHolder) { final Runnable task = () -> { final LoggingEventBuilder logBuilder = mediatorHolder.withReceiverInfo(logger.atWarning()); if (cancelled || done) { logBuilder.log("During the backoff, MessageFlux reached terminal-state [done:{}, cancelled:{}].", done, cancelled); return; } logBuilder.log("Requesting a new mediator."); upstream.request(1); }; mediatorHolder.nextMediatorRequestDisposable = Schedulers.parallel().schedule(task, delay.toMillis(), TimeUnit.MILLISECONDS); } } /** * The mediator that coordinates between {@link RecoverableReactorReceiver} and a receiver {@link AmqpReceiveLink}. 
*/ private static final class ReactorReceiverMediator implements AsyncCloseable, CoreSubscriber<Message>, Subscription { private static final Subscription CANCELLED_SUBSCRIPTION = Operators.cancelledSubscription(); private final RecoverableReactorReceiver parent; private final AmqpReceiveLink receiver; private final int prefetch; private final CreditFlowMode creditFlowMode; private final ClientLogger logger; private final Disposable.Composite endpointStateDisposable = Disposables.composite(); private CreditAccountingStrategy creditAccounting; private volatile boolean ready; private volatile Subscription s; @SuppressWarnings("rawtypes") private static final AtomicReferenceFieldUpdater<ReactorReceiverMediator, Subscription> S = AtomicReferenceFieldUpdater.newUpdater(ReactorReceiverMediator.class, Subscription.class, "s"); volatile Throwable error; @SuppressWarnings("rawtypes") static final AtomicReferenceFieldUpdater<ReactorReceiverMediator, Throwable> ERROR = AtomicReferenceFieldUpdater.newUpdater(ReactorReceiverMediator.class, Throwable.class, "error"); /** * The flag indicating if the mediator is terminated by completion or error. */ volatile boolean done; /** * The drain loop iteration that first identifies the mediator as terminated (done == true) and * and drained (queue.isEmpty() == true) will initiate a retry to obtain the next mediator. While that retry * completion is pending, any request for messages from downstream may lead to further drain loop iterations; * the 'isRetryInitiated' flag ensures those drain loop iterations (those also see the mediator as terminated * and drained) will not initiate duplicate retries. */ volatile boolean isRetryInitiated; /** * The queue holding messages from the backing receiver's message publisher, waiting to be drained by * the drain-loop iterations. */ final Queue<Message> queue; /** * Create a mediator to channel events (messages, termination) from a receiver to recoverable-receiver. 
* * @param parent the recoverable-receiver (a.k.a. parent). * @param receiver the receiver backing the mediator. * @param prefetch the number of messages to prefetch using the receiver (for a less chatty network * and faster message processing on the client). * @param creditFlowMode the mode indicating how to compute the credit and when to send it to the broker. */ ReactorReceiverMediator(RecoverableReactorReceiver parent, AmqpReceiveLink receiver, int prefetch, CreditFlowMode creditFlowMode, ClientLogger logger) { this.parent = parent; this.receiver = receiver; this.prefetch = prefetch; this.creditFlowMode = creditFlowMode; this.logger = logger; this.queue = Queues.<Message>get(Integer.MAX_VALUE).get(); } /** * Invoked by the parent {@link RecoverableReactorReceiver} when it is ready to use this new mediator * (The mediator facilitate communication between the parent and the new receiver ({@link AmqpReceiveLink}) that * mediator wraps). In response, this mediator notifies the parent about its readiness by invoking * {@link RecoverableReactorReceiver */ void onParentReady() { updateLogWithReceiverId(logger.atWarning()).log("Setting next mediator and waiting for activation."); receiver.receive().subscribe(this); final Disposable endpointDisposable = receiver.getEndpointStates() .filter(s -> s == AmqpEndpointState.ACTIVE) .publishOn(ReceiversPumpingScheduler.instance()) .doOnEach(event -> { if (event.isOnNext()) { assert event.get() == AmqpEndpointState.ACTIVE; if (!ready) { updateLogWithReceiverId(logger.atWarning()).log("The mediator is active."); ready = true; parent.onMediatorReady(this::updateDisposition); } return; } if (event.isOnError()) { final Throwable e = event.getThrowable(); updateLogWithReceiverId(logger.atWarning()).log("Receiver emitted terminal error.", e); onLinkError(e); return; } if (event.isOnComplete()) { updateLogWithReceiverId(logger.atWarning()).log("Receiver emitted terminal completion."); onLinkComplete(); } }).subscribe(__ -> { }, __ -> { 
}, () -> { }); endpointStateDisposable.add(endpointDisposable); } /** * Invoked in response to the subscription to the receiver's message publisher. * * @param s the subscription to request messages from the receiver's message publisher and terminate * that publisher through cancellation when it is no longer needed. */ @Override public void onSubscribe(Subscription s) { if (Operators.setOnce(S, this, s)) { switch (creditFlowMode) { case RequestDriven: creditAccounting = new RequestDrivenCreditAccountingStrategy(receiver, s, prefetch, logger); break; case EmissionDriven: creditAccounting = new EmissionDrivenCreditAccountingStrategy(receiver, s, prefetch, logger); break; default: throw new IllegalArgumentException("Unknown CreditFlowMode " + creditFlowMode); } } } /** * CONTRACT: Never invoke from the outside of serialized drain-loop. * <p/> * Notify the latest view of the downstream request and messages emitted by the emitter-loop during * the last drain-loop iteration. * * @param request the latest view of the downstream request. * @param emitted the number of messages emitted by the latest emitter-loop run. */ void update(long request, long emitted) { if (ready && !done) { creditAccounting.update(request, emitted); } } /** * Invoked by the receiver's message publisher to deliver a message. * * @param message the message. 
*/ @Override public void onNext(Message message) { if (done) { Operators.onNextDropped(message, parent.currentContext()); return; } if (s == Operators.cancelledSubscription()) { Operators.onDiscard(message, parent.currentContext()); return; } if (queue.offer(message)) { parent.drain(message); } else { Operators.onOperatorError(this, Exceptions.failWithOverflow(Exceptions.BACKPRESSURE_ERROR_QUEUE_FULL), parent.messageSubscriber.currentContext()); Operators.onDiscard(message, parent.messageSubscriber.currentContext()); done = true; parent.drain(message); } } @Override public void onError(Throwable e) { } /** * Invoked by the receiver's endpoint publisher to signal mediator termination with an error. * * @param e the error signaled. */ private void onLinkError(Throwable e) { if (done) { Operators.onErrorDropped(e, parent.messageSubscriber.currentContext()); return; } if (ERROR.compareAndSet(this, null, e)) { done = true; parent.drain(null); } else { done = true; Operators.onErrorDropped(e, parent.messageSubscriber.currentContext()); } } @Override public void onComplete() { } /** * Invoked by the receiver's endpoint publisher to signal mediator termination with completion. */ private void onLinkComplete() { if (done) { return; } done = true; parent.drain(null); } @Override public void request(long n) { throw new IllegalStateException("The request accounting must be through update(,)."); } @Override public void cancel() { if (Operators.terminate(S, this)) { Operators.onDiscardQueueWithClear(queue, parent.currentContext(), null); } endpointStateDisposable.dispose(); } /** * Close the mediator. 
Closing is triggered in the following cases - * <ul> * <li>When {@link RecoverableReactorReceiver} switches to a new (i.e., next) mediator, it closes the current mediator.</li> * <li>When {@link RecoverableReactorReceiver} terminates (hence {@link MessageFlux}) due to * <ul> * <li>downstream cancellation or</li> * <li>upstream termination with error or completion or</li> * <li>retry-exhaust-error or non-retriable-error or</li> * <li>termination of receiver with error or completion when NULL_RETRY_POLICY is set,</li> * </ul> * it closes the current (i.e., last) mediator. </li> * </ul> */ @Override public Mono<Void> closeAsync() { cancel(); return receiver.closeAsync(); } /** * Updates the disposition state of a message uniquely identified by the given delivery tag. * * @param deliveryTag delivery tag of message. * @param deliveryState Delivery state of message. * * @return A Mono that completes when the state is successfully updated and acknowledged by message broker. */ private Mono<Void> updateDisposition(String deliveryTag, DeliveryState deliveryState) { if (done || s == CANCELLED_SUBSCRIPTION) { final String state = String.format("[link.done:%b link.cancelled:%b parent.done:%b parent.cancelled:%b]", done, s == CANCELLED_SUBSCRIPTION, parent.done, parent.cancelled); final DeliveryNotOnLinkException dispositionError = DeliveryNotOnLinkException.linkClosed(deliveryTag, deliveryState); final Throwable receiverError = error; if (receiverError != null) { dispositionError.addSuppressed(receiverError); } final Throwable upstreamError = parent.error; if (upstreamError != null) { dispositionError.addSuppressed(upstreamError); } return monoError(logger.atError() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .addKeyValue(DELIVERY_STATE_KEY, deliveryState) .addKeyValue("messageFluxState", state), dispositionError); } return receiver.updateDisposition(deliveryTag, deliveryState); } private LoggingEventBuilder updateLogWithReceiverId(LoggingEventBuilder builder) { return builder 
.addKeyValue(CONNECTION_ID_KEY, receiver.getConnectionId()) .addKeyValue(LINK_NAME_KEY, receiver.getLinkName()) .addKeyValue(ENTITY_PATH_KEY, receiver.getEntityPath()); } } /** * A type that supports atomically setting a mediator and disposing of the last set mediator upon freezing. * Once frozen, further attempt to set the mediator will be rejected. The object of this type holds * the current mediator that the drain-loop access to read events from the receiver (backing the mediator). */ private static final class MediatorHolder { private boolean isFrozen; volatile ReactorReceiverMediator mediator; volatile Disposable nextMediatorRequestDisposable; /** * Try to set the current mediator for the drain-loop. * * @param mediator the mediator. * @return true if the mediator is set successfully, false if the attempt to set is rejected due * to the holder in the frozen state. */ boolean trySet(ReactorReceiverMediator mediator) { synchronized (this) { if (isFrozen) { return false; } this.mediator = mediator; return true; } } /** * Freeze the holder to dispose of the current mediator and any resources it tracks; no further * mediator can be set once frozen. Freezing happens when the message-flux operator is terminated. */ void freeze() { final Disposable d; final ReactorReceiverMediator m; synchronized (this) { if (isFrozen) { return; } d = nextMediatorRequestDisposable; m = this.mediator; isFrozen = true; } if (d != null) { d.dispose(); } if (m != null) { m.closeAsync().subscribe(); } } String getLinkName() { final ReactorReceiverMediator m = mediator; return m != null ? m.receiver.getLinkName() : null; } /** * annotate the log builder with the receiver info (connectionId:linkName:entityPath) if the mediator has * receiver set, else nop. * * @param builder the log builder to annotate. * @return the log builder annotated with receiver info. 
*/ LoggingEventBuilder withReceiverInfo(LoggingEventBuilder builder) { final ReactorReceiverMediator m = mediator; if (m != null) { return builder.addKeyValue(CONNECTION_ID_KEY, m.receiver.getConnectionId()) .addKeyValue(LINK_NAME_KEY, m.receiver.getLinkName()) .addKeyValue(ENTITY_PATH_KEY, m.receiver.getEntityPath()); } return builder; } } }
class MessageFlux extends FluxOperator<AmqpReceiveLink, Message> { /** An AmqpRetryPolicy const indicates that MessageFlux should terminate when the first receiver terminates * (i.e., disables the retry action to obtain next receiver from the upstream). **/ public static final AmqpRetryPolicy NULL_RETRY_POLICY = new FixedAmqpRetryPolicy(new AmqpRetryOptions()); private static final String MESSAGE_FLUX_KEY = "messageFlux"; private final ClientLogger logger; /** * The prefetch value used by the credit computation strategy. */ private final int prefetch; /** * The mode representing the strategy to compute and send the receiver credit. * See {@link CreditAccountingStrategy} */ private final CreditFlowMode creditFlowMode; /** * The retry policy to use to establish a new receiver when the current receiver encounter terminal error. */ private final AmqpRetryPolicy retryPolicy; /** * The function for updating disposition state of messages using the current receiver. */ private volatile DispositionFunction updateDispositionFunc; /** * Create a message-flux to stream messages from a messaging entity to downstream subscriber. * * @param source the upstream source that, upon a request, provide a new receiver connected to the messaging entity. * @param prefetch the number of messages that the operator should prefetch from the messaging entity (for a * less chatty network and faster message processing on the client). * @param creditFlowMode the mode indicating how to compute the credit and when to send it to the broker. * @param retryPolicy the retry policy to use to obtain a new receiver upon current receiver termination. * @throws IllegalStateException if the {@code prefetch} is a negative value. * @throws NullPointerException if the {@code retryPolicy} is {@code null}. */ /** * Register the downstream subscriber. * * @param actual the downstream subscriber interested in the published messages and termination. */ @Override public void subscribe(CoreSubscriber<? 
super Message> actual) { source.subscribe(new RecoverableReactorReceiver(this, actual, prefetch, creditFlowMode, retryPolicy)); } /** * Updates the disposition state of a message uniquely identified by the given delivery tag. * * @param deliveryTag delivery tag of message. * @param deliveryState Delivery state of message. * * @return A Mono that completes when the state is successfully updated and acknowledged by message broker. */ public Mono<Void> updateDisposition(String deliveryTag, DeliveryState deliveryState) { final DispositionFunction function = this.updateDispositionFunc; return function.updateDisposition(deliveryTag, deliveryState); } /** * The callback invoked when next receiver is attached to the messaging entity from which this message-flux * instance stream messages. There will be only one receiver at a time, and this callback delivers the reference * to the function to disposition messages that arrives in the new receiver. * * @param updateDispositionFunc the function to disposition messages delivered by the current backing receiver. */ void onNextUpdateDispositionFunction(DispositionFunction updateDispositionFunc) { this.updateDispositionFunc = updateDispositionFunc; } /** * Represents a function that accepts delivery tag and disposition state {@link DeliveryState} to set for the message * identified by that delivery tag. The function returns {@link Mono} representing the outcome of the disposition * operation attempted. */ @FunctionalInterface private interface DispositionFunction { /** * Indicate that the disposition cannot be attempted as there is no backing receiver link to perform the operation. */ DispositionFunction NO_DISPOSITION = (t, s) -> Mono.error(new IllegalStateException("Cannot update disposition as no receive-link is established.")); /** * Updates the disposition state of a message uniquely identified by the given delivery tag. * * @param deliveryTag delivery tag of message. * @param deliveryState Delivery state of message. 
* * @return A Mono that completes when the state is successfully updated and acknowledged by message broker. */ Mono<Void> updateDisposition(String deliveryTag, DeliveryState deliveryState); } /** * The underlying consumer and producer extension of the message-flux operator. The consuming side processes events * (about new receiver, terminal signals) from the upstream and events (messages, terminal signals) from * the current receiver. The producing side publishes the messages to message-flux's downstream. The type has * a recovery mechanism to obtain a new receiver from upstream upon the current receiver's termination. * Recoveries happen underneath while the messages flow transparently downstream. The type can terminate downstream * if the upstream terminates, the recovery path encounters a non-retriable error (i.e., the current receiver * terminated with a non-retriable error), or recovery retries exhaust. */ private static final class RecoverableReactorReceiver implements CoreSubscriber<AmqpReceiveLink>, Subscription { private final MediatorHolder mediatorHolder = new MediatorHolder(); private final MessageFlux parent; private final int prefetch; private final CreditFlowMode creditFlowMode; private final AmqpRetryPolicy retryPolicy; private final ClientLogger logger; private final AtomicInteger retryAttempts = new AtomicInteger(); private final CoreSubscriber<? 
super Message> messageSubscriber; private Subscription upstream; private volatile long requested; @SuppressWarnings("rawtypes") private static final AtomicLongFieldUpdater<RecoverableReactorReceiver> REQUESTED = AtomicLongFieldUpdater.newUpdater(RecoverableReactorReceiver.class, "requested"); private volatile int wip; @SuppressWarnings("rawtypes") private static final AtomicIntegerFieldUpdater<RecoverableReactorReceiver> WIP = AtomicIntegerFieldUpdater.newUpdater(RecoverableReactorReceiver.class, "wip"); private volatile boolean done; private volatile boolean cancelled; private volatile Throwable error; @SuppressWarnings("rawtypes") private static final AtomicReferenceFieldUpdater<RecoverableReactorReceiver, Throwable> ERROR = AtomicReferenceFieldUpdater.newUpdater(RecoverableReactorReceiver.class, Throwable.class, "error"); /** * Create a recoverable-receiver that supports the message-flux to stream messages from the receiver attached * to a messaging entity to the message-flux's downstream subscriber and recover from receiver termination. * * @param parent the parent message-flux. * @param messageSubscriber the message-flux's downstream subscriber to notify the events. * @param prefetch the number of messages that the operator should prefetch from the messaging entity * (for a less chatty network and faster message processing on the client). * @param creditFlowMode the mode indicating how to compute the credit and when to send it to the broker. * @param retryPolicy the retry policy to use to recover from receiver termination. */ RecoverableReactorReceiver(MessageFlux parent, CoreSubscriber<? 
super Message> messageSubscriber, int prefetch, CreditFlowMode creditFlowMode, AmqpRetryPolicy retryPolicy) { this.parent = parent; this.messageSubscriber = messageSubscriber; this.prefetch = prefetch; this.creditFlowMode = creditFlowMode; this.retryPolicy = retryPolicy; this.logger = parent.logger; } /** * Invoked by the upstream in response to message-flux subscribing to it. * * @param s the subscription handle for requesting receivers from the upstream or terminating upstream * through cancellation when it is no longer needed. */ @Override public void onSubscribe(Subscription s) { if (Operators.validate(upstream, s)) { upstream = s; messageSubscriber.onSubscribe(this); s.request(1); } } /** * Invoked by the upstream to deliver new receiver. * * @param receiver the new receiver. */ @Override public void onNext(AmqpReceiveLink receiver) { if (done) { receiver.closeAsync().subscribe(); Operators.onNextDropped(receiver, messageSubscriber.currentContext()); return; } final ReactorReceiverMediator mediator = new ReactorReceiverMediator(this, receiver, prefetch, creditFlowMode, logger); if (mediatorHolder.trySet(mediator)) { mediator.onParentReady(); } else { logger.atWarning() .addKeyValue("oldLinkName", mediatorHolder.getLinkName()) .addKeyValue(LINK_NAME_KEY, receiver.getLinkName()) .addKeyValue(ENTITY_PATH_KEY, receiver.getEntityPath()) .log("Got a AmqpReceiveLink when the MessageFlux is already terminated."); receiver.closeAsync().subscribe(); Operators.onDiscard(receiver, messageSubscriber.currentContext()); } } /** * Signals operator termination with error. * <ul> * <li>Invoked by the upstream when it errors, or</li> * <li>Invoked by the drain-loop when it detects a 'non-retriable or retry exhaust' error, or </li> * <li>Invoked by the drain-loop when it detects the receiver signaled terminal completion and retry * is disabled (i.e., NULL_RETRY_POLICY is set).</li> * </ul> * * @param e the error signaled. 
*/ @Override public void onError(Throwable e) { if (done) { Operators.onErrorDropped(e, messageSubscriber.currentContext()); return; } if (Exceptions.addThrowable(ERROR, this, e)) { done = true; final String logMessage; if (retryPolicy == NULL_RETRY_POLICY) { logMessage = "Terminal error signal from Upstream|Receiver arrived at MessageFlux."; } else { logMessage = "Terminal error signal from Upstream|RetryLoop arrived at MessageFlux."; } mediatorHolder.withReceiverInfo(logger.atWarning()).log(logMessage, e); drain(null); } else { Operators.onErrorDropped(e, messageSubscriber.currentContext()); } } /** * Signals operator termination with completion. * <ul> * <li>Invoked by the upstream when it completes, or</li> * <li>Invoked by the the drain-loop when it detects first receiver signaled terminal completion and retry * is disabled (i.e., NULL_RETRY_POLICY is set).</li> * </ul> */ @Override public void onComplete() { if (done) { return; } done = true; final String logMessage; if (retryPolicy == NULL_RETRY_POLICY) { logMessage = "Terminal completion signal from Upstream|Receiver arrived at MessageFlux."; } else { logMessage = "Terminal completion signal from Upstream arrived at MessageFlux."; } mediatorHolder.withReceiverInfo(logger.atWarning()).log(logMessage); drain(null); } /** * Invoked by the downstream message subscriber to signal the demand for messages. Whatever has been * requested can be sent downstream, so only signal the demand for what can be safely handled. * No messages will be sent downstream until the demand is signaled. * * @param n the number of messages to send to downstream. */ @Override public void request(long n) { if (Operators.validate(n)) { Operators.addCap(REQUESTED, this, n); drain(null); } } /** * Invoked by downstream to signal termination by cancellation. 
*/ @Override public void cancel() { if (cancelled) { return; } cancelled = true; mediatorHolder.withReceiverInfo(logger.atWarning()) .log("Downstream cancellation signal arrived at MessageFlux."); if (WIP.getAndIncrement(this) == 0) { upstream.cancel(); mediatorHolder.freeze(); } } /** * Invoked by the new mediator when it is ready to be used. A mediator constructed in 'onNext' moves * to ready state when its backing receiver is active. * * @param updateDispositionFunc the function to disposition messages from mediator's backing receiver. */ void onMediatorReady(DispositionFunction updateDispositionFunc) { retryAttempts.set(0); parent.onNextUpdateDispositionFunction(updateDispositionFunc); drain(null); } /** * The serialized entry point to drain-loop. * * @param dataSignal the message to drop if the operator is terminated by cancellation. */ void drain(Message dataSignal) { if (WIP.getAndIncrement(this) != 0) { if (dataSignal != null && cancelled) { Operators.onDiscard(dataSignal, messageSubscriber.currentContext()); } return; } drainLoop(); } /** * The serialized drain-loop (implementation patterns inspired from RxJava, Reactor Operators). * Reference: 'Operator Concurrency Primitives' series https: */ private void drainLoop() { int missed = 1; CoreSubscriber<? 
super Message> downstream = messageSubscriber; for (; ;) { boolean d = done; ReactorReceiverMediator mediator = mediatorHolder.mediator; boolean hasMediator = mediator != null; if (terminateIfCancelled(downstream, null)) { return; } if (terminateIfErrorOrCompletionSignaled(d, downstream, null)) { return; } long r = requested; long emitted = 0L; boolean mediatorTerminatedAndDrained = false; if (r != 0L && hasMediator) { Queue<Message> q = mediator.queue; while (emitted != r) { Message message = q.poll(); if (terminateIfCancelled(downstream, message)) { return; } if (terminateIfErrorOrCompletionSignaled(done, downstream, message)) { return; } boolean empty = message == null; if (empty && mediator.done) { mediatorTerminatedAndDrained = true; break; } if (empty) { break; } messageSubscriber.onNext(message); emitted++; } if (emitted == r) { if (mediator.queue.isEmpty() && mediator.done) { mediatorTerminatedAndDrained = true; } } if (emitted != 0 && r != Long.MAX_VALUE) { r = REQUESTED.addAndGet(this, -emitted); } mediator.update(r, emitted); } if (r == 0L && hasMediator) { if (terminateIfCancelled(downstream, null)) { return; } if (terminateIfErrorOrCompletionSignaled(done, downstream, null)) { return; } if (mediator.queue.isEmpty() && mediator.done) { mediatorTerminatedAndDrained = true; } } if (mediatorTerminatedAndDrained && !mediator.isRetryInitiated) { mediator.isRetryInitiated = true; mediator.closeAsync().subscribe(); setTerminationSignalOrScheduleNextMediatorRequest(mediator.error, downstream, mediatorHolder); } missed = WIP.addAndGet(this, -missed); if (missed == 0) { break; } } } /** * CONTRACT: Never invoke from the outside of serialized drain-loop. * <p> * See if downstream signaled cancellation to terminate the operator, if so, react to the cancellation. * </p> * @param downstream the downstream. * @param messageDropped the message that gets dropped if cancellation was signaled. * @return true if canceled, false otherwise. 
*/ private boolean terminateIfCancelled(CoreSubscriber<? super Message> downstream, Message messageDropped) { if (cancelled) { Operators.onDiscard(messageDropped, downstream.currentContext()); upstream.cancel(); mediatorHolder.freeze(); return true; } return false; } /** * CONTRACT: Never invoke from the outside of serialized drain-loop. * <p> * See if there is a pending signal for the operator termination with error or completion, if so, react to it * by terminating downstream. * </p> * * @param d indicate if the operator termination was signaled. * @param downstream the downstream. * @param messageDropped the message that gets dropped if termination happened. * @return true if terminated, false otherwise. */ private boolean terminateIfErrorOrCompletionSignaled(boolean d, CoreSubscriber<? super Message> downstream, Message messageDropped) { if (d) { final LoggingEventBuilder logBuilder = mediatorHolder.withReceiverInfo(logger.atWarning()); Throwable e = error; if (e != null && e != Exceptions.TERMINATED) { e = Exceptions.terminate(ERROR, this); Operators.onDiscard(messageDropped, downstream.currentContext()); upstream.cancel(); mediatorHolder.freeze(); logBuilder.log("MessageFlux reached a terminal error-state, signaling it downstream.", e); downstream.onError(e); return true; } Operators.onDiscard(messageDropped, downstream.currentContext()); upstream.cancel(); mediatorHolder.freeze(); logBuilder.log("MessageFlux reached a terminal completion-state, signaling it downstream."); downstream.onComplete(); return true; } return false; } /** * * CONTRACT: Never invoke from the outside of serialized drain-loop. * <p></p> * 1. When retry is enabled (i.e., NULL_RETRY_POLICY is not set) then schedule request (retry) for the next mediator if * <ul> * <li>the operator is not in a termination signaled state,</li> * <li> and * <ul> * <li>there is no error Or</li> * <li>error is retriable and the retry is not exhausted.</li> * </ul> * </li> * </ul> * 2. 
If retry is enabled (i.e., NULL_RETRY_POLICY is not set) and there is 'non-retriable or retry exhaust' * error, then set an error signal for the drain-loop to terminate the operator. * <br/> * 3. If retry is disabled (i.e., NULL_RETRY_POLICY is set), then set an error signal (if first receiver error-ed) * or completion signal (if first receiver completed) for the drain-loop to terminate the operator. * <p></p> * @param error the error that leads to error-ed termination of the last mediator or {@code null} * if terminated with completion. * @param downstream the downstream. * @param mediatorHolder the mediator holder. */ private void setTerminationSignalOrScheduleNextMediatorRequest(Throwable error, CoreSubscriber<? super Message> downstream, MediatorHolder mediatorHolder) { final LoggingEventBuilder logBuilder = mediatorHolder.withReceiverInfo(logger.atWarning()); if (cancelled || done) { logBuilder.log("MessageFlux reached terminal-state [done:{}, cancelled:{}].", done, cancelled); return; } if (retryPolicy == NULL_RETRY_POLICY) { if (error == null) { onComplete(); } else { onError(error); } return; } final Duration delay; if (error == null) { delay = Duration.ofSeconds(1); logBuilder.addKeyValue("retryAfter", delay.toMillis()) .log("Current mediator reached terminal completion-state (retriable:true)."); } else { final int attempt = retryAttempts.incrementAndGet(); delay = retryPolicy.calculateRetryDelay(error, attempt); if (delay != null) { logBuilder.addKeyValue("attempt", attempt) .addKeyValue("retryAfter", delay.toMillis()) .log("Current mediator reached terminal error-state (retriable:true).", error); } else { logBuilder.addKeyValue("attempt", attempt) .log("Current mediator reached terminal error-state (retriable:false) Or MessageFlux retries exhausted.", error); onError(error); return; } } try { scheduleNextMediatorRequest(delay, mediatorHolder); } catch (RejectedExecutionException ree) { final RuntimeException e = Operators.onRejectedExecution(ree, 
downstream.currentContext()); mediatorHolder.withReceiverInfo(logger.atWarning()) .log("Unable to schedule a request for a new mediator (retriable:false).", e); onError(e); } } /** * Schedule a task to request a new mediator. * * @param delay the backoff duration before requesting the next mediator. * @param mediatorHolder the mediator holder. * @throws RejectedExecutionException if the scheduler is unable to schedule the task. */ private void scheduleNextMediatorRequest(Duration delay, MediatorHolder mediatorHolder) { final Runnable task = () -> { final LoggingEventBuilder logBuilder = mediatorHolder.withReceiverInfo(logger.atWarning()); if (cancelled || done) { logBuilder.log("During the backoff, MessageFlux reached terminal-state [done:{}, cancelled:{}].", done, cancelled); return; } logBuilder.log("Requesting a new mediator."); upstream.request(1); }; mediatorHolder.nextMediatorRequestDisposable = Schedulers.parallel().schedule(task, delay.toMillis(), TimeUnit.MILLISECONDS); } } /** * The mediator that coordinates between {@link RecoverableReactorReceiver} and a receiver {@link AmqpReceiveLink}. 
*/ private static final class ReactorReceiverMediator implements AsyncCloseable, CoreSubscriber<Message>, Subscription { private static final Subscription CANCELLED_SUBSCRIPTION = Operators.cancelledSubscription(); private final RecoverableReactorReceiver parent; private final AmqpReceiveLink receiver; private final int prefetch; private final CreditFlowMode creditFlowMode; private final ClientLogger logger; private final Disposable.Composite endpointStateDisposable = Disposables.composite(); private CreditAccountingStrategy creditAccounting; private volatile boolean ready; private volatile Subscription s; @SuppressWarnings("rawtypes") private static final AtomicReferenceFieldUpdater<ReactorReceiverMediator, Subscription> S = AtomicReferenceFieldUpdater.newUpdater(ReactorReceiverMediator.class, Subscription.class, "s"); volatile Throwable error; @SuppressWarnings("rawtypes") static final AtomicReferenceFieldUpdater<ReactorReceiverMediator, Throwable> ERROR = AtomicReferenceFieldUpdater.newUpdater(ReactorReceiverMediator.class, Throwable.class, "error"); /** * The flag indicating if the mediator is terminated by completion or error. */ volatile boolean done; /** * The drain loop iteration that first identifies the mediator as terminated (done == true) and * and drained (queue.isEmpty() == true) will initiate a retry to obtain the next mediator. While that retry * completion is pending, any request for messages from downstream may lead to further drain loop iterations; * the 'isRetryInitiated' flag ensures those drain loop iterations (those also see the mediator as terminated * and drained) will not initiate duplicate retries. */ volatile boolean isRetryInitiated; /** * The queue holding messages from the backing receiver's message publisher, waiting to be drained by * the drain-loop iterations. */ final Queue<Message> queue; /** * Create a mediator to channel events (messages, termination) from a receiver to recoverable-receiver. 
* * @param parent the recoverable-receiver (a.k.a. parent). * @param receiver the receiver backing the mediator. * @param prefetch the number of messages to prefetch using the receiver (for a less chatty network * and faster message processing on the client). * @param creditFlowMode the mode indicating how to compute the credit and when to send it to the broker. */ ReactorReceiverMediator(RecoverableReactorReceiver parent, AmqpReceiveLink receiver, int prefetch, CreditFlowMode creditFlowMode, ClientLogger logger) { this.parent = parent; this.receiver = receiver; this.prefetch = prefetch; this.creditFlowMode = creditFlowMode; this.logger = logger; this.queue = Queues.<Message>get(Integer.MAX_VALUE).get(); } /** * Invoked by the parent {@link RecoverableReactorReceiver} when it is ready to use this new mediator * (The mediator facilitate communication between the parent and the new receiver ({@link AmqpReceiveLink}) that * mediator wraps). In response, this mediator notifies the parent about its readiness by invoking * {@link RecoverableReactorReceiver */ void onParentReady() { updateLogWithReceiverId(logger.atWarning()).log("Setting next mediator and waiting for activation."); receiver.receive().subscribe(this); final Disposable endpointDisposable = receiver.getEndpointStates() .filter(s -> s == AmqpEndpointState.ACTIVE) .publishOn(ReceiversPumpingScheduler.instance()) .subscribe(state -> { assert state == AmqpEndpointState.ACTIVE; if (!ready) { updateLogWithReceiverId(logger.atWarning()).log("The mediator is active."); ready = true; parent.onMediatorReady(this::updateDisposition); } }, e -> { updateLogWithReceiverId(logger.atWarning()).log("Receiver emitted terminal error.", e); onLinkError(e); }, () -> { updateLogWithReceiverId(logger.atWarning()).log("Receiver emitted terminal completion."); onLinkComplete(); }); endpointStateDisposable.add(endpointDisposable); } /** * Invoked in response to the subscription to the receiver's message publisher. 
* * @param s the subscription to request messages from the receiver's message publisher and terminate * that publisher through cancellation when it is no longer needed. */ @Override public void onSubscribe(Subscription s) { if (Operators.setOnce(S, this, s)) { switch (creditFlowMode) { case RequestDriven: creditAccounting = new RequestDrivenCreditAccountingStrategy(receiver, s, prefetch, logger); break; case EmissionDriven: creditAccounting = new EmissionDrivenCreditAccountingStrategy(receiver, s, prefetch, logger); break; default: throw new IllegalArgumentException("Unknown CreditFlowMode " + creditFlowMode); } } } /** * CONTRACT: Never invoke from the outside of serialized drain-loop. * <p> * Notify the latest view of the downstream request and messages emitted by the emitter-loop during * the last drain-loop iteration. * </p> * @param request the latest view of the downstream request. * @param emitted the number of messages emitted by the latest emitter-loop run. */ void update(long request, long emitted) { if (ready && !done) { creditAccounting.update(request, emitted); } } /** * Invoked by the receiver's message publisher to deliver a message. * * @param message the message. */ @Override public void onNext(Message message) { if (done) { Operators.onNextDropped(message, parent.currentContext()); return; } if (s == Operators.cancelledSubscription()) { Operators.onDiscard(message, parent.currentContext()); return; } if (queue.offer(message)) { parent.drain(message); } else { Operators.onOperatorError(this, Exceptions.failWithOverflow(Exceptions.BACKPRESSURE_ERROR_QUEUE_FULL), parent.messageSubscriber.currentContext()); Operators.onDiscard(message, parent.messageSubscriber.currentContext()); done = true; parent.drain(message); } } @Override public void onError(Throwable e) { } /** * Invoked by the receiver's endpoint publisher to signal mediator termination with an error. * * @param e the error signaled. 
*/ private void onLinkError(Throwable e) { if (done) { Operators.onErrorDropped(e, parent.messageSubscriber.currentContext()); return; } if (ERROR.compareAndSet(this, null, e)) { done = true; parent.drain(null); } else { done = true; Operators.onErrorDropped(e, parent.messageSubscriber.currentContext()); } } @Override public void onComplete() { } /** * Invoked by the receiver's endpoint publisher to signal mediator termination with completion. */ private void onLinkComplete() { if (done) { return; } done = true; parent.drain(null); } @Override public void request(long n) { throw new IllegalStateException("The request accounting must be through update(,)."); } @Override public void cancel() { if (Operators.terminate(S, this)) { Operators.onDiscardQueueWithClear(queue, parent.currentContext(), null); } endpointStateDisposable.dispose(); } /** * Close the mediator. Closing is triggered in the following cases - * <ul> * <li>When {@link RecoverableReactorReceiver} switches to a new (i.e., next) mediator, it closes the current mediator.</li> * <li>When {@link RecoverableReactorReceiver} terminates (hence {@link MessageFlux}) due to * <ul> * <li>downstream cancellation or</li> * <li>upstream termination with error or completion or</li> * <li>retry-exhaust-error or non-retriable-error or</li> * <li>termination of receiver with error or completion when NULL_RETRY_POLICY is set,</li> * </ul> * it closes the current (i.e., last) mediator. </li> * </ul> */ @Override public Mono<Void> closeAsync() { cancel(); return receiver.closeAsync(); } /** * Updates the disposition state of a message uniquely identified by the given delivery tag. * * @param deliveryTag delivery tag of message. * @param deliveryState Delivery state of message. * * @return A Mono that completes when the state is successfully updated and acknowledged by message broker. 
*/ private Mono<Void> updateDisposition(String deliveryTag, DeliveryState deliveryState) { if (done || s == CANCELLED_SUBSCRIPTION) { final String state = String.format("[link.done:%b link.cancelled:%b parent.done:%b parent.cancelled:%b]", done, s == CANCELLED_SUBSCRIPTION, parent.done, parent.cancelled); final DeliveryNotOnLinkException dispositionError = DeliveryNotOnLinkException.linkClosed(deliveryTag, deliveryState); final Throwable receiverError = error; if (receiverError != null) { dispositionError.addSuppressed(receiverError); } final Throwable upstreamError = parent.error; if (upstreamError != null) { dispositionError.addSuppressed(upstreamError); } return monoError(logger.atError() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .addKeyValue(DELIVERY_STATE_KEY, deliveryState) .addKeyValue("messageFluxState", state), dispositionError); } return receiver.updateDisposition(deliveryTag, deliveryState); } private LoggingEventBuilder updateLogWithReceiverId(LoggingEventBuilder builder) { return builder .addKeyValue(CONNECTION_ID_KEY, receiver.getConnectionId()) .addKeyValue(LINK_NAME_KEY, receiver.getLinkName()) .addKeyValue(ENTITY_PATH_KEY, receiver.getEntityPath()); } } /** * A type that supports atomically setting a mediator and disposing of the last set mediator upon freezing. * Once frozen, further attempt to set the mediator will be rejected. The object of this type holds * the current mediator that the drain-loop access to read events from the receiver (backing the mediator). */ private static final class MediatorHolder { private boolean isFrozen; volatile ReactorReceiverMediator mediator; volatile Disposable nextMediatorRequestDisposable; /** * Try to set the current mediator for the drain-loop. * * @param mediator the mediator. * @return true if the mediator is set successfully, false if the attempt to set is rejected due * to the holder in the frozen state. 
*/ boolean trySet(ReactorReceiverMediator mediator) { synchronized (this) { if (isFrozen) { return false; } this.mediator = mediator; return true; } } /** * Freeze the holder to dispose of the current mediator and any resources it tracks; no further * mediator can be set once frozen. Freezing happens when the message-flux operator is terminated. */ void freeze() { final Disposable d; final ReactorReceiverMediator m; synchronized (this) { if (isFrozen) { return; } d = nextMediatorRequestDisposable; m = this.mediator; isFrozen = true; } if (d != null) { d.dispose(); } if (m != null) { m.closeAsync().subscribe(); } } String getLinkName() { final ReactorReceiverMediator m = mediator; return m != null ? m.receiver.getLinkName() : null; } /** * annotate the log builder with the receiver info (connectionId:linkName:entityPath) if the mediator has * receiver set, else no-op. * * @param builder the log builder to annotate. * @return the log builder annotated with receiver info. */ LoggingEventBuilder withReceiverInfo(LoggingEventBuilder builder) { final ReactorReceiverMediator m = mediator; if (m != null) { return builder.addKeyValue(CONNECTION_ID_KEY, m.receiver.getConnectionId()) .addKeyValue(LINK_NAME_KEY, m.receiver.getLinkName()) .addKeyValue(ENTITY_PATH_KEY, m.receiver.getEntityPath()); } return builder; } } }
Since MessageFlux abstracts rolling over multiple receivers, it needs to be notified of the disposition function bound to the current receiver in order to support the disposition operation; hence the notification flow is: "Mediator (Receiver) -> RecoverableReactorReceiver -> MessageFlux". I have some ideas to make this more readable and will update it. **[DONE]**
private void drainLoop() { int missed = 1; CoreSubscriber<? super Message> downstream = messageSubscriber; for (; ;) { boolean d = done; ReactorReceiverMediator mediator = mediatorHolder.mediator; boolean hasMediator = mediator != null; if (terminateIfCancelled(downstream, null)) { return; } if (terminateIfErrorOrCompletionSignaled(d, downstream, null)) { return; } long r = requested; long emitted = 0L; boolean mediatorTerminatedAndDrained = false; if (r != 0L && hasMediator) { Queue<Message> q = mediator.queue; while (emitted != r) { Message message = q.poll(); if (terminateIfCancelled(downstream, message)) { return; } if (terminateIfErrorOrCompletionSignaled(done, downstream, message)) { return; } boolean empty = message == null; if (empty && mediator.done) { mediatorTerminatedAndDrained = true; break; } if (empty) { break; } messageSubscriber.onNext(message); emitted++; } if (emitted == r) { if (mediator.queue.isEmpty() && mediator.done) { mediatorTerminatedAndDrained = true; } } if (emitted != 0 && r != Long.MAX_VALUE) { r = REQUESTED.addAndGet(this, -emitted); } mediator.update(r, emitted); } if (r == 0L && hasMediator) { if (terminateIfCancelled(downstream, null)) { return; } if (terminateIfErrorOrCompletionSignaled(done, downstream, null)) { return; } if (mediator.queue.isEmpty() && mediator.done) { mediatorTerminatedAndDrained = true; } } if (mediatorTerminatedAndDrained && !mediator.isRetryInitiated) { mediator.isRetryInitiated = true; mediator.closeAsync().subscribe(); setTerminationSignalOrScheduleNextMediatorRequest(mediator.error, downstream, mediatorHolder); } missed = WIP.addAndGet(this, -missed); if (missed == 0) { break; } } }
boolean d = done;
private void drainLoop() { int missed = 1; CoreSubscriber<? super Message> downstream = messageSubscriber; for (; ;) { boolean d = done; ReactorReceiverMediator mediator = mediatorHolder.mediator; boolean hasMediator = mediator != null; if (terminateIfCancelled(downstream, null)) { return; } if (terminateIfErrorOrCompletionSignaled(d, downstream, null)) { return; } long r = requested; long emitted = 0L; boolean mediatorTerminatedAndDrained = false; if (r != 0L && hasMediator) { Queue<Message> q = mediator.queue; while (emitted != r) { Message message = q.poll(); if (terminateIfCancelled(downstream, message)) { return; } if (terminateIfErrorOrCompletionSignaled(done, downstream, message)) { return; } boolean empty = message == null; if (empty && mediator.done) { mediatorTerminatedAndDrained = true; break; } if (empty) { break; } messageSubscriber.onNext(message); emitted++; } if (emitted == r) { if (mediator.queue.isEmpty() && mediator.done) { mediatorTerminatedAndDrained = true; } } if (emitted != 0 && r != Long.MAX_VALUE) { r = REQUESTED.addAndGet(this, -emitted); } mediator.update(r, emitted); } if (r == 0L && hasMediator) { if (terminateIfCancelled(downstream, null)) { return; } if (terminateIfErrorOrCompletionSignaled(done, downstream, null)) { return; } if (mediator.queue.isEmpty() && mediator.done) { mediatorTerminatedAndDrained = true; } } if (mediatorTerminatedAndDrained && !mediator.isRetryInitiated) { mediator.isRetryInitiated = true; mediator.closeAsync().subscribe(); setTerminationSignalOrScheduleNextMediatorRequest(mediator.error, downstream, mediatorHolder); } missed = WIP.addAndGet(this, -missed); if (missed == 0) { break; } } }
class RecoverableReactorReceiver implements CoreSubscriber<AmqpReceiveLink>, Subscription { private final MediatorHolder mediatorHolder = new MediatorHolder(); private final MessageFlux parent; private final int prefetch; private final CreditFlowMode creditFlowMode; private final AmqpRetryPolicy retryPolicy; private final ClientLogger logger; private final AtomicInteger retryAttempts = new AtomicInteger(); private final CoreSubscriber<? super Message> messageSubscriber; private Subscription upstream; private volatile long requested; @SuppressWarnings("rawtypes") private static final AtomicLongFieldUpdater<RecoverableReactorReceiver> REQUESTED = AtomicLongFieldUpdater.newUpdater(RecoverableReactorReceiver.class, "requested"); private volatile int wip; @SuppressWarnings("rawtypes") private static final AtomicIntegerFieldUpdater<RecoverableReactorReceiver> WIP = AtomicIntegerFieldUpdater.newUpdater(RecoverableReactorReceiver.class, "wip"); private volatile boolean done; private volatile boolean cancelled; volatile Throwable error; @SuppressWarnings("rawtypes") static final AtomicReferenceFieldUpdater<RecoverableReactorReceiver, Throwable> ERROR = AtomicReferenceFieldUpdater.newUpdater(RecoverableReactorReceiver.class, Throwable.class, "error"); /** * Create a recoverable-receiver that supports the message-flux to stream messages from the receiver attached * to a messaging entity to the message-flux's downstream subscriber and recover from receiver termination. * * @param parent the parent message-flux. * @param messageSubscriber the message-flux's downstream subscriber to notify the events. * @param prefetch the number of messages that the operator should prefetch from the messaging entity * (for a less chatty network and faster message processing on the client). * @param creditFlowMode the mode indicating how to compute the credit and when to send it to the broker. * @param retryPolicy the retry policy to use to recover from receiver termination. 
*/ RecoverableReactorReceiver(MessageFlux parent, CoreSubscriber<? super Message> messageSubscriber, int prefetch, CreditFlowMode creditFlowMode, AmqpRetryPolicy retryPolicy) { this.parent = parent; this.messageSubscriber = messageSubscriber; this.prefetch = prefetch; this.creditFlowMode = creditFlowMode; this.retryPolicy = retryPolicy; this.logger = parent.logger; } /** * Invoked by the upstream in response to message-flux subscribing to it. * * @param s the subscription handle for requesting receivers from the upstream or terminating upstream * through cancellation when it is no longer needed. */ @Override public void onSubscribe(Subscription s) { if (Operators.validate(upstream, s)) { upstream = s; messageSubscriber.onSubscribe(this); s.request(1); } } /** * Invoked by the upstream to deliver new receiver. * * @param receiver the new receiver. */ @Override public void onNext(AmqpReceiveLink receiver) { if (done) { receiver.closeAsync().subscribe(); Operators.onNextDropped(receiver, messageSubscriber.currentContext()); return; } final ReactorReceiverMediator mediator = new ReactorReceiverMediator(this, receiver, prefetch, creditFlowMode, logger); if (mediatorHolder.trySet(mediator)) { mediator.onParentReady(); } else { logger.atWarning() .addKeyValue("oldLinkName", mediatorHolder.getLinkName()) .addKeyValue(LINK_NAME_KEY, receiver.getLinkName()) .addKeyValue(ENTITY_PATH_KEY, receiver.getEntityPath()) .log("Got a AmqpReceiveLink when the MessageFlux is already terminated."); receiver.closeAsync().subscribe(); Operators.onDiscard(receiver, messageSubscriber.currentContext()); } } /** * Signals operator termination with error. 
* <ul> * <li>Invoked by the upstream when it errors, or</li> * <li>Invoked by the drain-loop when it detects a 'non-retriable or retry exhaust' error, or </li> * <li>Invoked by the drain-loop when it detects the receiver signaled terminal completion and retry * is disabled (i.e., NULL_RETRY_POLICY is set).</li> * </ul> * * @param e the error signaled. */ @Override public void onError(Throwable e) { if (done) { Operators.onErrorDropped(e, messageSubscriber.currentContext()); return; } if (Exceptions.addThrowable(ERROR, this, e)) { done = true; final String logMessage; if (retryPolicy == NULL_RETRY_POLICY) { logMessage = "Terminal error signal from Upstream|Receiver arrived at MessageFlux."; } else { logMessage = "Terminal error signal from Upstream|RetryLoop arrived at MessageFlux."; } mediatorHolder.withReceiverInfo(logger.atWarning()).log(logMessage, e); drain(null); } else { Operators.onErrorDropped(e, messageSubscriber.currentContext()); } } /** * Signals operator termination with completion. * <ul> * <li>Invoked by the upstream when it completes, or</li> * <li>Invoked by the the drain-loop when it detects first receiver signaled terminal completion and retry * is disabled (i.e., NULL_RETRY_POLICY is set).</li> * </ul> */ @Override public void onComplete() { if (done) { return; } done = true; final String logMessage; if (retryPolicy == NULL_RETRY_POLICY) { logMessage = "Terminal completion signal from Upstream|Receiver arrived at MessageFlux."; } else { logMessage = "Terminal completion signal from Upstream arrived at MessageFlux."; } mediatorHolder.withReceiverInfo(logger.atWarning()).log(logMessage); drain(null); } /** * Invoked by the downstream message subscriber to signal the demand for messages. Whatever has been * requested can be sent downstream, so only signal the demand for what can be safely handled. * No messages will be sent downstream until the demand is signaled. * * @param n the number of messages to send to downstream. 
*/ @Override public void request(long n) { if (Operators.validate(n)) { Operators.addCap(REQUESTED, this, n); drain(null); } } /** * Invoked by downstream to signal termination by cancellation. */ @Override public void cancel() { if (cancelled) { return; } cancelled = true; mediatorHolder.withReceiverInfo(logger.atWarning()) .log("Downstream cancellation signal arrived at MessageFlux."); if (WIP.getAndIncrement(this) == 0) { upstream.cancel(); mediatorHolder.freeze(); } } /** * Invoked by the new mediator when it is ready to be used. A mediator constructed in 'onNext' moves * to ready state when its backing receiver is active. * * @param updateDispositionFunc the function to disposition messages from mediator's backing receiver. */ void onMediatorReady(BiFunction<String, DeliveryState, Mono<Void>> updateDispositionFunc) { retryAttempts.set(0); parent.onNextUpdateDispositionFunction(updateDispositionFunc); drain(null); } /** * The serialized entry point to drain-loop. * * @param dataSignal the message to drop if the operator is terminated by cancellation. */ void drain(Message dataSignal) { if (WIP.getAndIncrement(this) != 0) { if (dataSignal != null && cancelled) { Operators.onDiscard(dataSignal, messageSubscriber.currentContext()); } return; } drainLoop(); } /** * The serialized drain-loop (implementation patterns inspired from RxJava, Reactor Operators). * Reference: 'Operator Concurrency Primitives' series https: */ /** * CONTRACT: Never invoke from the outside of serialized drain-loop. * <p/> * See if downstream signaled cancellation to terminate the operator, if so, react to the cancellation. * * @param downstream the downstream. * @param messageDropped the message that gets dropped if cancellation was signaled. * @return true if canceled, false otherwise. */ private boolean terminateIfCancelled(CoreSubscriber<? 
super Message> downstream, Message messageDropped) { if (cancelled) { Operators.onDiscard(messageDropped, downstream.currentContext()); upstream.cancel(); mediatorHolder.freeze(); return true; } return false; } /** * CONTRACT: Never invoke from the outside of serialized drain-loop. * <p/> * See if there is a pending signal for the operator termination with error or completion, if so, react to it * by terminating downstream. * * @param d indicate if the operator termination was signaled. * @param downstream the downstream. * @param messageDropped the message that gets dropped if termination happened. * @return true if terminated, false otherwise. */ private boolean terminateIfErrorOrCompletionSignaled(boolean d, CoreSubscriber<? super Message> downstream, Message messageDropped) { if (d) { final LoggingEventBuilder logBuilder = mediatorHolder.withReceiverInfo(logger.atWarning()); Throwable e = error; if (e != null && e != Exceptions.TERMINATED) { e = Exceptions.terminate(ERROR, this); Operators.onDiscard(messageDropped, downstream.currentContext()); upstream.cancel(); mediatorHolder.freeze(); logBuilder.log("MessageFlux reached a terminal error-state, signaling it downstream.", e); downstream.onError(e); return true; } Operators.onDiscard(messageDropped, downstream.currentContext()); upstream.cancel(); mediatorHolder.freeze(); logBuilder.log("MessageFlux reached a terminal completion-state, signaling it downstream."); downstream.onComplete(); return true; } return false; } /** * CONTRACT: Never invoke from the outside of serialized drain-loop. * <p/> * 1. When retry is enabled (i.e., NULL_RETRY_POLICY is not set) then schedule request (retry) for the next mediator if * <ul> * <li>the operator is not in a termination signaled state,</li> * <li> and * <ul> * <li>there is no error Or</li> * <li>error is retriable and the retry is not exhausted.</li> * </ul> * </li> * </ul> * 2. 
If retry is enabled (i.e., NULL_RETRY_POLICY is not set) and there is 'non-retriable or retry exhaust' * error, then set an error signal for the drain-loop to terminate the operator. * <br/> * 3. If retry is disabled (i.e., NULL_RETRY_POLICY is set), then set an error signal (if first receiver error-ed) * or completion signal (if first receiver completed) for the drain-loop to terminate the operator. * <br/> * @param error the error that leads to error-ed termination of the last mediator or {@code null} * if terminated with completion. * @param downstream the downstream. * @param mediatorHolder the mediator holder. */ private void setTerminationSignalOrScheduleNextMediatorRequest(Throwable error, CoreSubscriber<? super Message> downstream, MediatorHolder mediatorHolder) { final LoggingEventBuilder logBuilder = mediatorHolder.withReceiverInfo(logger.atWarning()); if (cancelled || done) { logBuilder.log("MessageFlux reached terminal-state [done:{}, cancelled:{}].", done, cancelled); return; } if (retryPolicy == NULL_RETRY_POLICY) { if (error == null) { onComplete(); } else { onError(error); } return; } final Duration delay; if (error == null) { delay = Duration.ofSeconds(1); logBuilder.addKeyValue("retryAfter", delay.toMillis()) .log("Current mediator reached terminal completion-state (retriable:true)."); } else { final int attempt = retryAttempts.incrementAndGet(); delay = retryPolicy.calculateRetryDelay(error, attempt); if (delay != null) { logBuilder.addKeyValue("attempt", attempt) .addKeyValue("retryAfter", delay.toMillis()) .log("Current mediator reached terminal error-state (retriable:true).", error); } else { logBuilder.addKeyValue("attempt", attempt) .log("Current mediator reached terminal error-state (retriable:false) Or MessageFlux retries exhausted.", error); onError(error); return; } } try { scheduleNextMediatorRequest(delay, mediatorHolder); } catch (RejectedExecutionException ree) { final RuntimeException e = Operators.onRejectedExecution(ree, 
downstream.currentContext()); mediatorHolder.withReceiverInfo(logger.atWarning()) .log("Unable to schedule a request for a new mediator (retriable:false).", e); onError(e); } } /** * Schedule a task to request a new mediator. * * @param delay the backoff duration before requesting the next mediator. * @param mediatorHolder the mediator holder. * @throws RejectedExecutionException if the scheduler is unable to schedule the task. */ private void scheduleNextMediatorRequest(Duration delay, MediatorHolder mediatorHolder) { final Runnable task = () -> { final LoggingEventBuilder logBuilder = mediatorHolder.withReceiverInfo(logger.atWarning()); if (cancelled || done) { logBuilder.log("During the backoff, MessageFlux reached terminal-state [done:{}, cancelled:{}].", done, cancelled); return; } logBuilder.log("Requesting a new mediator."); upstream.request(1); }; mediatorHolder.nextMediatorRequestDisposable = Schedulers.parallel().schedule(task, delay.toMillis(), TimeUnit.MILLISECONDS); } }
class RecoverableReactorReceiver implements CoreSubscriber<AmqpReceiveLink>, Subscription { private final MediatorHolder mediatorHolder = new MediatorHolder(); private final MessageFlux parent; private final int prefetch; private final CreditFlowMode creditFlowMode; private final AmqpRetryPolicy retryPolicy; private final ClientLogger logger; private final AtomicInteger retryAttempts = new AtomicInteger(); private final CoreSubscriber<? super Message> messageSubscriber; private Subscription upstream; private volatile long requested; @SuppressWarnings("rawtypes") private static final AtomicLongFieldUpdater<RecoverableReactorReceiver> REQUESTED = AtomicLongFieldUpdater.newUpdater(RecoverableReactorReceiver.class, "requested"); private volatile int wip; @SuppressWarnings("rawtypes") private static final AtomicIntegerFieldUpdater<RecoverableReactorReceiver> WIP = AtomicIntegerFieldUpdater.newUpdater(RecoverableReactorReceiver.class, "wip"); private volatile boolean done; private volatile boolean cancelled; private volatile Throwable error; @SuppressWarnings("rawtypes") private static final AtomicReferenceFieldUpdater<RecoverableReactorReceiver, Throwable> ERROR = AtomicReferenceFieldUpdater.newUpdater(RecoverableReactorReceiver.class, Throwable.class, "error"); /** * Create a recoverable-receiver that supports the message-flux to stream messages from the receiver attached * to a messaging entity to the message-flux's downstream subscriber and recover from receiver termination. * * @param parent the parent message-flux. * @param messageSubscriber the message-flux's downstream subscriber to notify the events. * @param prefetch the number of messages that the operator should prefetch from the messaging entity * (for a less chatty network and faster message processing on the client). * @param creditFlowMode the mode indicating how to compute the credit and when to send it to the broker. * @param retryPolicy the retry policy to use to recover from receiver termination. 
*/ RecoverableReactorReceiver(MessageFlux parent, CoreSubscriber<? super Message> messageSubscriber, int prefetch, CreditFlowMode creditFlowMode, AmqpRetryPolicy retryPolicy) { this.parent = parent; this.messageSubscriber = messageSubscriber; this.prefetch = prefetch; this.creditFlowMode = creditFlowMode; this.retryPolicy = retryPolicy; this.logger = parent.logger; } /** * Invoked by the upstream in response to message-flux subscribing to it. * * @param s the subscription handle for requesting receivers from the upstream or terminating upstream * through cancellation when it is no longer needed. */ @Override public void onSubscribe(Subscription s) { if (Operators.validate(upstream, s)) { upstream = s; messageSubscriber.onSubscribe(this); s.request(1); } } /** * Invoked by the upstream to deliver new receiver. * * @param receiver the new receiver. */ @Override public void onNext(AmqpReceiveLink receiver) { if (done) { receiver.closeAsync().subscribe(); Operators.onNextDropped(receiver, messageSubscriber.currentContext()); return; } final ReactorReceiverMediator mediator = new ReactorReceiverMediator(this, receiver, prefetch, creditFlowMode, logger); if (mediatorHolder.trySet(mediator)) { mediator.onParentReady(); } else { logger.atWarning() .addKeyValue("oldLinkName", mediatorHolder.getLinkName()) .addKeyValue(LINK_NAME_KEY, receiver.getLinkName()) .addKeyValue(ENTITY_PATH_KEY, receiver.getEntityPath()) .log("Got a AmqpReceiveLink when the MessageFlux is already terminated."); receiver.closeAsync().subscribe(); Operators.onDiscard(receiver, messageSubscriber.currentContext()); } } /** * Signals operator termination with error. 
* <ul> * <li>Invoked by the upstream when it errors, or</li> * <li>Invoked by the drain-loop when it detects a 'non-retriable or retry exhaust' error, or </li> * <li>Invoked by the drain-loop when it detects the receiver signaled terminal completion and retry * is disabled (i.e., NULL_RETRY_POLICY is set).</li> * </ul> * * @param e the error signaled. */ @Override public void onError(Throwable e) { if (done) { Operators.onErrorDropped(e, messageSubscriber.currentContext()); return; } if (Exceptions.addThrowable(ERROR, this, e)) { done = true; final String logMessage; if (retryPolicy == NULL_RETRY_POLICY) { logMessage = "Terminal error signal from Upstream|Receiver arrived at MessageFlux."; } else { logMessage = "Terminal error signal from Upstream|RetryLoop arrived at MessageFlux."; } mediatorHolder.withReceiverInfo(logger.atWarning()).log(logMessage, e); drain(null); } else { Operators.onErrorDropped(e, messageSubscriber.currentContext()); } } /** * Signals operator termination with completion. * <ul> * <li>Invoked by the upstream when it completes, or</li> * <li>Invoked by the the drain-loop when it detects first receiver signaled terminal completion and retry * is disabled (i.e., NULL_RETRY_POLICY is set).</li> * </ul> */ @Override public void onComplete() { if (done) { return; } done = true; final String logMessage; if (retryPolicy == NULL_RETRY_POLICY) { logMessage = "Terminal completion signal from Upstream|Receiver arrived at MessageFlux."; } else { logMessage = "Terminal completion signal from Upstream arrived at MessageFlux."; } mediatorHolder.withReceiverInfo(logger.atWarning()).log(logMessage); drain(null); } /** * Invoked by the downstream message subscriber to signal the demand for messages. Whatever has been * requested can be sent downstream, so only signal the demand for what can be safely handled. * No messages will be sent downstream until the demand is signaled. * * @param n the number of messages to send to downstream. 
*/ @Override public void request(long n) { if (Operators.validate(n)) { Operators.addCap(REQUESTED, this, n); drain(null); } } /** * Invoked by downstream to signal termination by cancellation. */ @Override public void cancel() { if (cancelled) { return; } cancelled = true; mediatorHolder.withReceiverInfo(logger.atWarning()) .log("Downstream cancellation signal arrived at MessageFlux."); if (WIP.getAndIncrement(this) == 0) { upstream.cancel(); mediatorHolder.freeze(); } } /** * Invoked by the new mediator when it is ready to be used. A mediator constructed in 'onNext' moves * to ready state when its backing receiver is active. * * @param updateDispositionFunc the function to disposition messages from mediator's backing receiver. */ void onMediatorReady(DispositionFunction updateDispositionFunc) { retryAttempts.set(0); parent.onNextUpdateDispositionFunction(updateDispositionFunc); drain(null); } /** * The serialized entry point to drain-loop. * * @param dataSignal the message to drop if the operator is terminated by cancellation. */ void drain(Message dataSignal) { if (WIP.getAndIncrement(this) != 0) { if (dataSignal != null && cancelled) { Operators.onDiscard(dataSignal, messageSubscriber.currentContext()); } return; } drainLoop(); } /** * The serialized drain-loop (implementation patterns inspired from RxJava, Reactor Operators). * Reference: 'Operator Concurrency Primitives' series https: */ /** * CONTRACT: Never invoke from the outside of serialized drain-loop. * <p> * See if downstream signaled cancellation to terminate the operator, if so, react to the cancellation. * </p> * @param downstream the downstream. * @param messageDropped the message that gets dropped if cancellation was signaled. * @return true if canceled, false otherwise. */ private boolean terminateIfCancelled(CoreSubscriber<? 
super Message> downstream, Message messageDropped) { if (cancelled) { Operators.onDiscard(messageDropped, downstream.currentContext()); upstream.cancel(); mediatorHolder.freeze(); return true; } return false; } /** * CONTRACT: Never invoke from the outside of serialized drain-loop. * <p> * See if there is a pending signal for the operator termination with error or completion, if so, react to it * by terminating downstream. * </p> * * @param d indicate if the operator termination was signaled. * @param downstream the downstream. * @param messageDropped the message that gets dropped if termination happened. * @return true if terminated, false otherwise. */ private boolean terminateIfErrorOrCompletionSignaled(boolean d, CoreSubscriber<? super Message> downstream, Message messageDropped) { if (d) { final LoggingEventBuilder logBuilder = mediatorHolder.withReceiverInfo(logger.atWarning()); Throwable e = error; if (e != null && e != Exceptions.TERMINATED) { e = Exceptions.terminate(ERROR, this); Operators.onDiscard(messageDropped, downstream.currentContext()); upstream.cancel(); mediatorHolder.freeze(); logBuilder.log("MessageFlux reached a terminal error-state, signaling it downstream.", e); downstream.onError(e); return true; } Operators.onDiscard(messageDropped, downstream.currentContext()); upstream.cancel(); mediatorHolder.freeze(); logBuilder.log("MessageFlux reached a terminal completion-state, signaling it downstream."); downstream.onComplete(); return true; } return false; } /** * * CONTRACT: Never invoke from the outside of serialized drain-loop. * <p></p> * 1. When retry is enabled (i.e., NULL_RETRY_POLICY is not set) then schedule request (retry) for the next mediator if * <ul> * <li>the operator is not in a termination signaled state,</li> * <li> and * <ul> * <li>there is no error Or</li> * <li>error is retriable and the retry is not exhausted.</li> * </ul> * </li> * </ul> * 2. 
If retry is enabled (i.e., NULL_RETRY_POLICY is not set) and there is 'non-retriable or retry exhaust' * error, then set an error signal for the drain-loop to terminate the operator. * <br/> * 3. If retry is disabled (i.e., NULL_RETRY_POLICY is set), then set an error signal (if first receiver error-ed) * or completion signal (if first receiver completed) for the drain-loop to terminate the operator. * <p></p> * @param error the error that leads to error-ed termination of the last mediator or {@code null} * if terminated with completion. * @param downstream the downstream. * @param mediatorHolder the mediator holder. */ private void setTerminationSignalOrScheduleNextMediatorRequest(Throwable error, CoreSubscriber<? super Message> downstream, MediatorHolder mediatorHolder) { final LoggingEventBuilder logBuilder = mediatorHolder.withReceiverInfo(logger.atWarning()); if (cancelled || done) { logBuilder.log("MessageFlux reached terminal-state [done:{}, cancelled:{}].", done, cancelled); return; } if (retryPolicy == NULL_RETRY_POLICY) { if (error == null) { onComplete(); } else { onError(error); } return; } final Duration delay; if (error == null) { delay = Duration.ofSeconds(1); logBuilder.addKeyValue("retryAfter", delay.toMillis()) .log("Current mediator reached terminal completion-state (retriable:true)."); } else { final int attempt = retryAttempts.incrementAndGet(); delay = retryPolicy.calculateRetryDelay(error, attempt); if (delay != null) { logBuilder.addKeyValue("attempt", attempt) .addKeyValue("retryAfter", delay.toMillis()) .log("Current mediator reached terminal error-state (retriable:true).", error); } else { logBuilder.addKeyValue("attempt", attempt) .log("Current mediator reached terminal error-state (retriable:false) Or MessageFlux retries exhausted.", error); onError(error); return; } } try { scheduleNextMediatorRequest(delay, mediatorHolder); } catch (RejectedExecutionException ree) { final RuntimeException e = Operators.onRejectedExecution(ree, 
downstream.currentContext()); mediatorHolder.withReceiverInfo(logger.atWarning()) .log("Unable to schedule a request for a new mediator (retriable:false).", e); onError(e); } } /** * Schedule a task to request a new mediator. * * @param delay the backoff duration before requesting the next mediator. * @param mediatorHolder the mediator holder. * @throws RejectedExecutionException if the scheduler is unable to schedule the task. */ private void scheduleNextMediatorRequest(Duration delay, MediatorHolder mediatorHolder) { final Runnable task = () -> { final LoggingEventBuilder logBuilder = mediatorHolder.withReceiverInfo(logger.atWarning()); if (cancelled || done) { logBuilder.log("During the backoff, MessageFlux reached terminal-state [done:{}, cancelled:{}].", done, cancelled); return; } logBuilder.log("Requesting a new mediator."); upstream.request(1); }; mediatorHolder.nextMediatorRequestDisposable = Schedulers.parallel().schedule(task, delay.toMillis(), TimeUnit.MILLISECONDS); } }
yep, there are few places we use this pattern, so make sense to have a const and use key-value. **[DONE]**
private RecoveryTerminatedException checkRecoveryTerminated(String callSite) { final boolean isCacheTerminated = terminated; final boolean isConnectionTerminated = connection.isDisposed(); if (isCacheTerminated || isConnectionTerminated) { logger.atInfo() .addKeyValue(IS_CACHE_TERMINATED_KEY, isCacheTerminated) .addKeyValue(IS_CONNECTION_TERMINATED_KEY, isConnectionTerminated) .log("Channel recovery support is terminated. call-site:{}", callSite); return new RecoveryTerminatedException(connection.getId(), isCacheTerminated, isConnectionTerminated); } return null; }
.log("Channel recovery support is terminated. call-site:{}", callSite);
private RecoveryTerminatedException checkRecoveryTerminated(String callSite) { final boolean isCacheTerminated = terminated; final boolean isConnectionTerminated = connection.isDisposed(); if (isCacheTerminated || isConnectionTerminated) { logger.atInfo() .addKeyValue(IS_CACHE_TERMINATED_KEY, isCacheTerminated) .addKeyValue(IS_CONNECTION_TERMINATED_KEY, isConnectionTerminated) .addKeyValue(CALL_SITE_KEY, callSite) .log("Channel recovery support is terminated."); return new RecoveryTerminatedException(connection.getId(), isCacheTerminated, isConnectionTerminated); } return null; }
class RequestResponseChannelCache implements Disposable { private static final String IS_CACHE_TERMINATED_KEY = "isCacheTerminated"; private static final String IS_CONNECTION_TERMINATED_KEY = "isConnectionTerminated"; private static final String TRY_COUNT_KEY = "tryCount"; private final ClientLogger logger; private final ReactorConnection connection; private final Duration activationTimeout; private final Mono<RequestResponseChannel> createOrGetCachedChannel; private final Object lock = new Object(); private volatile boolean terminated; private volatile RequestResponseChannel currentChannel; RequestResponseChannelCache(ReactorConnection connection, String entityPath, String sessionName, String linksName, AmqpRetryPolicy retryPolicy) { Objects.requireNonNull(connection, "'connection' cannot be null."); Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); Objects.requireNonNull(sessionName, "'sessionName' cannot be null."); Objects.requireNonNull(linksName, "'linksName' cannot be null."); Objects.requireNonNull(retryPolicy, "'retryPolicy' cannot be null."); final Map<String, Object> loggingContext = new HashMap<>(2); loggingContext.put(CONNECTION_ID_KEY, connection.getId()); loggingContext.put(LINK_NAME_KEY, linksName); this.logger = new ClientLogger(RequestResponseChannelCache.class, loggingContext); this.connection = connection; this.activationTimeout = retryPolicy.getRetryOptions().getTryTimeout(); final Mono<RequestResponseChannel> newChannel = Mono.defer(() -> { final RecoveryTerminatedException terminatedError = checkRecoveryTerminated("new-channel"); if (terminatedError != null) { return Mono.error(terminatedError); } return connection.newRequestResponseChannel(sessionName, linksName, entityPath); }); this.createOrGetCachedChannel = newChannel .flatMap(c -> { logger.atInfo() .log("Waiting for channel to active."); final Mono<RequestResponseChannel> awaitToActive = c.getEndpointStates() .filter(s -> s == AmqpEndpointState.ACTIVE) .next() 
.switchIfEmpty(Mono.error(() -> new AmqpException(true, "Channel completed without being active.", null))) .then(Mono.just(c)) .timeout(activationTimeout, Mono.defer(() -> { final String timeoutMessage = String.format("The channel activation wait timed-out (%s).", activationTimeout); logger.atInfo().log(timeoutMessage + " Closing channel."); return c.closeAsync().then(Mono.error(new AmqpException(true, timeoutMessage, null))); })); return awaitToActive .doOnCancel(() -> { logger.atInfo() .log("The channel request was canceled while waiting to active."); if (!c.isDisposed()) { c.closeAsync().subscribe(); } }); }) .retryWhen(retryWhenSpec(retryPolicy)) .<RequestResponseChannel>handle((c, sink) -> { final RequestResponseChannel channel = c; final RecoveryTerminatedException terminatedError; synchronized (lock) { terminatedError = checkRecoveryTerminated("cache-refresh"); this.currentChannel = channel; } if (terminatedError != null) { if (!channel.isDisposed()) { channel.closeAsync().subscribe(); } sink.error(terminatedError.propagate()); } else { logger.atInfo().log("Emitting the new active channel."); sink.next(channel); } }).cacheInvalidateIf(c -> { if (c.isDisposedOrDisposalInInProgress()) { logger.atInfo().log("The channel is closed, requesting a new channel."); return true; } else { return false; } }); } /** * Get the Mono that, when subscribed, emits the cached RequestResponseChannel if it is active or creates and * emits a new RequestResponseChannel if the cache is empty or the current cached RequestResponseChannel is in * closed state. * * @return a Mono that emits active RequestResponseChannel. */ public Mono<RequestResponseChannel> get() { return createOrGetCachedChannel; } /** * Terminate the cache such that it is no longer possible to obtain RequestResponseChannel using {@link this * If there is a current (cached) RequestResponseChannel then it will be closed. 
*/ @Override public void dispose() { final RequestResponseChannel channel; synchronized (lock) { if (terminated) { return; } terminated = true; channel = currentChannel; } if (channel != null && !channel.isDisposed()) { logger.atInfo().log("Closing the cached channel and Terminating the channel recovery support."); channel.closeAsync().subscribe(); } else { logger.atInfo().log("Terminating the channel recovery support."); } } @Override public boolean isDisposed() { return terminated; } private Retry retryWhenSpec(AmqpRetryPolicy retryPolicy) { return Retry.from(retrySignals -> retrySignals .concatMap(retrySignal -> { final Retry.RetrySignal signal = retrySignal.copy(); final Throwable error = signal.failure(); final long iteration = signal.totalRetriesInARow(); if (error == null) { return Mono.error(new IllegalStateException("RetrySignal::failure() not expected to be null.")); } final boolean shouldRetry = error instanceof TimeoutException || (error instanceof AmqpException && ((AmqpException) error).isTransient() || (error instanceof IllegalStateException) || (error instanceof RejectedExecutionException)); if (!shouldRetry) { logger.atWarning() .addKeyValue(TRY_COUNT_KEY, iteration) .log("Exception is non-retriable, not retrying for a new channel.", error); if (error instanceof RecoveryTerminatedException) { return Mono.error(((RecoveryTerminatedException) error).propagate()); } else { return Mono.error(error); } } final Throwable errorToUse = error instanceof AmqpException ? 
error : new AmqpException(true, "Non-AmqpException occurred upstream.", error, null); final long attempts = Math.min(iteration, retryPolicy.getMaxRetries()); final Duration backoff = retryPolicy.calculateRetryDelay(errorToUse, (int) attempts); if (backoff == null) { logger.atWarning() .addKeyValue(TRY_COUNT_KEY, iteration) .log("Retry is disabled, not retrying for a new channel.", error); return Mono.error(error); } logger.atInfo() .addKeyValue(TRY_COUNT_KEY, iteration) .addKeyValue(INTERVAL_KEY, backoff.toMillis()) .log("Transient error occurred. Retrying.", error); return Mono.delay(backoff); })); } /** * Check if this cache is in a state where the cache refresh (i.e. recovery of RequestResponseChannel) is no longer * possible. * <p> * The recovery mechanism is terminated once the cache is terminated due to {@link RequestResponseChannelCache * call or the parent {@link ReactorConnection} is in terminated state. * Since the parent {@link ReactorConnection} hosts any RequestResponseChannel object that RequestResponseChannelCache * caches, recovery (scoped to the Connection) is impossible once the Connection is terminated * (i.e. connection.isDisposed() == true). Which also means RequestResponseChannelCache cannot outlive the Connection. * * @param callSite the call site checking the recovery termination (for logging). * @return {@link RecoveryTerminatedException} if the recovery is terminated, {@code null} otherwise. */ /** * The error type (internal to the cache) representing the termination of recovery support, which means cache cannot * be refreshed any longer. 
* @see RequestResponseChannelCache
*/
private static final class RecoveryTerminatedException extends RuntimeException {
    private final String connectionId;
    // "isCacheTerminated:<b> isConnectionTerminated:<b>" text reused when translating
    // to the propagated exception.
    private final String message;

    /**
     * @param connectionId the id of the parent connection.
     * @param isCacheTerminated whether this cache was terminated (disposed).
     * @param isConnectionTerminated whether the parent connection was terminated.
     */
    RecoveryTerminatedException(String connectionId, boolean isCacheTerminated, boolean isConnectionTerminated) {
        // Also hand the message to RuntimeException so getMessage() and stack traces are
        // populated; previously only the field was set and getMessage() returned null.
        super(String.format("%s:%b %s:%b", IS_CACHE_TERMINATED_KEY, isCacheTerminated,
            IS_CONNECTION_TERMINATED_KEY, isConnectionTerminated));
        this.connectionId = connectionId;
        this.message = super.getMessage();
    }

    /**
     * Translate this recovery terminated error to {@link RequestResponseChannelClosedException} to propagate
     * to the downstream of the {@link RequestResponseChannelCache}.
     * <p>
     * Termination of the recovery (due to Cache or Connection termination) means any cached RequestResponseChannel
     * is terminated or no new RequestResponseChannel can host on the Connection. In this case, we intentionally
     * propagate 'RequestResponseChannelClosedException' to downstream. If the downstream is a part async chain with
     * the {@link ReactorConnectionCache} as upstream, then the chain may retry on this specific error type to obtain
     * a new Connection and a new RequestResponseChannelCache which provides RequestResponseChannel hosted on this
     * new Connection. Examples of such async chains are those that enable Producer and Consumer recovery.
     *
     * @return the {@link RequestResponseChannelClosedException}.
     */
    RequestResponseChannelClosedException propagate() {
        return new RequestResponseChannelClosedException(connectionId, message);
    }
}
}
class RequestResponseChannelCache implements Disposable { private static final String IS_CACHE_TERMINATED_KEY = "isCacheTerminated"; private static final String IS_CONNECTION_TERMINATED_KEY = "isConnectionTerminated"; private static final String TRY_COUNT_KEY = "tryCount"; private final ClientLogger logger; private final ReactorConnection connection; private final Duration activationTimeout; private final Mono<RequestResponseChannel> createOrGetCachedChannel; private final Object lock = new Object(); private volatile boolean terminated; private volatile RequestResponseChannel currentChannel; RequestResponseChannelCache(ReactorConnection connection, String entityPath, String sessionName, String linksName, AmqpRetryPolicy retryPolicy) { Objects.requireNonNull(connection, "'connection' cannot be null."); Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); Objects.requireNonNull(sessionName, "'sessionName' cannot be null."); Objects.requireNonNull(linksName, "'linksName' cannot be null."); Objects.requireNonNull(retryPolicy, "'retryPolicy' cannot be null."); final Map<String, Object> loggingContext = new HashMap<>(2); loggingContext.put(CONNECTION_ID_KEY, connection.getId()); loggingContext.put(LINK_NAME_KEY, linksName); this.logger = new ClientLogger(RequestResponseChannelCache.class, loggingContext); this.connection = connection; this.activationTimeout = retryPolicy.getRetryOptions().getTryTimeout(); final Mono<RequestResponseChannel> newChannel = Mono.defer(() -> { final RecoveryTerminatedException terminatedError = checkRecoveryTerminated("new-channel"); if (terminatedError != null) { return Mono.error(terminatedError); } return connection.newRequestResponseChannel(sessionName, linksName, entityPath); }); this.createOrGetCachedChannel = newChannel .flatMap(c -> { logger.atInfo() .log("Waiting for channel to active."); final Mono<RequestResponseChannel> awaitToActive = c.getEndpointStates() .filter(s -> s == AmqpEndpointState.ACTIVE) .next() 
.switchIfEmpty(Mono.error(() -> new AmqpException(true, "Channel completed without being active.", null))) .then(Mono.just(c)) .timeout(activationTimeout, Mono.defer(() -> { final String timeoutMessage = String.format("The channel activation wait timed-out (%s).", activationTimeout); logger.atInfo().log(timeoutMessage + " Closing channel."); return c.closeAsync().then(Mono.error(new AmqpException(true, timeoutMessage, null))); })); return awaitToActive .doOnCancel(() -> { logger.atInfo() .log("The channel request was canceled while waiting to active."); if (!c.isDisposed()) { c.closeAsync().subscribe(); } }); }) .retryWhen(retryWhenSpec(retryPolicy)) .<RequestResponseChannel>handle((c, sink) -> { final RequestResponseChannel channel = c; final RecoveryTerminatedException terminatedError; synchronized (lock) { terminatedError = checkRecoveryTerminated("cache-refresh"); this.currentChannel = channel; } if (terminatedError != null) { if (!channel.isDisposed()) { channel.closeAsync().subscribe(); } sink.error(terminatedError.propagate()); } else { logger.atInfo().log("Emitting the new active channel."); sink.next(channel); } }).cacheInvalidateIf(c -> { if (c.isDisposedOrDisposalInInProgress()) { logger.atInfo().log("The channel is closed, requesting a new channel."); return true; } else { return false; } }); } /** * Get the Mono that, when subscribed, emits the cached RequestResponseChannel if it is active or creates and * emits a new RequestResponseChannel if the cache is empty or the current cached RequestResponseChannel is in * closed state. * * @return a Mono that emits active RequestResponseChannel. */ public Mono<RequestResponseChannel> get() { return createOrGetCachedChannel; } /** * Terminate the cache such that it is no longer possible to obtain RequestResponseChannel using {@link this * If there is a current (cached) RequestResponseChannel then it will be closed. 
*/ @Override public void dispose() { final RequestResponseChannel channel; synchronized (lock) { if (terminated) { return; } terminated = true; channel = currentChannel; } if (channel != null && !channel.isDisposed()) { logger.atInfo().log("Closing the cached channel and Terminating the channel recovery support."); channel.closeAsync().subscribe(); } else { logger.atInfo().log("Terminating the channel recovery support."); } } @Override public boolean isDisposed() { return terminated; } private Retry retryWhenSpec(AmqpRetryPolicy retryPolicy) { return Retry.from(retrySignals -> retrySignals .concatMap(retrySignal -> { final Retry.RetrySignal signal = retrySignal.copy(); final Throwable error = signal.failure(); final long iteration = signal.totalRetriesInARow(); if (error == null) { return Mono.error(new IllegalStateException("RetrySignal::failure() not expected to be null.")); } final boolean shouldRetry = error instanceof TimeoutException || (error instanceof AmqpException && ((AmqpException) error).isTransient() || (error instanceof IllegalStateException) || (error instanceof RejectedExecutionException)); if (!shouldRetry) { logger.atWarning() .addKeyValue(TRY_COUNT_KEY, iteration) .log("Exception is non-retriable, not retrying for a new channel.", error); if (error instanceof RecoveryTerminatedException) { return Mono.error(((RecoveryTerminatedException) error).propagate()); } else { return Mono.error(error); } } final Throwable errorToUse = error instanceof AmqpException ? 
error : new AmqpException(true, "Non-AmqpException occurred upstream.", error, null); final long attempts = Math.min(iteration, retryPolicy.getMaxRetries()); final Duration backoff = retryPolicy.calculateRetryDelay(errorToUse, (int) attempts); if (backoff == null) { logger.atWarning() .addKeyValue(TRY_COUNT_KEY, iteration) .log("Retry is disabled, not retrying for a new channel.", error); return Mono.error(error); } logger.atInfo() .addKeyValue(TRY_COUNT_KEY, iteration) .addKeyValue(INTERVAL_KEY, backoff.toMillis()) .log("Transient error occurred. Retrying.", error); return Mono.delay(backoff); })); } /** * Check if this cache is in a state where the cache refresh (i.e. recovery of RequestResponseChannel) is no longer * possible. * <p> * The recovery mechanism is terminated once the cache is terminated due to {@link RequestResponseChannelCache * call or the parent {@link ReactorConnection} is in terminated state. * Since the parent {@link ReactorConnection} hosts any RequestResponseChannel object that RequestResponseChannelCache * caches, recovery (scoped to the Connection) is impossible once the Connection is terminated * (i.e. connection.isDisposed() == true). Which also means RequestResponseChannelCache cannot outlive the Connection. * * @param callSite the call site checking the recovery termination (for logging). * @return {@link RecoveryTerminatedException} if the recovery is terminated, {@code null} otherwise. */ /** * The error type (internal to the cache) representing the termination of recovery support, which means cache cannot * be refreshed any longer. 
* @see RequestResponseChannelCache
*/
private static final class RecoveryTerminatedException extends RuntimeException {
    private final String connectionId;
    // "isCacheTerminated:<b> isConnectionTerminated:<b>" text reused when translating
    // to the propagated exception (note: not passed to RuntimeException, so
    // getMessage() is null — intentional? TODO confirm).
    private final String message;

    RecoveryTerminatedException(String connectionId, boolean isCacheTerminated, boolean isConnectionTerminated) {
        this.connectionId = connectionId;
        this.message = String.format("%s:%b %s:%b", IS_CACHE_TERMINATED_KEY, isCacheTerminated,
            IS_CONNECTION_TERMINATED_KEY, isConnectionTerminated);
    }

    /**
     * Translate this recovery terminated error to {@link RequestResponseChannelClosedException} to propagate
     * to the downstream of the {@link RequestResponseChannelCache}.
     * <p>
     * Termination of the recovery (due to Cache or Connection termination) means any cached RequestResponseChannel
     * is terminated or no new RequestResponseChannel can host on the Connection. In this case, we intentionally
     * propagate 'RequestResponseChannelClosedException' to downstream. If the downstream is a part async chain with
     * the {@link ReactorConnectionCache} as upstream, then the chain may retry on this specific error type to obtain
     * a new Connection and a new RequestResponseChannelCache which provides RequestResponseChannel hosted on this
     * new Connection. Examples of such async chains are those that enable Producer and Consumer recovery.
     *
     * @return the {@link RequestResponseChannelClosedException}.
     */
    RequestResponseChannelClosedException propagate() {
        return new RequestResponseChannelClosedException(connectionId, message);
    }
}
}
Yes, this is better, let’s follow this pattern, thanks! **[DONE]**
/**
 * Begins the client-side close: first lets the receive link handler do its graceful resource
 * cleanup, then initiates local-close on the underlying receiver via the dispatcher.
 *
 * @param errorCondition error condition to attach to the receiver on close (may be null).
 * @return a {@link Mono} emitting {@code true} if local-close was scheduled on the dispatcher,
 *     {@code false} if scheduling failed and the close was completed manually.
 */
private Mono<Boolean> beginClose(ErrorCondition errorCondition) {
    // Work to run on the ProtonJ Reactor thread: locally close the receiver and attach
    // the error condition unless one is already set.
    final Runnable localClose = () -> {
        if (receiver.getLocalState() != EndpointState.CLOSED) {
            receiver.close();
            if (receiver.getCondition() == null) {
                receiver.setCondition(errorCondition);
            }
        }
    };
    final Mono<Boolean> localCloseMono = Mono.create(sink -> {
        boolean localCloseScheduled = false;
        try {
            dispatcher.invoke(localClose);
            localCloseScheduled = true;
        } catch (IOException e) {
            // Dispatcher IO sink is closed; no remote-close ack can arrive, so run the
            // close inline and finish the close sequence manually.
            logger.warning("IO sink was closed when scheduling work. Manually invoking and completing close.", e);
            localClose.run();
            terminateEndpointState();
            completeClose();
        } catch (RejectedExecutionException e) {
            // Dispatcher rejected the work (e.g. Reactor shutting down); same manual path.
            logger.info("RejectedExecutionException when scheduling on ReactorDispatcher. Manually invoking and completing close.");
            localClose.run();
            terminateEndpointState();
            completeClose();
        } finally {
            sink.success(localCloseScheduled);
        }
    });
    // Sequence the handler's own graceful cleanup before the local-close, matching the
    // handler's no-arg beginClose() API used elsewhere (was beginClose(localCloseMono)).
    return handler.beginClose().then(localCloseMono);
}
return handler.beginClose(localCloseMono);
/**
 * Begins the client-side close: first lets the receive link handler run its graceful cleanup,
 * then initiates local-close on the underlying receiver via the dispatcher.
 *
 * @param errorCondition error condition to attach to the receiver on close (may be null).
 * @return a {@link Mono} emitting {@code true} if local-close was scheduled on the dispatcher,
 *     {@code false} if scheduling failed and the close was completed manually.
 */
private Mono<Boolean> beginClose(ErrorCondition errorCondition) {
    // Work to run on the ProtonJ Reactor thread: locally close the receiver and attach
    // the error condition unless the receiver already carries one.
    final Runnable localClose = () -> {
        if (receiver.getLocalState() != EndpointState.CLOSED) {
            receiver.close();
            if (receiver.getCondition() == null) {
                receiver.setCondition(errorCondition);
            }
        }
    };
    final Mono<Boolean> localCloseMono = Mono.create(sink -> {
        boolean localCloseScheduled = false;
        try {
            // Schedule the local-close on the dispatcher (Reactor) thread.
            dispatcher.invoke(localClose);
            localCloseScheduled = true;
        } catch (IOException e) {
            // Dispatcher IO sink is closed; no remote-close ack can arrive, so run the
            // close inline and finish the close sequence manually.
            logger.warning("IO sink was closed when scheduling work. Manually invoking and completing close.", e);
            localClose.run();
            terminateEndpointState();
            completeClose();
        } catch (RejectedExecutionException e) {
            // Dispatcher rejected the work (e.g. Reactor shutting down); same manual path.
            logger.info("RejectedExecutionException when scheduling on ReactorDispatcher. Manually invoking and completing close.");
            localClose.run();
            terminateEndpointState();
            completeClose();
        } finally {
            // Always signal whether the local-close was scheduled, so the caller knows
            // whether to wait for the remote-close ack.
            sink.success(localCloseScheduled);
        }
    });
    // Handler cleanup runs first, then the local-close is initiated.
    return handler.beginClose().then(localCloseMono);
}
class ReactorReceiver implements AmqpReceiveLink, AsyncCloseable, AutoCloseable { private static final Symbol SEQUENCE_NUMBER_ANNOTATION = Symbol.valueOf(SEQUENCE_NUMBER_ANNOTATION_NAME.getValue()); private final String entityPath; private final Receiver receiver; private final ReceiveLinkHandlerWrapper handler; private final TokenManager tokenManager; private final ReactorDispatcher dispatcher; private final Disposable subscriptions; private final AtomicBoolean isDisposed = new AtomicBoolean(); private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final AtomicBoolean isCompleteCloseCalled = new AtomicBoolean(); private final Flux<Message> messagesProcessor; private final AmqpRetryOptions retryOptions; private final ClientLogger logger; private final boolean isV2; private final Flux<AmqpEndpointState> endpointStates; private final Sinks.Empty<AmqpEndpointState> terminateEndpointStates = Sinks.empty(); private final AtomicReference<Supplier<Integer>> creditSupplier = new AtomicReference<>(); private final AmqpMetricsProvider metricsProvider; private final AtomicLong lastSequenceNumber = new AtomicLong(); private final AutoCloseable trackPrefetchSeqNoSubscription; protected ReactorReceiver(AmqpConnection amqpConnection, String entityPath, Receiver receiver, ReceiveLinkHandlerWrapper handler, TokenManager tokenManager, ReactorDispatcher dispatcher, AmqpRetryOptions retryOptions, AmqpMetricsProvider metricsProvider) { this.entityPath = entityPath; this.receiver = receiver; this.handler = handler; this.tokenManager = tokenManager; this.dispatcher = dispatcher; this.metricsProvider = metricsProvider; this.trackPrefetchSeqNoSubscription = this.metricsProvider.trackPrefetchSequenceNumber(lastSequenceNumber::get); Map<String, Object> loggingContext = createContextWithConnectionId(handler.getConnectionId()); loggingContext.put(LINK_NAME_KEY, this.handler.getLinkName()); loggingContext.put(ENTITY_PATH_KEY, entityPath); this.logger = new 
ClientLogger(ReactorReceiver.class, loggingContext); handler.setLogger(this.logger); this.isV2 = handler.isV2(); if (!this.isV2) { this.messagesProcessor = this.handler.getDeliveredMessagesV1() .flatMap(delivery -> { return Mono.create(sink -> { try { this.dispatcher.invoke(() -> { if (isDisposed()) { sink.error(new IllegalStateException( "Cannot decode delivery when ReactorReceiver instance is closed.")); return; } final Message message = decodeDelivery(delivery); if (metricsProvider.isPrefetchedSequenceNumberEnabled()) { Long seqNo = getSequenceNumber(message); if (seqNo != null) { lastSequenceNumber.set(seqNo); } } final int creditsLeft = receiver.getRemoteCredit(); if (creditsLeft > 0) { sink.success(message); return; } final Supplier<Integer> supplier = creditSupplier.get(); final Integer credits = supplier.get(); if (credits != null && credits > 0) { logger.atVerbose() .addKeyValue("credits", credits) .log("Adding credits."); receiver.flow(credits); } else { logger.atVerbose() .addKeyValue("credits", credits) .log("There are no credits to add."); } metricsProvider.recordAddCredits(credits == null ? 0 : credits); sink.success(message); }); } catch (IOException | RejectedExecutionException e) { sink.error(e); } }); }, 1); } else { this.messagesProcessor = this.handler.getDeliveredMessagesV2() .map(message -> { if (metricsProvider.isPrefetchedSequenceNumberEnabled()) { Long seqNo = getSequenceNumber(message); if (seqNo != null) { lastSequenceNumber.set(seqNo); } } return message; }); } this.retryOptions = retryOptions; this.endpointStates = this.handler.getEndpointStates() .map(state -> { logger.atVerbose() .log("State {}", state); return AmqpEndpointStateUtil.getConnectionState(state); }) .doOnError(error -> { final String message = isDisposed.getAndSet(true) ? "This was already disposed. Dropping error." 
: "Freeing resources due to error."; logger.atInfo() .log(message); completeClose(); }) .doOnComplete(() -> { final String message = isDisposed.getAndSet(true) ? "This was already disposed." : "Freeing resources."; logger.atVerbose() .log(message); completeClose(); }) .cache(1); this.subscriptions = Disposables.composite( this.endpointStates.subscribe(), this.tokenManager.getAuthorizationResults() .onErrorResume(error -> { final Mono<Void> operation = closeAsync("Token renewal failure. Disposing receive link.", new ErrorCondition(Symbol.getSymbol(AmqpErrorCondition.NOT_ALLOWED.getErrorCondition()), error.getMessage())); return operation.then(Mono.empty()); }).subscribe(response -> logger.atVerbose() .addKeyValue("response", response) .log("Token refreshed."), error -> { }, () -> { logger.atVerbose() .log("Authorization completed."); closeAsync("Authorization completed. Disposing.", null).subscribe(); }), amqpConnection.getShutdownSignals().flatMap(signal -> { logger.verbose("Shutdown signal received."); return closeAsync("Connection shutdown.", null); }).subscribe()); } @Override public Flux<AmqpEndpointState> getEndpointStates() { return endpointStates .distinctUntilChanged() .takeUntilOther(terminateEndpointStates.asMono()); } @Override public String getConnectionId() { return handler.getConnectionId(); } @Override public Flux<Message> receive() { return messagesProcessor; } @Override public Mono<Void> updateDisposition(String deliveryTag, DeliveryState deliveryState) { return handler.sendDisposition(deliveryTag, deliveryState); } @Override public Mono<Void> addCredits(int credits) { if (isDisposed()) { return monoError(logger, new IllegalStateException("Cannot add credits to closed link: " + getLinkName())); } return Mono.create(sink -> { try { dispatcher.invoke(() -> { receiver.flow(credits); metricsProvider.recordAddCredits(credits); sink.success(); }); } catch (IOException e) { sink.error(new UncheckedIOException(String.format( "connectionId[%s] linkName[%s] 
Unable to schedule work to add more credits.", handler.getConnectionId(), getLinkName()), e)); } catch (RejectedExecutionException e) { sink.error(e); } }); } @Override public void addCredit(Supplier<Long> creditSupplier) { assert isV2; if (isDisposed()) { throw new RejectedExecutionException("Cannot schedule credit flow when the link is disposed."); } try { dispatcher.invoke(() -> { final long credit = creditSupplier.get(); receiver.flow((int) credit); metricsProvider.recordAddCredits((int) credit); }); } catch (IOException e) { throw new UncheckedIOException("Unable to schedule credit flow.", e); } } @Override public int getCredits() { return receiver.getRemoteCredit(); } @Override public void setEmptyCreditListener(Supplier<Integer> creditSupplier) { assert !isV2; Objects.requireNonNull(creditSupplier, "'creditSupplier' cannot be null."); this.creditSupplier.set(creditSupplier); } @Override public String getLinkName() { return handler.getLinkName(); } @Override public String getEntityPath() { return entityPath; } @Override public String getHostname() { return handler.getHostname(); } @Override public boolean isDisposed() { return isDisposed.get(); } @Override public void dispose() { close(); } @Override public void close() { closeAsync().block(retryOptions.getTryTimeout()); } @Override public Mono<Void> closeAsync() { return closeAsync("User invoked close operation.", null); } protected Message decodeDelivery(Delivery delivery) { assert !isV2; final int messageSize = delivery.pending(); final byte[] buffer = new byte[messageSize]; final int read = receiver.recv(buffer, 0, messageSize); receiver.advance(); final Message message = Proton.message(); message.decode(buffer, 0, read); delivery.settle(); return message; } /** * Disposes of the receiver when an exception is encountered. * <p> * While {@link ReactorReceiver * contract, this API performs the same disposal with additional * contextual information. 
For example, the context may indicate if the resource needs to be disposed of * internally when there is an error in the link, session or connection. * </p> * * <p> * Closing ReactorReceiver involves 3 stages, running in following order - * <ul> * <li>local-close (client to broker) via beginClose() </li> * <li>remote-close ack (broker to client)</li> * <li>disposal of ReactorReceiver resources via completeClose()</li> * </ul> * @link <a href="https: * * @param message Message to log. * @param errorCondition Error condition associated with close operation. */ protected Mono<Void> closeAsync(String message, ErrorCondition errorCondition) { if (isDisposed.getAndSet(true)) { return getIsClosedMono(); } addErrorCondition(logger.atVerbose(), errorCondition) .log("Setting error condition and disposing. {}", message); return beginClose(errorCondition) .flatMap(localCloseScheduled -> { if (localCloseScheduled) { return timeoutRemoteCloseAck(); } else { return Mono.empty(); } }) .publishOn(Schedulers.boundedElastic()); } /** * Gets the Mono that signals completion when the disposal/closing of ReactorReceiver is completed. * * @return the disposal/closing completion Mono. */ protected Mono<Void> getIsClosedMono() { return isClosedMono.asMono().publishOn(Schedulers.boundedElastic()); } protected void onHandlerClose() { assert !isV2; } /** * Begins the client side close by requesting receive link handler for any graceful resource * cleanup, then initiating local-close on underlying receiver. * * @param errorCondition Error condition associated with close operation. * @return a {@link Mono} when subscribed attempt to initiate local-close, emitting {@code true} * if local-close is scheduled on the dispatcher, emits {@code false} if unable to schedule * local-close that lead to manual close. */ /** * Apply timeout on remote-close ack. 
If timeout happens, i.e., if remote-close ack doesn't arrive within * the timeout duration, then terminate the Flux returned by getEndpointStates() and complete close. * * a {@link Mono} that registers remote-close ack timeout based close cleanup. */ private Mono<Void> timeoutRemoteCloseAck() { return isClosedMono.asMono() .timeout(retryOptions.getTryTimeout()) .onErrorResume(error -> { if (error instanceof TimeoutException) { logger.info("Timeout waiting for RemoteClose. Manually terminating EndpointStates and completing close."); terminateEndpointState(); completeClose(); } return Mono.empty(); }) .subscribeOn(Schedulers.boundedElastic()); } /** * Terminate the Flux returned by the getEndpointStates() API. * * <p> * The termination of Flux returned by getEndpointStates() is the signal that "AmqpReceiveLinkProcessor" * uses to either terminate its downstream or obtain a new ReactorReceiver to continue delivering events * downstream. * </p> */ private void terminateEndpointState() { terminateEndpointStates.emitEmpty((signalType, emitResult) -> { addSignalTypeAndResult(logger.atVerbose(), signalType, emitResult) .log("Could not emit EndpointStates termination."); return false; }); } /** * Completes the closing of the underlying receiver, which includes disposing of subscriptions, * closing of token manager, and releasing of protonJ resources. * <p> * The completeClose invoked in 3 cases - when the broker ack for beginClose (i.e. ack via * remote-close frame), if the broker ack for beginClose never comes through within timeout, * if the client fails to run beginClose. 
* </p> */ private void completeClose() { if (isCompleteCloseCalled.getAndSet(true)) { return; } isClosedMono.emitEmpty((signalType, result) -> { addSignalTypeAndResult(logger.atWarning(), signalType, result) .log("Unable to emit shutdown signal."); return false; }); subscriptions.dispose(); if (tokenManager != null) { tokenManager.close(); } handler.close(); if (!isV2) { onHandlerClose(); } receiver.free(); try { trackPrefetchSeqNoSubscription.close(); } catch (Exception e) { logger.verbose("Error closing metrics subscription.", e); } } private Long getSequenceNumber(Message message) { if (message == null || message.getMessageAnnotations() == null || message.getBody() == null) { return null; } Map<Symbol, Object> properties = message.getMessageAnnotations().getValue(); Object seqNo = properties != null ? properties.get(SEQUENCE_NUMBER_ANNOTATION) : null; if (seqNo instanceof Integer) { return ((Integer) seqNo).longValue(); } else if (seqNo instanceof Long) { return (Long) seqNo; } else if (seqNo != null) { logger.verbose("Received message has unexpected `x-opt-sequence-number` annotation value - `{}`. Ignoring it.", seqNo); } return null; } @Override public String toString() { return String.format("connectionId: [%s] entity path: [%s] linkName: [%s]", receiver.getName(), entityPath, getLinkName()); } }
class ReactorReceiver implements AmqpReceiveLink, AsyncCloseable, AutoCloseable { private static final Symbol SEQUENCE_NUMBER_ANNOTATION = Symbol.valueOf(SEQUENCE_NUMBER_ANNOTATION_NAME.getValue()); private final String entityPath; private final Receiver receiver; private final ReceiveLinkHandlerWrapper handler; private final TokenManager tokenManager; private final ReactorDispatcher dispatcher; private final Disposable subscriptions; private final AtomicBoolean isDisposed = new AtomicBoolean(); private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final AtomicBoolean isCompleteCloseCalled = new AtomicBoolean(); private final Flux<Message> messagesProcessor; private final AmqpRetryOptions retryOptions; private final ClientLogger logger; private final boolean isV2; private final Flux<AmqpEndpointState> endpointStates; private final Sinks.Empty<AmqpEndpointState> terminateEndpointStates = Sinks.empty(); private final AtomicReference<Supplier<Integer>> creditSupplier = new AtomicReference<>(); private final AmqpMetricsProvider metricsProvider; private final AtomicLong lastSequenceNumber = new AtomicLong(); private final AutoCloseable trackPrefetchSeqNoSubscription; protected ReactorReceiver(AmqpConnection amqpConnection, String entityPath, Receiver receiver, ReceiveLinkHandlerWrapper handler, TokenManager tokenManager, ReactorDispatcher dispatcher, AmqpRetryOptions retryOptions, AmqpMetricsProvider metricsProvider) { this.entityPath = entityPath; this.receiver = receiver; this.handler = handler; this.tokenManager = tokenManager; this.dispatcher = dispatcher; this.metricsProvider = metricsProvider; this.trackPrefetchSeqNoSubscription = this.metricsProvider.trackPrefetchSequenceNumber(lastSequenceNumber::get); Map<String, Object> loggingContext = createContextWithConnectionId(handler.getConnectionId()); loggingContext.put(LINK_NAME_KEY, this.handler.getLinkName()); loggingContext.put(ENTITY_PATH_KEY, entityPath); this.logger = new 
ClientLogger(ReactorReceiver.class, loggingContext); handler.setLogger(this.logger); this.isV2 = handler.isV2(); if (!this.isV2) { this.messagesProcessor = this.handler.getDeliveredMessagesV1() .flatMap(delivery -> { return Mono.create(sink -> { try { this.dispatcher.invoke(() -> { if (isDisposed()) { sink.error(new IllegalStateException( "Cannot decode delivery when ReactorReceiver instance is closed.")); return; } final Message message = decodeDelivery(delivery); if (metricsProvider.isPrefetchedSequenceNumberEnabled()) { Long seqNo = getSequenceNumber(message); if (seqNo != null) { lastSequenceNumber.set(seqNo); } } final int creditsLeft = receiver.getRemoteCredit(); if (creditsLeft > 0) { sink.success(message); return; } final Supplier<Integer> supplier = creditSupplier.get(); final Integer credits = supplier.get(); if (credits != null && credits > 0) { logger.atVerbose() .addKeyValue("credits", credits) .log("Adding credits."); receiver.flow(credits); } metricsProvider.recordAddCredits(credits == null ? 0 : credits); sink.success(message); }); } catch (IOException | RejectedExecutionException e) { sink.error(e); } }); }, 1); } else { if (metricsProvider.isPrefetchedSequenceNumberEnabled()) { this.messagesProcessor = this.handler.getDeliveredMessagesV2() .map(message -> { final Long seqNo = getSequenceNumber(message); if (seqNo != null) { lastSequenceNumber.set(seqNo); } return message; }); } else { this.messagesProcessor = this.handler.getDeliveredMessagesV2(); } } this.retryOptions = retryOptions; this.endpointStates = this.handler.getEndpointStates() .map(state -> { logger.atVerbose() .log("State {}", state); return AmqpEndpointStateUtil.getConnectionState(state); }) .doOnError(error -> { final String message = isDisposed.getAndSet(true) ? "This was already disposed. Dropping error." : "Freeing resources due to error."; logger.atInfo() .log(message); completeClose(); }) .doOnComplete(() -> { final String message = isDisposed.getAndSet(true) ? 
"This was already disposed." : "Freeing resources."; logger.atVerbose() .log(message); completeClose(); }) .cache(1); this.subscriptions = Disposables.composite( this.endpointStates.subscribe(), this.tokenManager.getAuthorizationResults() .onErrorResume(error -> { final Mono<Void> operation = closeAsync("Token renewal failure. Disposing receive link.", new ErrorCondition(Symbol.getSymbol(AmqpErrorCondition.NOT_ALLOWED.getErrorCondition()), error.getMessage())); return operation.then(Mono.empty()); }).subscribe(response -> logger.atVerbose() .addKeyValue("response", response) .log("Token refreshed."), error -> { }, () -> { logger.atVerbose() .log("Authorization completed."); closeAsync("Authorization completed. Disposing.", null).subscribe(); }), amqpConnection.getShutdownSignals().flatMap(signal -> { logger.verbose("Shutdown signal received."); return closeAsync("Connection shutdown.", null); }).subscribe()); } @Override public Flux<AmqpEndpointState> getEndpointStates() { return endpointStates .distinctUntilChanged() .takeUntilOther(terminateEndpointStates.asMono()); } @Override public String getConnectionId() { return handler.getConnectionId(); } @Override public Flux<Message> receive() { return messagesProcessor; } @Override public Mono<Void> updateDisposition(String deliveryTag, DeliveryState deliveryState) { return handler.sendDisposition(deliveryTag, deliveryState); } @Override public Mono<Void> addCredits(int credits) { if (isDisposed()) { return monoError(logger, new IllegalStateException("Cannot add credits to closed link: " + getLinkName())); } return Mono.create(sink -> { try { dispatcher.invoke(() -> { receiver.flow(credits); metricsProvider.recordAddCredits(credits); sink.success(); }); } catch (IOException e) { sink.error(new UncheckedIOException(String.format( "connectionId[%s] linkName[%s] Unable to schedule work to add more credits.", handler.getConnectionId(), getLinkName()), e)); } catch (RejectedExecutionException e) { sink.error(e); } }); } 
@Override public void addCredit(Supplier<Long> creditSupplier) { assert isV2; if (isDisposed()) { throw new RejectedExecutionException("Cannot schedule credit flow when the link is disposed."); } try { dispatcher.invoke(() -> { final long credit = creditSupplier.get(); receiver.flow((int) credit); metricsProvider.recordAddCredits((int) credit); }); } catch (IOException e) { throw new UncheckedIOException("Unable to schedule credit flow.", e); } } @Override public int getCredits() { return receiver.getRemoteCredit(); } @Override public void setEmptyCreditListener(Supplier<Integer> creditSupplier) { assert !isV2; Objects.requireNonNull(creditSupplier, "'creditSupplier' cannot be null."); this.creditSupplier.set(creditSupplier); } @Override public String getLinkName() { return handler.getLinkName(); } @Override public String getEntityPath() { return entityPath; } @Override public String getHostname() { return handler.getHostname(); } @Override public boolean isDisposed() { return isDisposed.get(); } @Override public void dispose() { close(); } @Override public void close() { closeAsync().block(retryOptions.getTryTimeout()); } @Override public Mono<Void> closeAsync() { return closeAsync("User invoked close operation.", null); } protected Message decodeDelivery(Delivery delivery) { assert !isV2; final int messageSize = delivery.pending(); final byte[] buffer = new byte[messageSize]; final int read = receiver.recv(buffer, 0, messageSize); receiver.advance(); final Message message = Proton.message(); message.decode(buffer, 0, read); delivery.settle(); return message; } /** * Disposes of the receiver when an exception is encountered. * <p> * While {@link ReactorReceiver * contract, this API performs the same disposal with additional * contextual information. For example, the context may indicate if the resource needs to be disposed of * internally when there is an error in the link, session or connection. 
* </p> * * <p> * Closing ReactorReceiver involves 3 stages, running in following order - * <ul> * <li>local-close (client to broker) via beginClose() </li> * <li>remote-close ack (broker to client)</li> * <li>disposal of ReactorReceiver resources via completeClose()</li> * </ul> * @link <a href="https: * * @param message Message to log. * @param errorCondition Error condition associated with close operation. */ protected Mono<Void> closeAsync(String message, ErrorCondition errorCondition) { if (isDisposed.getAndSet(true)) { return getIsClosedMono(); } addErrorCondition(logger.atVerbose(), errorCondition) .log("Setting error condition and disposing. {}", message); return beginClose(errorCondition) .flatMap(localCloseScheduled -> { if (localCloseScheduled) { return timeoutRemoteCloseAck(); } else { return Mono.empty(); } }) .publishOn(Schedulers.boundedElastic()); } /** * Gets the Mono that signals completion when the disposal/closing of ReactorReceiver is completed. * * @return the disposal/closing completion Mono. */ protected Mono<Void> getIsClosedMono() { return isClosedMono.asMono().publishOn(Schedulers.boundedElastic()); } protected void onHandlerClose() { assert !isV2; } /** * Begins the client side close by requesting receive link handler for any graceful resource * cleanup, then initiating local-close on underlying receiver. * * @param errorCondition Error condition associated with close operation. * @return a {@link Mono} when subscribed attempt to initiate local-close, emitting {@code true} * if local-close is scheduled on the dispatcher, emits {@code false} if unable to schedule * local-close that lead to manual close. */ /** * Apply timeout on remote-close ack. If timeout happens, i.e., if remote-close ack doesn't arrive within * the timeout duration, then terminate the Flux returned by getEndpointStates() and complete close. * * a {@link Mono} that registers remote-close ack timeout based close cleanup. 
*/ private Mono<Void> timeoutRemoteCloseAck() { return isClosedMono.asMono() .timeout(retryOptions.getTryTimeout()) .onErrorResume(error -> { if (error instanceof TimeoutException) { logger.info("Timeout waiting for RemoteClose. Manually terminating EndpointStates and completing close."); terminateEndpointState(); completeClose(); } return Mono.empty(); }) .subscribeOn(Schedulers.boundedElastic()); } /** * Terminate the Flux returned by the getEndpointStates() API. * * <p> * The termination of Flux returned by getEndpointStates() is the signal that "AmqpReceiveLinkProcessor" * uses to either terminate its downstream or obtain a new ReactorReceiver to continue delivering events * downstream. * </p> */ private void terminateEndpointState() { terminateEndpointStates.emitEmpty((signalType, emitResult) -> { addSignalTypeAndResult(logger.atVerbose(), signalType, emitResult) .log("Could not emit EndpointStates termination."); return false; }); } /** * Completes the closing of the underlying receiver, which includes disposing of subscriptions, * closing of token manager, and releasing of protonJ resources. * <p> * The completeClose invoked in 3 cases - when the broker ack for beginClose (i.e. ack via * remote-close frame), if the broker ack for beginClose never comes through within timeout, * if the client fails to run beginClose. 
* </p> */ private void completeClose() { if (isCompleteCloseCalled.getAndSet(true)) { return; } isClosedMono.emitEmpty((signalType, result) -> { addSignalTypeAndResult(logger.atWarning(), signalType, result) .log("Unable to emit shutdown signal."); return false; }); subscriptions.dispose(); if (tokenManager != null) { tokenManager.close(); } handler.close(); if (!isV2) { onHandlerClose(); } receiver.free(); try { trackPrefetchSeqNoSubscription.close(); } catch (Exception e) { logger.verbose("Error closing metrics subscription.", e); } } private Long getSequenceNumber(Message message) { if (message == null || message.getMessageAnnotations() == null || message.getBody() == null) { return null; } Map<Symbol, Object> properties = message.getMessageAnnotations().getValue(); Object seqNo = properties != null ? properties.get(SEQUENCE_NUMBER_ANNOTATION) : null; if (seqNo instanceof Integer) { return ((Integer) seqNo).longValue(); } else if (seqNo instanceof Long) { return (Long) seqNo; } else if (seqNo != null) { logger.verbose("Received message has unexpected `x-opt-sequence-number` annotation value - `{}`. Ignoring it.", seqNo); } return null; } @Override public String toString() { return String.format("connectionId: [%s] entity path: [%s] linkName: [%s]", receiver.getName(), entityPath, getLinkName()); } }
I don't often see anonymous classes... I don't know if we have guidelines around using them. I feel like it is easier to read as a function: ```java final Answer<Mono<Void>> response = invocation -> { return Mono.empty(); }; ``` This applies to the other instances where an anonymous class is used.
void shouldRecoverFromRetriableSendLinkError() { final int sessionsCnt = 1; final int[] linksPerSession = new int[] { 2 }; try (MockEndpoint endpoint = createMockEndpoint(sessionsCnt, linksPerSession)) { final Supplier<ServiceBusReactorAmqpConnection> connectionSupplier = singleConnectionSupplier(endpoint); final Answer<Mono<Void>> sendAnswer = new Answer<Mono<Void>>() { private int invocationCount = -1; @Override public Mono<Void> answer(InvocationOnMock invocation) { invocationCount++; switch (invocationCount) { case 0: endpoint.emitCurrentSendLinkError(RETRIABLE_LINK_ERROR); return Mono.error(RETRIABLE_LINK_ERROR); case 1: return Mono.empty(); default: throw new RuntimeException("More than two invocations of send-answer is not expected."); } } }; when(endpoint.getAmqpSendLink(0, 0).send(anyList())).thenAnswer(sendAnswer); when(endpoint.getAmqpSendLink(0, 1).send(anyList())).thenAnswer(sendAnswer); final int messagesCount = 4; final List<ServiceBusMessage> messagesToSend = createMessagesToSend(messagesCount); final ReactorConnectionCache<ServiceBusReactorAmqpConnection> connectionCache = createConnectionCache(connectionSupplier); final ServiceBusSenderAsyncClient sender = createSenderAsyncClient(connectionCache, false); try { try (VirtualTimeStepVerifier verifier = new VirtualTimeStepVerifier()) { verifier.create(() -> sender.sendMessages(messagesToSend)) .thenAwait(VIRTUAL_TIME_SHIFT) .verifyComplete(); } verify(endpoint.getAmqpSendLink(0, 0)).send(sendMessagesCaptor0.capture()); final List<Message> messagesSendInSession0Link0 = sendMessagesCaptor0.getValue(); Assertions.assertEquals(messagesCount, messagesSendInSession0Link0.size()); verify(endpoint.getAmqpSendLink(0, 1)).send(sendMessagesCaptor1.capture()); final List<Message> messagesSendInSession0Link1 = sendMessagesCaptor1.getValue(); Assertions.assertEquals(messagesCount, messagesSendInSession0Link1.size()); } finally { sender.close(); connectionCache.dispose(); } } }
final Answer<Mono<Void>> sendAnswer = new Answer<Mono<Void>>() {
void shouldRecoverFromRetriableSendLinkError() { final int sessionsCnt = 1; final int[] linksPerSession = new int[] { 2 }; try (MockEndpoint endpoint = createMockEndpoint(sessionsCnt, linksPerSession)) { final Supplier<ServiceBusReactorAmqpConnection> connectionSupplier = singleConnectionSupplier(endpoint); final Answer<Mono<Void>> sendAnswer = new Answer<Mono<Void>>() { private int invocationCount = -1; @Override public Mono<Void> answer(InvocationOnMock invocation) { invocationCount++; switch (invocationCount) { case 0: endpoint.emitCurrentSendLinkError(RETRIABLE_LINK_ERROR); return Mono.error(RETRIABLE_LINK_ERROR); case 1: return Mono.empty(); default: throw new RuntimeException("More than two invocations of send-answer is not expected."); } } }; when(endpoint.getAmqpSendLink(0, 0).send(anyList())).thenAnswer(sendAnswer); when(endpoint.getAmqpSendLink(0, 1).send(anyList())).thenAnswer(sendAnswer); final int messagesCount = 4; final List<ServiceBusMessage> messagesToSend = createMessagesToSend(messagesCount); final ReactorConnectionCache<ServiceBusReactorAmqpConnection> connectionCache = createConnectionCache(connectionSupplier); final ServiceBusSenderAsyncClient sender = createSenderAsyncClient(connectionCache, false); try { try (VirtualTimeStepVerifier verifier = new VirtualTimeStepVerifier()) { verifier.create(() -> sender.sendMessages(messagesToSend)) .thenAwait(VIRTUAL_TIME_SHIFT) .verifyComplete(); } verify(endpoint.getAmqpSendLink(0, 0)).send(sendMessagesCaptor0.capture()); final List<Message> messagesSendInSession0Link0 = sendMessagesCaptor0.getValue(); Assertions.assertEquals(messagesCount, messagesSendInSession0Link0.size()); verify(endpoint.getAmqpSendLink(0, 1)).send(sendMessagesCaptor1.capture()); final List<Message> messagesSendInSession0Link1 = sendMessagesCaptor1.getValue(); Assertions.assertEquals(messagesCount, messagesSendInSession0Link1.size()); } finally { sender.close(); connectionCache.dispose(); } } }
class ServiceBusSenderAsyncClientRecoveryIsolatedTest { private static final BinaryData TEST_CONTENTS = BinaryData.fromString("My message for service bus queue!"); private static final String FQDN = "contoso-shopping.servicebus.windows.net"; private static final String QUEUE_NAME = "orders"; private static final String CLIENT_IDENTIFIER = "client-identifier"; private static final ServiceBusSenderInstrumentation DEFAULT_INSTRUMENTATION = new ServiceBusSenderInstrumentation( null, null, FQDN, QUEUE_NAME); private static final Duration VIRTUAL_TIME_SHIFT = OPERATION_TIMEOUT.plusSeconds(30); private static final AmqpException RETRIABLE_LINK_ERROR = new AmqpException(true, AmqpErrorCondition.LINK_DETACH_FORCED, "detach-link-error", new AmqpErrorContext(FQDN)); private static final AmqpException RETRIABLE_SESSION_ERROR = new AmqpException(true, "session-error", new AmqpErrorContext(FQDN)); private static final AmqpException RETRIABLE_CONNECTION_ERROR = new AmqpException(true, AmqpErrorCondition.CONNECTION_FORCED, "connection-forced-error", new AmqpErrorContext(FQDN)); private static final AmqpException NON_RETRIABLE_ERROR_1 = new AmqpException(false, AmqpErrorCondition.NOT_ALLOWED, "not-allowed-error-1", new AmqpErrorContext(FQDN)); private static final AmqpException NON_RETRIABLE_ERROR_2 = new AmqpException(false, AmqpErrorCondition.NOT_ALLOWED, "not-allowed-error-2", new AmqpErrorContext(FQDN)); private static final AmqpRetryOptions RETRY_OPTIONS = new AmqpRetryOptions() .setMode(AmqpRetryMode.FIXED) .setMaxRetries(10) .setMaxDelay(Duration.ofSeconds(5)) .setDelay(Duration.ofSeconds(1)) .setTryTimeout(OPERATION_TIMEOUT); private final MessageSerializer messageSerializer = new ServiceBusMessageSerializer(); @Mock private Runnable onClientClosed; @Captor private ArgumentCaptor<List<Message>> sendMessagesCaptor0; @Captor private ArgumentCaptor<List<Message>> sendMessagesCaptor1; private AutoCloseable mocksCloseable; @BeforeEach void setup() { mocksCloseable = 
MockitoAnnotations.openMocks(this); } @AfterEach void teardown() throws Exception { Mockito.framework().clearInlineMock(this); if (mocksCloseable != null) { mocksCloseable.close(); } } @Test @Execution(ExecutionMode.SAME_THREAD) @Test @Execution(ExecutionMode.SAME_THREAD) void shouldBubbleUpNonRetriableSendLinkError() { final int sessionsCnt = 1; final int[] linksPerSession = new int[] { 2 }; try (MockEndpoint endpoint = createMockEndpoint(sessionsCnt, linksPerSession)) { final Supplier<ServiceBusReactorAmqpConnection> connectionSupplier = singleConnectionSupplier(endpoint); final Answer<Mono<Void>> sendAnswer = new Answer<Mono<Void>>() { private int invocationCount = -1; @Override public Mono<Void> answer(InvocationOnMock invocation) { invocationCount++; switch (invocationCount) { case 0: endpoint.emitCurrentSendLinkError(RETRIABLE_LINK_ERROR); return Mono.error(RETRIABLE_LINK_ERROR); case 1: endpoint.emitCurrentSendLinkError(NON_RETRIABLE_ERROR_1); return Mono.error(NON_RETRIABLE_ERROR_1); default: throw new RuntimeException("More than two invocations of send-answer is not expected."); } } }; when(endpoint.getAmqpSendLink(0, 0).send(anyList())).thenAnswer(sendAnswer); when(endpoint.getAmqpSendLink(0, 1).send(anyList())).thenAnswer(sendAnswer); final int messagesCount = 4; final List<ServiceBusMessage> messagesToSend = createMessagesToSend(messagesCount); final ReactorConnectionCache<ServiceBusReactorAmqpConnection> connectionCache = createConnectionCache(connectionSupplier); final ServiceBusSenderAsyncClient sender = createSenderAsyncClient(connectionCache, false); try { try (VirtualTimeStepVerifier verifier = new VirtualTimeStepVerifier()) { verifier.create(() -> sender.sendMessages(messagesToSend)) .thenAwait(VIRTUAL_TIME_SHIFT) .verifyErrorSatisfies(e -> { Assertions.assertTrue(e instanceof ServiceBusException); final ServiceBusException se = (ServiceBusException) e; Assertions.assertSame(NON_RETRIABLE_ERROR_1, se.getCause()); }); } 
verify(endpoint.getAmqpSendLink(0, 0)).send(sendMessagesCaptor0.capture()); final List<Message> messagesSendInSession0Link0 = sendMessagesCaptor0.getValue(); Assertions.assertEquals(messagesCount, messagesSendInSession0Link0.size()); verify(endpoint.getAmqpSendLink(0, 1)).send(sendMessagesCaptor1.capture()); final List<Message> messagesSendInSession0Link1 = sendMessagesCaptor1.getValue(); Assertions.assertEquals(messagesCount, messagesSendInSession0Link1.size()); verifyNoInteractions(onClientClosed); } finally { sender.close(); connectionCache.dispose(); } } } @Test @Execution(ExecutionMode.SAME_THREAD) void shouldRecoverFromRetriableSessionError() { final int sessionsCnt = 2; final int[] linksPerSession = new int[] { 1, 1 }; try (MockEndpoint endpoint = createMockEndpoint(sessionsCnt, linksPerSession)) { final Supplier<ServiceBusReactorAmqpConnection> connectionSupplier = singleConnectionSupplier(endpoint); final Answer<Mono<Void>> sendAnswer = new Answer<Mono<Void>>() { private int invocationCount = -1; @Override public Mono<Void> answer(InvocationOnMock invocation) { invocationCount++; switch (invocationCount) { case 0: endpoint.emitCurrentSessionError(RETRIABLE_SESSION_ERROR); return Mono.error(RETRIABLE_SESSION_ERROR); case 1: return Mono.empty(); default: throw new RuntimeException("More than two invocations of send-answer is not expected."); } } }; when(endpoint.getAmqpSendLink(0, 0).send(anyList())).thenAnswer(sendAnswer); when(endpoint.getAmqpSendLink(1, 0).send(anyList())).thenAnswer(sendAnswer); final int messagesCount = 4; final List<ServiceBusMessage> messagesToSend = createMessagesToSend(messagesCount); final ReactorConnectionCache<ServiceBusReactorAmqpConnection> connectionCache = createConnectionCache(connectionSupplier); final ServiceBusSenderAsyncClient sender = createSenderAsyncClient(connectionCache, false); try { try (VirtualTimeStepVerifier verifier = new VirtualTimeStepVerifier()) { verifier.create(() -> sender.sendMessages(messagesToSend)) 
.thenAwait(VIRTUAL_TIME_SHIFT) .verifyComplete(); } verify(endpoint.getAmqpSendLink(0, 0)).send(sendMessagesCaptor0.capture()); final List<Message> messagesSendInSession0Link0 = sendMessagesCaptor0.getValue(); Assertions.assertEquals(messagesCount, messagesSendInSession0Link0.size()); verify(endpoint.getAmqpSendLink(1, 0)).send(sendMessagesCaptor1.capture()); final List<Message> messagesSendInSession1Link0 = sendMessagesCaptor1.getValue(); Assertions.assertEquals(messagesCount, messagesSendInSession1Link0.size()); } finally { sender.close(); connectionCache.dispose(); } } } @Test @Execution(ExecutionMode.SAME_THREAD) void shouldBubbleUpNonRetriableSessionError() { final int sessionsCnt = 2; final int[] linksPerSession = new int[] { 1, 1 }; try (MockEndpoint endpoint = createMockEndpoint(sessionsCnt, linksPerSession)) { final Supplier<ServiceBusReactorAmqpConnection> connectionSupplier = singleConnectionSupplier(endpoint); final Answer<Mono<Void>> sendAnswer = new Answer<Mono<Void>>() { private int invocationCount = -1; @Override public Mono<Void> answer(InvocationOnMock invocation) { invocationCount++; switch (invocationCount) { case 0: endpoint.emitCurrentSessionError(RETRIABLE_SESSION_ERROR); return Mono.error(RETRIABLE_SESSION_ERROR); case 1: endpoint.emitCurrentSessionError(NON_RETRIABLE_ERROR_1); return Mono.error(NON_RETRIABLE_ERROR_1); default: throw new RuntimeException("More than two invocations of send-answer is not expected."); } } }; when(endpoint.getAmqpSendLink(0, 0).send(anyList())).thenAnswer(sendAnswer); when(endpoint.getAmqpSendLink(1, 0).send(anyList())).thenAnswer(sendAnswer); final int messagesCount = 4; final List<ServiceBusMessage> messagesToSend = createMessagesToSend(messagesCount); final ReactorConnectionCache<ServiceBusReactorAmqpConnection> connectionCache = createConnectionCache(connectionSupplier); final ServiceBusSenderAsyncClient sender = createSenderAsyncClient(connectionCache, false); try { try (VirtualTimeStepVerifier verifier = 
new VirtualTimeStepVerifier()) { verifier.create(() -> sender.sendMessages(messagesToSend)) .thenAwait(VIRTUAL_TIME_SHIFT) .verifyErrorSatisfies(e -> { Assertions.assertTrue(e instanceof ServiceBusException); final ServiceBusException se = (ServiceBusException) e; Assertions.assertSame(NON_RETRIABLE_ERROR_1, se.getCause()); }); } verify(endpoint.getAmqpSendLink(0, 0)).send(sendMessagesCaptor0.capture()); final List<Message> messagesSendInSession0Link0 = sendMessagesCaptor0.getValue(); Assertions.assertEquals(messagesCount, messagesSendInSession0Link0.size()); verify(endpoint.getAmqpSendLink(1, 0)).send(sendMessagesCaptor1.capture()); final List<Message> messagesSendInSession1Link0 = sendMessagesCaptor1.getValue(); Assertions.assertEquals(messagesCount, messagesSendInSession1Link0.size()); verifyNoInteractions(onClientClosed); } finally { sender.close(); connectionCache.dispose(); } } } @Test @Execution(ExecutionMode.SAME_THREAD) void shouldSenderReusableAfterNonRetriableLinkAndSessionError() { final int sessionsCnt = 3; final int[] linksPerSession = new int[] { 1, 1, 1 }; try (MockEndpoint endpoint = createMockEndpoint(sessionsCnt, linksPerSession)) { final Supplier<ServiceBusReactorAmqpConnection> connectionSupplier = new Supplier<ServiceBusReactorAmqpConnection>() { private int invocationCount = -1; @Override public ServiceBusReactorAmqpConnection get() { invocationCount++; switch (invocationCount) { case 0: final ServiceBusReactorAmqpConnection c = endpoint.arrange(); return c; default: throw new RuntimeException("More than one invocation of connection-supplier is not expected."); } } }; final Answer<Mono<Void>> sendAnswer = new Answer<Mono<Void>>() { private int invocationCount = -1; @Override public Mono<Void> answer(InvocationOnMock invocation) { invocationCount++; switch (invocationCount) { case 0: endpoint.emitCurrentSendLinkError(NON_RETRIABLE_ERROR_1); return Mono.error(NON_RETRIABLE_ERROR_1); case 1: 
endpoint.emitCurrentSessionError(NON_RETRIABLE_ERROR_2); return Mono.error(NON_RETRIABLE_ERROR_2); case 2: return Mono.empty(); default: throw new RuntimeException("More than three invocations of send-answer is not expected."); } } }; when(endpoint.getAmqpSendLink(0, 0).send(anyList())).thenAnswer(sendAnswer); when(endpoint.getAmqpSendLink(1, 0).send(anyList())).thenAnswer(sendAnswer); when(endpoint.getAmqpSendLink(2, 0).send(any(Message.class))).thenAnswer(sendAnswer); final int messagesCount = 4; final List<ServiceBusMessage> messagesToSend = createMessagesToSend(messagesCount); final ServiceBusMessage messageToSend = createMessageToSend(); final ReactorConnectionCache<ServiceBusReactorAmqpConnection> connectionCache = createConnectionCache(connectionSupplier); final ServiceBusSenderAsyncClient sender = createSenderAsyncClient(connectionCache, false); try { try (VirtualTimeStepVerifier verifier = new VirtualTimeStepVerifier()) { verifier.create(() -> sender.sendMessages(messagesToSend)) .thenAwait(VIRTUAL_TIME_SHIFT) .verifyErrorSatisfies(e -> { Assertions.assertTrue(e instanceof ServiceBusException); final ServiceBusException se = (ServiceBusException) e; Assertions.assertSame(NON_RETRIABLE_ERROR_1, se.getCause()); }); } try (VirtualTimeStepVerifier verifier = new VirtualTimeStepVerifier()) { verifier.create(() -> sender.sendMessages(messagesToSend)) .thenAwait(VIRTUAL_TIME_SHIFT) .verifyErrorSatisfies(e -> { Assertions.assertTrue(e instanceof ServiceBusException); final ServiceBusException se = (ServiceBusException) e; Assertions.assertSame(NON_RETRIABLE_ERROR_2, se.getCause()); }); } try (VirtualTimeStepVerifier verifier = new VirtualTimeStepVerifier()) { verifier.create(() -> sender.sendMessage(messageToSend)) .thenAwait(VIRTUAL_TIME_SHIFT) .verifyComplete(); } verify(endpoint.getAmqpSendLink(0, 0)).send(sendMessagesCaptor0.capture()); final List<Message> messagesSendInSession0Link0 = sendMessagesCaptor0.getValue(); Assertions.assertEquals(messagesCount, 
messagesSendInSession0Link0.size()); verify(endpoint.getAmqpSendLink(1, 0)).send(sendMessagesCaptor1.capture()); final List<Message> messagesSendInSession1Link0 = sendMessagesCaptor1.getValue(); Assertions.assertEquals(messagesCount, messagesSendInSession1Link0.size()); verify(endpoint.getAmqpSendLink(2, 0), times(1)).send(any(Message.class)); } finally { sender.close(); connectionCache.dispose(); } } } @Test @Execution(ExecutionMode.SAME_THREAD) void shouldRecoverFromRetriableConnectionError() { final int endpointsCount = 4; final List<SessionLinkCount> sessionLinkCountList = new ArrayList<>(endpointsCount); sessionLinkCountList.add(new SessionLinkCount(1, new int[] { 1 })); sessionLinkCountList.add(new SessionLinkCount(1, new int[] { 1 })); sessionLinkCountList.add(new SessionLinkCount(1, new int[] { 1 })); sessionLinkCountList.add(new SessionLinkCount(1, new int[] { 1 })); try (MockEndpoints endpoints = createMockEndpoints(sessionLinkCountList)) { final AtomicReference<MockEndpoint> currentEndpoint = new AtomicReference<>(); final Supplier<ServiceBusReactorAmqpConnection> connectionSupplier = new Supplier<ServiceBusReactorAmqpConnection>() { private int endpointIndex = -1; @Override public ServiceBusReactorAmqpConnection get() { endpointIndex++; if (endpointIndex >= endpointsCount) { throw new RuntimeException("More than " + endpointsCount + " invocation of connection-supplier is not expected."); } final MockEndpoint e = endpoints.get(endpointIndex); currentEndpoint.set(e); final ServiceBusReactorAmqpConnection c = e.arrange(); return c; } }; final Answer<Mono<Void>> sendAnswer = new Answer<Mono<Void>>() { private int invocationCount = -1; @Override public Mono<Void> answer(InvocationOnMock invocation) { invocationCount++; switch (invocationCount) { case 0: Assertions.assertEquals(endpoints.get(0), currentEndpoint.get()); endpoints.get(0).emitCurrentSendLinkError(RETRIABLE_LINK_ERROR); return Mono.error(RETRIABLE_LINK_ERROR); case 1: 
Assertions.assertEquals(endpoints.get(1), currentEndpoint.get()); endpoints.get(1).emitCurrentSessionError(RETRIABLE_SESSION_ERROR); return Mono.error(RETRIABLE_SESSION_ERROR); case 2: Assertions.assertEquals(endpoints.get(2), currentEndpoint.get()); endpoints.get(2).emitConnectionError(RETRIABLE_CONNECTION_ERROR); return Mono.error(RETRIABLE_CONNECTION_ERROR); case 3: Assertions.assertEquals(endpoints.get(3), currentEndpoint.get()); return Mono.empty(); default: throw new RuntimeException("More than three invocations of send-answer is not expected."); } } }; when(endpoints.get(0).getAmqpSendLink(0, 0).send(anyList())).thenAnswer(sendAnswer); when(endpoints.get(1).getAmqpSendLink(0, 0).send(anyList())).thenAnswer(sendAnswer); when(endpoints.get(2).getAmqpSendLink(0, 0).send(anyList())).thenAnswer(sendAnswer); when(endpoints.get(3).getAmqpSendLink(0, 0).send(anyList())).thenAnswer(sendAnswer); final int messagesCount = 4; final List<ServiceBusMessage> messagesToSend = createMessagesToSend(messagesCount); final ReactorConnectionCache<ServiceBusReactorAmqpConnection> connectionCache = createConnectionCache(connectionSupplier); final ServiceBusSenderAsyncClient sender = createSenderAsyncClient(connectionCache, false); try { try (VirtualTimeStepVerifier verifier = new VirtualTimeStepVerifier()) { verifier.create(() -> sender.sendMessages(messagesToSend)) .thenAwait(VIRTUAL_TIME_SHIFT) .verifyComplete(); } verify(endpoints.get(0).getAmqpSendLink(0, 0), times(1)).send(anyList()); verify(endpoints.get(1).getAmqpSendLink(0, 0), times(1)).send(anyList()); verify(endpoints.get(2).getAmqpSendLink(0, 0), times(1)).send(anyList()); verify(endpoints.get(3).getAmqpSendLink(0, 0), times(1)).send(anyList()); } finally { sender.close(); connectionCache.dispose(); } } } private MockEndpoint createMockEndpoint(int sessionsCnt, int[] linksPerSession) { final String connectionId = "1"; return MockEndpoint.create(connectionId, QUEUE_NAME, RETRY_OPTIONS, sessionsCnt, linksPerSession); } 
/** Builds the multi-endpoint mock fixture for the shared queue and retry options. */
private MockEndpoints createMockEndpoints(List<SessionLinkCount> sessionLinkCountList) {
    return MockEndpoints.create(QUEUE_NAME, RETRY_OPTIONS, sessionLinkCountList);
}

/**
 * Supplier that arranges the given endpoint exactly once; a second request indicates the
 * test unexpectedly triggered connection recovery.
 */
private Supplier<ServiceBusReactorAmqpConnection> singleConnectionSupplier(MockEndpoint endpoint) {
    final Supplier<ServiceBusReactorAmqpConnection> connectionSupplier
        = new Supplier<ServiceBusReactorAmqpConnection>() {
            private int invocationCount = -1;

            @Override
            public ServiceBusReactorAmqpConnection get() {
                invocationCount++;
                switch (invocationCount) {
                    case 0:
                        final ServiceBusReactorAmqpConnection c = endpoint.arrange();
                        return c;
                    default:
                        throw new RuntimeException("More than one invocation of connection-supplier is not expected.");
                }
            }
        };
    return connectionSupplier;
}

/** Wraps the supplier in the connection cache under test (shared FQDN/queue/retry policy). */
private ReactorConnectionCache<ServiceBusReactorAmqpConnection> createConnectionCache(
    Supplier<ServiceBusReactorAmqpConnection> connectionSupplier) {
    return new ReactorConnectionCache<>(connectionSupplier, FQDN, QUEUE_NAME,
        getRetryPolicy(RETRY_OPTIONS), new HashMap<>());
}

/** Creates the sender client under test on top of the given connection cache. */
private ServiceBusSenderAsyncClient createSenderAsyncClient(
    ReactorConnectionCache<ServiceBusReactorAmqpConnection> connectionCache, boolean isSharedConnection) {
    final ConnectionCacheWrapper connectionSupport = new ConnectionCacheWrapper(connectionCache);
    return new ServiceBusSenderAsyncClient(QUEUE_NAME, MessagingEntityType.QUEUE, connectionSupport,
        RETRY_OPTIONS, DEFAULT_INSTRUMENTATION, messageSerializer, onClientClosed, "", CLIENT_IDENTIFIER);
}

/** Produces {@code messagesCount} identical test messages. */
private static List<ServiceBusMessage> createMessagesToSend(int messagesCount) {
    return IntStream.range(0, messagesCount)
        .mapToObj(__ -> new ServiceBusMessage(TEST_CONTENTS))
        .collect(Collectors.toList());
}

/** Produces a single test message. */
private static ServiceBusMessage createMessageToSend() {
    final ServiceBusMessage messageToSend = new ServiceBusMessage(TEST_CONTENTS);
    return messageToSend;
}

/**
 * AutoCloseable wrapper pairing a {@link VirtualTimeScheduler} with
 * {@link StepVerifier#withVirtualTime} so retry back-offs elapse without real waiting.
 */
private static final class VirtualTimeStepVerifier implements AutoCloseable {
    private final VirtualTimeScheduler scheduler;

    VirtualTimeStepVerifier() {
        scheduler = VirtualTimeScheduler.create();
    }

    // Initial demand of 0; the test advances time explicitly via thenAwait.
    <T> StepVerifier.Step<T> create(Supplier<Mono<T>> scenarioSupplier) {
        return StepVerifier.withVirtualTime(scenarioSupplier, () -> scheduler, 0);
    }

    @Override
    public void close() {
        scheduler.dispose();
    }
}

/** Value holder: how many sessions an endpoint has and how many send links each session has. */
private static class SessionLinkCount {
    private final int sessionsCnt;
    private final int[] linksPerSession;

    SessionLinkCount(int sessionsCnt, int[] linksPerSession) {
        this.sessionsCnt = sessionsCnt;
        this.linksPerSession = linksPerSession;
    }
}

/** An ordered collection of {@link MockEndpoint}s with connection-ids "1", "2", ... */
private static final class MockEndpoints implements Closeable {
    private final List<MockEndpoint> mockEndpoints;
    private final int mockEndpointsCnt;

    private MockEndpoints(List<MockEndpoint> mockEndpoints) {
        this.mockEndpoints = mockEndpoints;
        this.mockEndpointsCnt = this.mockEndpoints.size();
    }

    static MockEndpoints create(String queueName, AmqpRetryOptions retryOptions,
        List<SessionLinkCount> sessionLinkCounts) {
        final List<MockEndpoint> mockEndpoints = new ArrayList<>(sessionLinkCounts.size());
        int conId = 1;
        for (SessionLinkCount slc : sessionLinkCounts) {
            mockEndpoints.add(MockEndpoint.create(String.valueOf(conId), queueName, retryOptions,
                slc.sessionsCnt, slc.linksPerSession));
            conId++;
        }
        return new MockEndpoints(mockEndpoints);
    }

    MockEndpoint get(int index) {
        if (index >= mockEndpointsCnt) {
            throw new IndexOutOfBoundsException("index:" + index + " maxIndex: " + (mockEndpointsCnt - 1));
        }
        return mockEndpoints.get(index);
    }

    @Override
    public void close() {
        for (MockEndpoint mockEndpoint : mockEndpoints) {
            mockEndpoint.close();
        }
    }
}

/**
 * Mocks one broker endpoint: a connection plus its sessions and send links, with sinks to
 * drive endpoint-state transitions from the tests.
 */
private static final class MockEndpoint implements Closeable {
    private final String connectionId;
    private final String queueName;
    private final AmqpRetryOptions retryOptions;
    private final MockSendSessions mockSendSessions;
    private final ConnectionOptions connectionOptions;
    private final Connection connection;
    private final Reactor reactor;
    private final ReactorDispatcher reactorDispatcher;
    private final ReactorExecutor reactorExecutor;
    private final
ReactorProvider reactorProvider;
    private final ConnectionHandler connectionHandler;
    private final Sinks.Many<EndpointState> connectionStateSink;
    private final ReactorHandlerProvider handlerProvider;
    private final ServiceBusAmqpLinkProvider linkProvider;
    private final TokenManagerProvider tokenManagerProvider;
    private final TokenManager tokenManager;
    private final MessageSerializer messageSerializer;
    // Each MockEndpoint backs exactly one connection; guards double-arrange.
    private final AtomicBoolean arranged = new AtomicBoolean(false);

    // Captures the collaborating mocks; use the static create(...) factory instead.
    private MockEndpoint(String connectionId, String queueName, AmqpRetryOptions retryOptions,
        MockSendSessions mockSendSessions, ConnectionOptions connectionOptions, Connection connection,
        Reactor reactor, ReactorDispatcher reactorDispatcher, ReactorExecutor reactorExecutor,
        ReactorProvider reactorProvider, ConnectionHandler connectionHandler,
        Sinks.Many<EndpointState> connectionStateSink, ReactorHandlerProvider handlerProvider,
        ServiceBusAmqpLinkProvider linkProvider, TokenManagerProvider tokenManagerProvider,
        TokenManager tokenManager, MessageSerializer messageSerializer) {
        this.connectionId = connectionId;
        this.queueName = queueName;
        this.retryOptions = retryOptions;
        this.mockSendSessions = mockSendSessions;
        this.connectionOptions = connectionOptions;
        this.connection = connection;
        this.reactor = reactor;
        this.reactorDispatcher = reactorDispatcher;
        this.reactorExecutor = reactorExecutor;
        this.reactorProvider = reactorProvider;
        this.connectionHandler = connectionHandler;
        this.connectionStateSink = connectionStateSink;
        this.handlerProvider = handlerProvider;
        this.linkProvider = linkProvider;
        this.tokenManagerProvider = tokenManagerProvider;
        this.tokenManager = tokenManager;
        this.messageSerializer = messageSerializer;
    }

    /**
     * Validates the topology arguments, creates the session/link mocks and the raw Mockito
     * mocks for the connection plumbing, and assembles a MockEndpoint from them.
     */
    static MockEndpoint create(String connectionId, String queueName, AmqpRetryOptions retryOptions,
        int sessionsCnt, int[] linksPerSession) {
        Assertions.assertNotNull(retryOptions);
        Assertions.assertTrue(sessionsCnt > 0, "sessionsCnt must be > 0.");
        Assertions.assertEquals(sessionsCnt, linksPerSession.length);
        for (int linksCnt = 0; linksCnt < linksPerSession.length; linksCnt++) {
            // FIX: validate the array element, not the loop index (the index is trivially >= 0,
            // so the original check could never fail).
            Assertions.assertTrue(linksPerSession[linksCnt] >= 0,
                "links-count in linksPerSession must be >= 0.");
        }
        final MockSendSessions mockSendSessions
            = MockSendSessions.create(connectionId, sessionsCnt, linksPerSession);
        final ConnectionOptions connectionOptions = mock(ConnectionOptions.class);
        final Connection connection = mock(Connection.class);
        // Replay-latest so late subscribers immediately observe the current state.
        final Sinks.Many<EndpointState> connectionStateSink = Sinks.many().replay()
            .latestOrDefault(EndpointState.UNINITIALIZED);
        final ConnectionHandler connectionHandler = mock(ConnectionHandler.class);
        final Reactor reactor = mock(Reactor.class);
        final ReactorDispatcher reactorDispatcher = mock(ReactorDispatcher.class);
        final ReactorExecutor reactorExecutor = mock(ReactorExecutor.class);
        final ReactorProvider reactorProvider = mock(ReactorProvider.class);
        final ReactorHandlerProvider handlerProvider = mock(ReactorHandlerProvider.class);
        final ServiceBusAmqpLinkProvider linkProvider = mock(ServiceBusAmqpLinkProvider.class);
        final TokenManager tokenManager = mock(TokenManager.class);
        final TokenManagerProvider tokenManagerProvider = mock(TokenManagerProvider.class);
        final MessageSerializer messageSerializer = mock(MessageSerializer.class);
        return new MockEndpoint(connectionId, queueName, retryOptions, mockSendSessions,
            connectionOptions, connection, reactor, reactorDispatcher, reactorExecutor,
            reactorProvider, connectionHandler, connectionStateSink, handlerProvider, linkProvider,
            tokenManagerProvider, tokenManager, messageSerializer);
    }

    /**
     * Wires all mocks and returns the single ServiceBusReactorAmqpConnection this endpoint
     * backs. May be called at most once per instance.
     */
    ServiceBusReactorAmqpConnection arrange() {
        if (arranged.getAndSet(true)) {
            throw new RuntimeException("Only one connection can be obtained from a MockEndpoint instance.");
        }
        mockSendSessions.arrange(handlerProvider, linkProvider, connection, connectionStateSink);
        when(connectionOptions.getRetry()).thenReturn(retryOptions);
        doNothing().when(connection).close();
        connectionStateSink.emitNext(EndpointState.ACTIVE,
Sinks.EmitFailureHandler.FAIL_FAST);
        when(connectionHandler.getEndpointStates()).thenReturn(connectionStateSink.asFlux().distinctUntilChanged());
        doNothing().when(connectionHandler).close();
        when(reactor.connectionToHost(any(), anyInt(), any())).thenReturn(connection);
        try {
            // Run dispatched work synchronously on the caller's thread — no real reactor loop.
            doAnswer(invocation -> {
                final Runnable work = invocation.getArgument(0);
                work.run();
                return null;
            }).when(reactorDispatcher).invoke(any(Runnable.class));
        } catch (IOException ioe) {
            throw new UncheckedIOException(ioe);
        }
        when(reactorDispatcher.getShutdownSignal()).thenReturn(Mono.empty());
        doNothing().when(reactorExecutor).start();
        when(reactorExecutor.closeAsync()).thenReturn(Mono.empty());
        try {
            when(reactorProvider.createReactor(anyString(), anyInt())).thenReturn(reactor);
        } catch (IOException ioe) {
            throw new UncheckedIOException(ioe);
        }
        when(reactorProvider.getReactorDispatcher()).thenReturn(reactorDispatcher);
        when(reactorProvider.createExecutor(any(), anyString(), any(), any(), any())).thenReturn(reactorExecutor);
        when(handlerProvider.createConnectionHandler(anyString(), any())).thenReturn(connectionHandler);
        // Token valid for an hour so CBS renewal never fires within a test.
        when(tokenManager.authorize()).thenReturn(Mono.just(Duration.ofHours(1).toMillis()));
        when(tokenManagerProvider.getTokenManager(any(), anyString())).thenReturn(tokenManager);
        final boolean isV2 = true;
        return new ServiceBusReactorAmqpConnection(connectionId, connectionOptions, reactorProvider,
            handlerProvider, linkProvider, tokenManagerProvider, messageSerializer, false, isV2);
    }

    AmqpSendLink getAmqpSendLink(int sessionIdx, int linkIdx) {
        return mockSendSessions.getAmqpSendLink(sessionIdx, linkIdx);
    }

    // The emit* helpers below drive endpoint-state transitions (error/complete/next) on the
    // connection, the currently-active session, or the currently-active send link.
    void emitConnectionError(Throwable throwable) {
        connectionStateSink.emitError(throwable, Sinks.EmitFailureHandler.FAIL_FAST);
    }

    void emitConnectionCompletion() {
        connectionStateSink.emitComplete(Sinks.EmitFailureHandler.FAIL_FAST);
    }

    void emitCurrentSessionState(EndpointState state) {
        final MockSendSession session = mockSendSessions.getCurrentSendSession();
        session.emitSessionState(state);
    }

    void emitCurrentSessionError(Throwable throwable) {
        final MockSendSession session = mockSendSessions.getCurrentSendSession();
        session.emitSessionError(throwable);
    }

    void emitCurrentSessionCompletion() {
        final MockSendSession session = mockSendSessions.getCurrentSendSession();
        session.emitSessionCompletion();
    }

    void emitCurrentSendLinkState(EndpointState state) {
        final MockSendLink sendLink = mockSendSessions.getCurrentSendLink();
        sendLink.emitSendLinkState(state);
    }

    void emitCurrentSendLinkError(Throwable throwable) {
        final MockSendLink sendLink = mockSendSessions.getCurrentSendLink();
        sendLink.emitSendLinkError(throwable);
    }

    void emitCurrentSendLinkCompletion() {
        final MockSendLink sendLink = mockSendSessions.getCurrentSendLink();
        sendLink.emitSendLinkCompletion();
    }

    @Override
    public void close() {
        // Release inline-mock state for every mock this endpoint created.
        Mockito.framework().clearInlineMock(connectionOptions);
        Mockito.framework().clearInlineMock(connection);
        Mockito.framework().clearInlineMock(connectionHandler);
        Mockito.framework().clearInlineMock(reactor);
        Mockito.framework().clearInlineMock(reactorDispatcher);
        Mockito.framework().clearInlineMock(reactorExecutor);
        Mockito.framework().clearInlineMock(reactorProvider);
        Mockito.framework().clearInlineMock(handlerProvider);
        Mockito.framework().clearInlineMock(linkProvider);
        Mockito.framework().clearInlineMock(tokenManager);
        Mockito.framework().clearInlineMock(tokenManagerProvider);
        Mockito.framework().clearInlineMock(messageSerializer);
        mockSendSessions.close();
    }
}

/**
 * Ordered set of mock sessions for one connection, plus a "terminal" session handed out
 * once the real ones are exhausted (it completes immediately, ending the connection).
 */
private static final class MockSendSessions implements Closeable {
    private final Object lock = new Object();
    private final List<MockSendSession> mockSendSessions;
    private final MockSendSession terminalMockSendSession;
    private final int sessionsCnt;
    // Index of the next session to hand out; currentMockSendSession tracks the last one given.
    private int sessionIdx;
    private MockSendSession currentMockSendSession;

    private MockSendSessions(List<MockSendSession> mockSendSessions, MockSendSession terminalMockSendSession) {
        this.mockSendSessions = mockSendSessions;
this.terminalMockSendSession = terminalMockSendSession;
        this.sessionsCnt = this.mockSendSessions.size();
        this.sessionIdx = 0;
    }

    static MockSendSessions create(String connectionId, int sessionsCnt, int[] linksPerSession) {
        final List<MockSendSession> mockSendSessions = new ArrayList<>(sessionsCnt);
        for (int i = 0; i < sessionsCnt; i++) {
            mockSendSessions.add(MockSendSession.create(connectionId, linksPerSession[i]));
        }
        // Terminal session has zero links; it is handed out after the real sessions run out.
        final MockSendSession terminalMockSendSession = MockSendSession.create(connectionId, 0);
        return new MockSendSessions(Collections.unmodifiableList(mockSendSessions), terminalMockSendSession);
    }

    /**
     * Stubs the handler/link providers so each session/link request from the connection
     * advances through the mock sessions (and their links) in order.
     */
    void arrange(ReactorHandlerProvider handlerProvider, AmqpLinkProvider linkProvider,
        Connection connection, Sinks.Many<EndpointState> connectionStateSink) {
        for (MockSendSession mockSession : mockSendSessions) {
            mockSession.arrange();
            mockSession.emitSessionState(EndpointState.ACTIVE);
        }
        terminalMockSendSession.arrange();
        terminalMockSendSession.emitSessionCompletion();
        when(handlerProvider.createSessionHandler(anyString(), any(), anyString(), any()))
            .thenAnswer(invocation -> {
                final MockSendSession session = moveToNextSendSession(connectionStateSink);
                return session.getSessionHandler();
            });
        when(handlerProvider.createSendLinkHandler(anyString(), any(), anyString(), anyString()))
            .thenAnswer(invocation -> {
                final MockSendLink sendLink = moveToNextSendLinkInCurrentSession();
                return sendLink.getSendLinkHandler();
            });
        when(linkProvider.createSendLink(any(ServiceBusReactorAmqpConnection.class), anyString(),
            any(Sender.class), any(SendLinkHandler.class), any(ReactorProvider.class),
            any(TokenManager.class), any(MessageSerializer.class), any(AmqpRetryOptions.class),
            any(Scheduler.class), any()))
            .thenAnswer(invocation -> {
                // Resolve the AmqpSendLink matching the handler passed as the 4th argument.
                final SendLinkHandler sendLinkHandler = invocation.getArgument(3);
                final AmqpSendLink amqpSendLink = lookupAmqpSendLinkFor(sendLinkHandler);
                return amqpSendLink;
            });
        final ArrayList<Session> qpidSessions = new ArrayList<>(sessionsCnt + 1);
        for (MockSendSession mockSendSession : mockSendSessions) {
            qpidSessions.add(mockSendSession.getQpidSession());
        }
        qpidSessions.add(terminalMockSendSession.getQpidSession());
        // Return each qpid session in order, one per connection.session() call.
        when(connection.session())
            .thenReturn(qpidSessions.get(0), qpidSessions.subList(1, sessionsCnt + 1).toArray(new Session[0]));
    }

    AmqpSendLink getAmqpSendLink(int sessionIdx, int linkIdx) {
        Assertions.assertTrue(sessionIdx >= 0 && sessionIdx < sessionsCnt, "sessionIdx is not in range.");
        final MockSendSession session = mockSendSessions.get(sessionIdx);
        return session.getAmqpSendLink(linkIdx);
    }

    MockSendSession getCurrentSendSession() {
        final MockSendSession session;
        synchronized (lock) {
            session = Objects.requireNonNull(currentMockSendSession, "Current Session is null");
        }
        return session;
    }

    MockSendLink getCurrentSendLink() {
        final MockSendLink sendLink;
        synchronized (lock) {
            final MockSendSession session = getCurrentSendSession();
            sendLink = session.getCurrentSendLink();
        }
        return sendLink;
    }

    // Hands out the next session; once exhausted, hands out the terminal session and
    // completes the connection's state stream.
    private MockSendSession moveToNextSendSession(Sinks.Many<EndpointState> connectionStateSink) {
        final MockSendSession nextSession;
        synchronized (lock) {
            if (sessionIdx >= sessionsCnt) {
                nextSession = terminalMockSendSession;
            } else {
                nextSession = mockSendSessions.get(sessionIdx);
                sessionIdx++;
            }
            this.currentMockSendSession = nextSession;
        }
        if (isTerminalSession(nextSession)) {
            connectionStateSink.emitComplete(Sinks.EmitFailureHandler.FAIL_FAST);
        }
        return nextSession;
    }

    // Hands out the next link within the current session; reaching the session's terminal
    // link completes that session.
    private MockSendLink moveToNextSendLinkInCurrentSession() {
        final MockSendSession session;
        final MockSendLink nextSendLink;
        synchronized (lock) {
            session = Objects.requireNonNull(currentMockSendSession, "Current Session is null");
            nextSendLink = session.moveToNextSendLink();
        }
        if (session.isTerminalSendLink(nextSendLink)) {
            session.emitSessionCompletion();
        }
        return nextSendLink;
    }

    private AmqpSendLink lookupAmqpSendLinkFor(SendLinkHandler sendLinkHandler) {
        for (MockSendSession mockSendSession : mockSendSessions) {
            final AmqpSendLink amqpSendLink = mockSendSession.lookupAmqpSendLinkFor(sendLinkHandler);
            if (amqpSendLink != null) {
                return amqpSendLink;
            }
        }
        final AmqpSendLink amqpSendLink = terminalMockSendSession.lookupAmqpSendLinkFor(sendLinkHandler);
        if (amqpSendLink != null) {
            return amqpSendLink;
        }
        throw new NullPointerException("Lookup for AmqpSendLink failed.");
    }

    private boolean isTerminalSession(MockSendSession session) {
        return session == terminalMockSendSession;
    }

    @Override
    public void close() {
        for (MockSendSession mockSendSession : mockSendSessions) {
            mockSendSession.close();
        }
        terminalMockSendSession.close();
    }
}

/**
 * Mocks one AMQP session: a qpid-proton Session/SessionHandler pair, its send links, and a
 * terminal link handed out once the real links are exhausted.
 */
private static final class MockSendSession implements Closeable {
    private final String connectionId;
    private final Session session;
    private final Record sessionAttachments;
    private final SessionHandler sessionHandler;
    private final Sinks.Many<EndpointState> sessionStateSink;
    private final List<MockSendLink> mockSendLinks;
    private final MockSendLink terminalMockSendLink;
    private final int sendLinkCnt;
    private int sendLinkIdx;
    private MockSendLink currentMockSendLink;

    private MockSendSession(String connectionId, Session session, Record sessionAttachments,
        SessionHandler sessionHandler, Sinks.Many<EndpointState> sessionStateSink,
        List<MockSendLink> mockSendLinks, MockSendLink terminalMockSendLink) {
        this.connectionId = connectionId;
        this.session = session;
        this.sessionAttachments = sessionAttachments;
        this.sessionHandler = sessionHandler;
        this.sessionStateSink = sessionStateSink;
        this.mockSendLinks = mockSendLinks;
        this.terminalMockSendLink = terminalMockSendLink;
        this.sendLinkCnt = this.mockSendLinks.size();
        this.sendLinkIdx = 0;
    }

    static MockSendSession create(String connectionId, int sendLinkCnt) {
        final List<MockSendLink> mockSendLinks = new ArrayList<>(sendLinkCnt);
        for (int i = 0; i < sendLinkCnt; i++) {
            mockSendLinks.add(MockSendLink.create());
        }
        final MockSendLink terminalMockSendLink = MockSendLink.create();
        final Record sessionAttachments = mock(Record.class);
        final Session
session = mock(Session.class);
        final SessionHandler sessionHandler = mock(SessionHandler.class);
        // Replay-latest so late subscribers immediately observe the current session state.
        final Sinks.Many<EndpointState> sessionStateSink = Sinks.many().replay()
            .latestOrDefault(EndpointState.UNINITIALIZED);
        return new MockSendSession(connectionId, session, sessionAttachments, sessionHandler,
            sessionStateSink, Collections.unmodifiableList(mockSendLinks), terminalMockSendLink);
    }

    /** Stubs the qpid session, its handler, and every send link (including the terminal link). */
    void arrange() {
        for (MockSendLink mockSendLink : mockSendLinks) {
            mockSendLink.arrange();
            mockSendLink.emitSendLinkState(EndpointState.ACTIVE);
        }
        terminalMockSendLink.arrange();
        terminalMockSendLink.emitSendLinkCompletion();
        // Any send on the terminal link fails with a retriable error, regardless of overload.
        final Answer<Mono<Void>> terminalSendAnswer = new Answer<Mono<Void>>() {
            @Override
            public Mono<Void> answer(InvocationOnMock invocation) {
                return Mono.error(new AmqpException(true, "terminal-send-link-result", null));
            }
        };
        when(terminalMockSendLink.getAmqpSendLink().send(anyList())).then(terminalSendAnswer);
        when(terminalMockSendLink.getAmqpSendLink().send(any(Message.class))).then(terminalSendAnswer);
        when(terminalMockSendLink.getAmqpSendLink().send(any(Message.class), any(DeliveryState.class))).then(terminalSendAnswer);
        when(terminalMockSendLink.getAmqpSendLink().send(anyList(), any(DeliveryState.class))).then(terminalSendAnswer);
        when(terminalMockSendLink.getAmqpSendLink().send(any(), anyInt(), anyInt(), any(DeliveryState.class))).then(terminalSendAnswer);
        doNothing().when(sessionAttachments).set(any(), any(), anyString());
        when(session.attachments()).thenReturn(sessionAttachments);
        doNothing().when(session).open();
        doNothing().when(session).setCondition(any());
        when(sessionHandler.getConnectionId()).thenReturn(connectionId);
        when(sessionHandler.getEndpointStates()).thenReturn(sessionStateSink.asFlux().distinctUntilChanged());
        doNothing().when(sessionHandler).close();
        final ArrayList<Sender> qpidSenders = new ArrayList<>(sendLinkCnt + 1);
        for (MockSendLink mockSendLink : mockSendLinks) {
            qpidSenders.add(mockSendLink.getQpidSender());
        }
        qpidSenders.add(terminalMockSendLink.getQpidSender());
        // Return each qpid sender in order, one per session.sender(...) call.
        when(session.sender(any()))
            .thenReturn(qpidSenders.get(0), qpidSenders.subList(1, sendLinkCnt + 1).toArray(new Sender[0]));
    }

    Session getQpidSession() {
        return session;
    }

    SessionHandler getSessionHandler() {
        return sessionHandler;
    }

    AmqpSendLink getAmqpSendLink(int linkIdx) {
        Assertions.assertTrue(linkIdx >= 0 && linkIdx < sendLinkCnt, "linkIdx is not in range.");
        return mockSendLinks.get(linkIdx).getAmqpSendLink();
    }

    // Hands out the next link; once exhausted, hands out the terminal link.
    MockSendLink moveToNextSendLink() {
        final MockSendLink nextSendLink;
        if (sendLinkIdx >= sendLinkCnt) {
            nextSendLink = terminalMockSendLink;
        } else {
            nextSendLink = mockSendLinks.get(sendLinkIdx);
            sendLinkIdx++;
        }
        currentMockSendLink = nextSendLink;
        return nextSendLink;
    }

    boolean isTerminalSendLink(MockSendLink link) {
        return this.terminalMockSendLink == link;
    }

    MockSendLink getCurrentSendLink() {
        return Objects.requireNonNull(currentMockSendLink, "Current Link is null");
    }

    // Returns the AmqpSendLink owning the given handler, or null if it belongs elsewhere.
    AmqpSendLink lookupAmqpSendLinkFor(SendLinkHandler sendLinkHandler) {
        for (MockSendLink sendLink : mockSendLinks) {
            if (sendLink.getSendLinkHandler() == sendLinkHandler) {
                return sendLink.getAmqpSendLink();
            }
        }
        if (terminalMockSendLink.getSendLinkHandler() == sendLinkHandler) {
            return terminalMockSendLink.getAmqpSendLink();
        }
        return null;
    }

    void emitSessionState(EndpointState state) {
        this.sessionStateSink.emitNext(state, Sinks.EmitFailureHandler.FAIL_FAST);
    }

    void emitSessionError(Throwable error) {
        this.sessionStateSink.emitError(error, Sinks.EmitFailureHandler.FAIL_FAST);
    }

    void emitSessionCompletion() {
        this.sessionStateSink.emitComplete(Sinks.EmitFailureHandler.FAIL_FAST);
    }

    @Override
    public void close() {
        Mockito.framework().clearInlineMock(session);
        Mockito.framework().clearInlineMock(sessionAttachments);
        Mockito.framework().clearInlineMock(sessionHandler);
        for (MockSendLink sendLink : mockSendLinks) {
            sendLink.close();
        }
        terminalMockSendLink.close();
    }
}

/**
 * Mocks one send link: a qpid-proton Sender/SendLinkHandler pair, the higher-level
 * AmqpSendLink, and a state sink to drive link endpoint-state transitions.
 */
private static final class MockSendLink implements Closeable {
private final Sender sender;
    private final Record senderAttachments;
    private final AmqpSendLink amqpSendLink;
    private final SendLinkHandler sendLinkHandler;
    private final Sinks.Many<EndpointState> sendLinkStateSink;

    private MockSendLink(Sender sender, Record senderAttachments, AmqpSendLink amqpSendLink,
        SendLinkHandler sendLinkHandler, Sinks.Many<EndpointState> sendLinkStateSink) {
        this.sender = sender;
        this.senderAttachments = senderAttachments;
        this.amqpSendLink = amqpSendLink;
        this.sendLinkHandler = sendLinkHandler;
        this.sendLinkStateSink = sendLinkStateSink;
    }

    static MockSendLink create() {
        final Record senderAttachments = mock(Record.class);
        final Sender sender = mock(Sender.class);
        final AmqpSendLink amqpSendLink = mock(AmqpSendLink.class);
        final SendLinkHandler sendLinkHandler = mock(SendLinkHandler.class);
        // Replay-latest so late subscribers immediately observe the current link state.
        final Sinks.Many<EndpointState> sendLinkStateSink = Sinks.many().replay()
            .latestOrDefault(EndpointState.UNINITIALIZED);
        return new MockSendLink(sender, senderAttachments, amqpSendLink, sendLinkHandler, sendLinkStateSink);
    }

    /** Stubs the qpid sender, the AmqpSendLink (size + state stream), and the link handler. */
    void arrange() {
        doNothing().when(senderAttachments).set(any(), any(), anyString());
        when(sender.attachments()).thenReturn(senderAttachments);
        doNothing().when(sender).setTarget(any());
        doNothing().when(sender).setSenderSettleMode(any());
        doNothing().when(sender).setProperties(any());
        doNothing().when(sender).setSource(any());
        doNothing().when(sender).open();
        when(amqpSendLink.getLinkSize()).thenReturn(Mono.just(ServiceBusSenderAsyncClient.MAX_MESSAGE_LENGTH_BYTES));
        // The AmqpSendLink view maps proton EndpointState onto AmqpEndpointState.
        when(amqpSendLink.getEndpointStates())
            .thenReturn(sendLinkStateSink.asFlux().distinctUntilChanged().map(state -> toAmqpEndpointState(state)));
        when(sendLinkHandler.getEndpointStates()).thenReturn(sendLinkStateSink.asFlux().distinctUntilChanged());
        doNothing().when(sendLinkHandler).close();
    }

    Sender getQpidSender() {
        return sender;
    }

    AmqpSendLink getAmqpSendLink() {
        return amqpSendLink;
    }

    SendLinkHandler getSendLinkHandler() {
        return sendLinkHandler;
    }

    void emitSendLinkState(EndpointState state) {
        this.sendLinkStateSink.emitNext(state, Sinks.EmitFailureHandler.FAIL_FAST);
    }

    void emitSendLinkError(Throwable error) {
        this.sendLinkStateSink.emitError(error, Sinks.EmitFailureHandler.FAIL_FAST);
    }

    void emitSendLinkCompletion() {
        this.sendLinkStateSink.emitComplete(Sinks.EmitFailureHandler.FAIL_FAST);
    }

    private static AmqpEndpointState toAmqpEndpointState(EndpointState state) {
        switch (state) {
            case ACTIVE:
                return AmqpEndpointState.ACTIVE;
            case UNINITIALIZED:
                return AmqpEndpointState.UNINITIALIZED;
            case CLOSED:
                return AmqpEndpointState.CLOSED;
            default:
                throw new IllegalArgumentException("This endpoint state is not supported. State:" + state);
        }
    }

    @Override
    public void close() {
        Mockito.framework().clearInlineMock(sender);
        Mockito.framework().clearInlineMock(senderAttachments);
        Mockito.framework().clearInlineMock(amqpSendLink);
        Mockito.framework().clearInlineMock(sendLinkHandler);
    }
}
}
class ServiceBusSenderAsyncClientRecoveryIsolatedTest { private static final BinaryData TEST_CONTENTS = BinaryData.fromString("My message for service bus queue!"); private static final String FQDN = "contoso-shopping.servicebus.windows.net"; private static final String QUEUE_NAME = "orders"; private static final String CLIENT_IDENTIFIER = "client-identifier"; private static final ServiceBusSenderInstrumentation DEFAULT_INSTRUMENTATION = new ServiceBusSenderInstrumentation( null, null, FQDN, QUEUE_NAME); private static final Duration VIRTUAL_TIME_SHIFT = OPERATION_TIMEOUT.plusSeconds(30); private static final AmqpException RETRIABLE_LINK_ERROR = new AmqpException(true, AmqpErrorCondition.LINK_DETACH_FORCED, "detach-link-error", new AmqpErrorContext(FQDN)); private static final AmqpException RETRIABLE_SESSION_ERROR = new AmqpException(true, "session-error", new AmqpErrorContext(FQDN)); private static final AmqpException RETRIABLE_CONNECTION_ERROR = new AmqpException(true, AmqpErrorCondition.CONNECTION_FORCED, "connection-forced-error", new AmqpErrorContext(FQDN)); private static final AmqpException NON_RETRIABLE_ERROR_1 = new AmqpException(false, AmqpErrorCondition.NOT_ALLOWED, "not-allowed-error-1", new AmqpErrorContext(FQDN)); private static final AmqpException NON_RETRIABLE_ERROR_2 = new AmqpException(false, AmqpErrorCondition.NOT_ALLOWED, "not-allowed-error-2", new AmqpErrorContext(FQDN)); private static final AmqpRetryOptions RETRY_OPTIONS = new AmqpRetryOptions() .setMode(AmqpRetryMode.FIXED) .setMaxRetries(10) .setMaxDelay(Duration.ofSeconds(5)) .setDelay(Duration.ofSeconds(1)) .setTryTimeout(OPERATION_TIMEOUT); private final MessageSerializer messageSerializer = new ServiceBusMessageSerializer(); @Mock private Runnable onClientClosed; @Captor private ArgumentCaptor<List<Message>> sendMessagesCaptor0; @Captor private ArgumentCaptor<List<Message>> sendMessagesCaptor1; private AutoCloseable mocksCloseable; @BeforeEach void setup() { mocksCloseable = 
MockitoAnnotations.openMocks(this); } @AfterEach void teardown() throws Exception { Mockito.framework().clearInlineMock(this); if (mocksCloseable != null) { mocksCloseable.close(); } } @Test @Execution(ExecutionMode.SAME_THREAD) @Test @Execution(ExecutionMode.SAME_THREAD) void shouldBubbleUpNonRetriableSendLinkError() { final int sessionsCnt = 1; final int[] linksPerSession = new int[] { 2 }; try (MockEndpoint endpoint = createMockEndpoint(sessionsCnt, linksPerSession)) { final Supplier<ServiceBusReactorAmqpConnection> connectionSupplier = singleConnectionSupplier(endpoint); final Answer<Mono<Void>> sendAnswer = new Answer<Mono<Void>>() { private int invocationCount = -1; @Override public Mono<Void> answer(InvocationOnMock invocation) { invocationCount++; switch (invocationCount) { case 0: endpoint.emitCurrentSendLinkError(RETRIABLE_LINK_ERROR); return Mono.error(RETRIABLE_LINK_ERROR); case 1: endpoint.emitCurrentSendLinkError(NON_RETRIABLE_ERROR_1); return Mono.error(NON_RETRIABLE_ERROR_1); default: throw new RuntimeException("More than two invocations of send-answer is not expected."); } } }; when(endpoint.getAmqpSendLink(0, 0).send(anyList())).thenAnswer(sendAnswer); when(endpoint.getAmqpSendLink(0, 1).send(anyList())).thenAnswer(sendAnswer); final int messagesCount = 4; final List<ServiceBusMessage> messagesToSend = createMessagesToSend(messagesCount); final ReactorConnectionCache<ServiceBusReactorAmqpConnection> connectionCache = createConnectionCache(connectionSupplier); final ServiceBusSenderAsyncClient sender = createSenderAsyncClient(connectionCache, false); try { try (VirtualTimeStepVerifier verifier = new VirtualTimeStepVerifier()) { verifier.create(() -> sender.sendMessages(messagesToSend)) .thenAwait(VIRTUAL_TIME_SHIFT) .verifyErrorSatisfies(e -> { Assertions.assertTrue(e instanceof ServiceBusException); final ServiceBusException se = (ServiceBusException) e; Assertions.assertSame(NON_RETRIABLE_ERROR_1, se.getCause()); }); } 
verify(endpoint.getAmqpSendLink(0, 0)).send(sendMessagesCaptor0.capture()); final List<Message> messagesSendInSession0Link0 = sendMessagesCaptor0.getValue(); Assertions.assertEquals(messagesCount, messagesSendInSession0Link0.size()); verify(endpoint.getAmqpSendLink(0, 1)).send(sendMessagesCaptor1.capture()); final List<Message> messagesSendInSession0Link1 = sendMessagesCaptor1.getValue(); Assertions.assertEquals(messagesCount, messagesSendInSession0Link1.size()); verifyNoInteractions(onClientClosed); } finally { sender.close(); connectionCache.dispose(); } } } @Test @Execution(ExecutionMode.SAME_THREAD) void shouldRecoverFromRetriableSessionError() { final int sessionsCnt = 2; final int[] linksPerSession = new int[] { 1, 1 }; try (MockEndpoint endpoint = createMockEndpoint(sessionsCnt, linksPerSession)) { final Supplier<ServiceBusReactorAmqpConnection> connectionSupplier = singleConnectionSupplier(endpoint); final Answer<Mono<Void>> sendAnswer = new Answer<Mono<Void>>() { private int invocationCount = -1; @Override public Mono<Void> answer(InvocationOnMock invocation) { invocationCount++; switch (invocationCount) { case 0: endpoint.emitCurrentSessionError(RETRIABLE_SESSION_ERROR); return Mono.error(RETRIABLE_SESSION_ERROR); case 1: return Mono.empty(); default: throw new RuntimeException("More than two invocations of send-answer is not expected."); } } }; when(endpoint.getAmqpSendLink(0, 0).send(anyList())).thenAnswer(sendAnswer); when(endpoint.getAmqpSendLink(1, 0).send(anyList())).thenAnswer(sendAnswer); final int messagesCount = 4; final List<ServiceBusMessage> messagesToSend = createMessagesToSend(messagesCount); final ReactorConnectionCache<ServiceBusReactorAmqpConnection> connectionCache = createConnectionCache(connectionSupplier); final ServiceBusSenderAsyncClient sender = createSenderAsyncClient(connectionCache, false); try { try (VirtualTimeStepVerifier verifier = new VirtualTimeStepVerifier()) { verifier.create(() -> sender.sendMessages(messagesToSend)) 
.thenAwait(VIRTUAL_TIME_SHIFT) .verifyComplete(); } verify(endpoint.getAmqpSendLink(0, 0)).send(sendMessagesCaptor0.capture()); final List<Message> messagesSendInSession0Link0 = sendMessagesCaptor0.getValue(); Assertions.assertEquals(messagesCount, messagesSendInSession0Link0.size()); verify(endpoint.getAmqpSendLink(1, 0)).send(sendMessagesCaptor1.capture()); final List<Message> messagesSendInSession1Link0 = sendMessagesCaptor1.getValue(); Assertions.assertEquals(messagesCount, messagesSendInSession1Link0.size()); } finally { sender.close(); connectionCache.dispose(); } } } @Test @Execution(ExecutionMode.SAME_THREAD) void shouldBubbleUpNonRetriableSessionError() { final int sessionsCnt = 2; final int[] linksPerSession = new int[] { 1, 1 }; try (MockEndpoint endpoint = createMockEndpoint(sessionsCnt, linksPerSession)) { final Supplier<ServiceBusReactorAmqpConnection> connectionSupplier = singleConnectionSupplier(endpoint); final Answer<Mono<Void>> sendAnswer = new Answer<Mono<Void>>() { private int invocationCount = -1; @Override public Mono<Void> answer(InvocationOnMock invocation) { invocationCount++; switch (invocationCount) { case 0: endpoint.emitCurrentSessionError(RETRIABLE_SESSION_ERROR); return Mono.error(RETRIABLE_SESSION_ERROR); case 1: endpoint.emitCurrentSessionError(NON_RETRIABLE_ERROR_1); return Mono.error(NON_RETRIABLE_ERROR_1); default: throw new RuntimeException("More than two invocations of send-answer is not expected."); } } }; when(endpoint.getAmqpSendLink(0, 0).send(anyList())).thenAnswer(sendAnswer); when(endpoint.getAmqpSendLink(1, 0).send(anyList())).thenAnswer(sendAnswer); final int messagesCount = 4; final List<ServiceBusMessage> messagesToSend = createMessagesToSend(messagesCount); final ReactorConnectionCache<ServiceBusReactorAmqpConnection> connectionCache = createConnectionCache(connectionSupplier); final ServiceBusSenderAsyncClient sender = createSenderAsyncClient(connectionCache, false); try { try (VirtualTimeStepVerifier verifier = 
new VirtualTimeStepVerifier()) { verifier.create(() -> sender.sendMessages(messagesToSend)) .thenAwait(VIRTUAL_TIME_SHIFT) .verifyErrorSatisfies(e -> { Assertions.assertTrue(e instanceof ServiceBusException); final ServiceBusException se = (ServiceBusException) e; Assertions.assertSame(NON_RETRIABLE_ERROR_1, se.getCause()); }); } verify(endpoint.getAmqpSendLink(0, 0)).send(sendMessagesCaptor0.capture()); final List<Message> messagesSendInSession0Link0 = sendMessagesCaptor0.getValue(); Assertions.assertEquals(messagesCount, messagesSendInSession0Link0.size()); verify(endpoint.getAmqpSendLink(1, 0)).send(sendMessagesCaptor1.capture()); final List<Message> messagesSendInSession1Link0 = sendMessagesCaptor1.getValue(); Assertions.assertEquals(messagesCount, messagesSendInSession1Link0.size()); verifyNoInteractions(onClientClosed); } finally { sender.close(); connectionCache.dispose(); } } } @Test @Execution(ExecutionMode.SAME_THREAD) void shouldSenderReusableAfterNonRetriableLinkAndSessionError() { final int sessionsCnt = 3; final int[] linksPerSession = new int[] { 1, 1, 1 }; try (MockEndpoint endpoint = createMockEndpoint(sessionsCnt, linksPerSession)) { final Supplier<ServiceBusReactorAmqpConnection> connectionSupplier = new Supplier<ServiceBusReactorAmqpConnection>() { private int invocationCount = -1; @Override public ServiceBusReactorAmqpConnection get() { invocationCount++; switch (invocationCount) { case 0: final ServiceBusReactorAmqpConnection c = endpoint.arrange(); return c; default: throw new RuntimeException("More than one invocation of connection-supplier is not expected."); } } }; final Answer<Mono<Void>> sendAnswer = new Answer<Mono<Void>>() { private int invocationCount = -1; @Override public Mono<Void> answer(InvocationOnMock invocation) { invocationCount++; switch (invocationCount) { case 0: endpoint.emitCurrentSendLinkError(NON_RETRIABLE_ERROR_1); return Mono.error(NON_RETRIABLE_ERROR_1); case 1: 
endpoint.emitCurrentSessionError(NON_RETRIABLE_ERROR_2); return Mono.error(NON_RETRIABLE_ERROR_2); case 2: return Mono.empty(); default: throw new RuntimeException("More than three invocations of send-answer is not expected."); } } }; when(endpoint.getAmqpSendLink(0, 0).send(anyList())).thenAnswer(sendAnswer); when(endpoint.getAmqpSendLink(1, 0).send(anyList())).thenAnswer(sendAnswer); when(endpoint.getAmqpSendLink(2, 0).send(any(Message.class))).thenAnswer(sendAnswer); final int messagesCount = 4; final List<ServiceBusMessage> messagesToSend = createMessagesToSend(messagesCount); final ServiceBusMessage messageToSend = createMessageToSend(); final ReactorConnectionCache<ServiceBusReactorAmqpConnection> connectionCache = createConnectionCache(connectionSupplier); final ServiceBusSenderAsyncClient sender = createSenderAsyncClient(connectionCache, false); try { try (VirtualTimeStepVerifier verifier = new VirtualTimeStepVerifier()) { verifier.create(() -> sender.sendMessages(messagesToSend)) .thenAwait(VIRTUAL_TIME_SHIFT) .verifyErrorSatisfies(e -> { Assertions.assertTrue(e instanceof ServiceBusException); final ServiceBusException se = (ServiceBusException) e; Assertions.assertSame(NON_RETRIABLE_ERROR_1, se.getCause()); }); } try (VirtualTimeStepVerifier verifier = new VirtualTimeStepVerifier()) { verifier.create(() -> sender.sendMessages(messagesToSend)) .thenAwait(VIRTUAL_TIME_SHIFT) .verifyErrorSatisfies(e -> { Assertions.assertTrue(e instanceof ServiceBusException); final ServiceBusException se = (ServiceBusException) e; Assertions.assertSame(NON_RETRIABLE_ERROR_2, se.getCause()); }); } try (VirtualTimeStepVerifier verifier = new VirtualTimeStepVerifier()) { verifier.create(() -> sender.sendMessage(messageToSend)) .thenAwait(VIRTUAL_TIME_SHIFT) .verifyComplete(); } verify(endpoint.getAmqpSendLink(0, 0)).send(sendMessagesCaptor0.capture()); final List<Message> messagesSendInSession0Link0 = sendMessagesCaptor0.getValue(); Assertions.assertEquals(messagesCount, 
messagesSendInSession0Link0.size()); verify(endpoint.getAmqpSendLink(1, 0)).send(sendMessagesCaptor1.capture()); final List<Message> messagesSendInSession1Link0 = sendMessagesCaptor1.getValue(); Assertions.assertEquals(messagesCount, messagesSendInSession1Link0.size()); verify(endpoint.getAmqpSendLink(2, 0), times(1)).send(any(Message.class)); } finally { sender.close(); connectionCache.dispose(); } } } @Test @Execution(ExecutionMode.SAME_THREAD) void shouldRecoverFromRetriableConnectionError() { final int endpointsCount = 4; final List<SessionLinkCount> sessionLinkCountList = new ArrayList<>(endpointsCount); sessionLinkCountList.add(new SessionLinkCount(1, new int[] { 1 })); sessionLinkCountList.add(new SessionLinkCount(1, new int[] { 1 })); sessionLinkCountList.add(new SessionLinkCount(1, new int[] { 1 })); sessionLinkCountList.add(new SessionLinkCount(1, new int[] { 1 })); try (MockEndpoints endpoints = createMockEndpoints(sessionLinkCountList)) { final AtomicReference<MockEndpoint> currentEndpoint = new AtomicReference<>(); final Supplier<ServiceBusReactorAmqpConnection> connectionSupplier = new Supplier<ServiceBusReactorAmqpConnection>() { private int endpointIndex = -1; @Override public ServiceBusReactorAmqpConnection get() { endpointIndex++; if (endpointIndex >= endpointsCount) { throw new RuntimeException("More than " + endpointsCount + " invocation of connection-supplier is not expected."); } final MockEndpoint e = endpoints.get(endpointIndex); currentEndpoint.set(e); final ServiceBusReactorAmqpConnection c = e.arrange(); return c; } }; final Answer<Mono<Void>> sendAnswer = new Answer<Mono<Void>>() { private int invocationCount = -1; @Override public Mono<Void> answer(InvocationOnMock invocation) { invocationCount++; switch (invocationCount) { case 0: Assertions.assertEquals(endpoints.get(0), currentEndpoint.get()); endpoints.get(0).emitCurrentSendLinkError(RETRIABLE_LINK_ERROR); return Mono.error(RETRIABLE_LINK_ERROR); case 1: 
Assertions.assertEquals(endpoints.get(1), currentEndpoint.get()); endpoints.get(1).emitCurrentSessionError(RETRIABLE_SESSION_ERROR); return Mono.error(RETRIABLE_SESSION_ERROR); case 2: Assertions.assertEquals(endpoints.get(2), currentEndpoint.get()); endpoints.get(2).emitConnectionError(RETRIABLE_CONNECTION_ERROR); return Mono.error(RETRIABLE_CONNECTION_ERROR); case 3: Assertions.assertEquals(endpoints.get(3), currentEndpoint.get()); return Mono.empty(); default: throw new RuntimeException("More than three invocations of send-answer is not expected."); } } }; when(endpoints.get(0).getAmqpSendLink(0, 0).send(anyList())).thenAnswer(sendAnswer); when(endpoints.get(1).getAmqpSendLink(0, 0).send(anyList())).thenAnswer(sendAnswer); when(endpoints.get(2).getAmqpSendLink(0, 0).send(anyList())).thenAnswer(sendAnswer); when(endpoints.get(3).getAmqpSendLink(0, 0).send(anyList())).thenAnswer(sendAnswer); final int messagesCount = 4; final List<ServiceBusMessage> messagesToSend = createMessagesToSend(messagesCount); final ReactorConnectionCache<ServiceBusReactorAmqpConnection> connectionCache = createConnectionCache(connectionSupplier); final ServiceBusSenderAsyncClient sender = createSenderAsyncClient(connectionCache, false); try { try (VirtualTimeStepVerifier verifier = new VirtualTimeStepVerifier()) { verifier.create(() -> sender.sendMessages(messagesToSend)) .thenAwait(VIRTUAL_TIME_SHIFT) .verifyComplete(); } verify(endpoints.get(0).getAmqpSendLink(0, 0), times(1)).send(anyList()); verify(endpoints.get(1).getAmqpSendLink(0, 0), times(1)).send(anyList()); verify(endpoints.get(2).getAmqpSendLink(0, 0), times(1)).send(anyList()); verify(endpoints.get(3).getAmqpSendLink(0, 0), times(1)).send(anyList()); } finally { sender.close(); connectionCache.dispose(); } } } private MockEndpoint createMockEndpoint(int sessionsCnt, int[] linksPerSession) { final String connectionId = "1"; return MockEndpoint.create(connectionId, QUEUE_NAME, RETRY_OPTIONS, sessionsCnt, linksPerSession); } 
private MockEndpoints createMockEndpoints(List<SessionLinkCount> sessionLinkCountList) { return MockEndpoints.create(QUEUE_NAME, RETRY_OPTIONS, sessionLinkCountList); } private Supplier<ServiceBusReactorAmqpConnection> singleConnectionSupplier(MockEndpoint endpoint) { final Supplier<ServiceBusReactorAmqpConnection> connectionSupplier = new Supplier<ServiceBusReactorAmqpConnection>() { private int invocationCount = -1; @Override public ServiceBusReactorAmqpConnection get() { invocationCount++; switch (invocationCount) { case 0: final ServiceBusReactorAmqpConnection c = endpoint.arrange(); return c; default: throw new RuntimeException("More than one invocation of connection-supplier is not expected."); } } }; return connectionSupplier; } private ReactorConnectionCache<ServiceBusReactorAmqpConnection> createConnectionCache( Supplier<ServiceBusReactorAmqpConnection> connectionSupplier) { return new ReactorConnectionCache<>(connectionSupplier, FQDN, QUEUE_NAME, getRetryPolicy(RETRY_OPTIONS), new HashMap<>()); } private ServiceBusSenderAsyncClient createSenderAsyncClient( ReactorConnectionCache<ServiceBusReactorAmqpConnection> connectionCache, boolean isSharedConnection) { final ConnectionCacheWrapper connectionSupport = new ConnectionCacheWrapper(connectionCache); return new ServiceBusSenderAsyncClient(QUEUE_NAME, MessagingEntityType.QUEUE, connectionSupport, RETRY_OPTIONS, DEFAULT_INSTRUMENTATION, messageSerializer, onClientClosed, "", CLIENT_IDENTIFIER); } private static List<ServiceBusMessage> createMessagesToSend(int messagesCount) { return IntStream.range(0, messagesCount) .mapToObj(__ -> new ServiceBusMessage(TEST_CONTENTS)) .collect(Collectors.toList()); } private static ServiceBusMessage createMessageToSend() { final ServiceBusMessage messageToSend = new ServiceBusMessage(TEST_CONTENTS); return messageToSend; } private static final class VirtualTimeStepVerifier implements AutoCloseable { private final VirtualTimeScheduler scheduler; VirtualTimeStepVerifier() { 
scheduler = VirtualTimeScheduler.create(); } <T> StepVerifier.Step<T> create(Supplier<Mono<T>> scenarioSupplier) { return StepVerifier.withVirtualTime(scenarioSupplier, () -> scheduler, 0); } @Override public void close() { scheduler.dispose(); } } private static class SessionLinkCount { private final int sessionsCnt; private final int[] linksPerSession; SessionLinkCount(int sessionsCnt, int[] linksPerSession) { this.sessionsCnt = sessionsCnt; this.linksPerSession = linksPerSession; } } private static final class MockEndpoints implements Closeable { private final List<MockEndpoint> mockEndpoints; private final int mockEndpointsCnt; private MockEndpoints(List<MockEndpoint> mockEndpoints) { this.mockEndpoints = mockEndpoints; this.mockEndpointsCnt = this.mockEndpoints.size(); } static MockEndpoints create(String queueName, AmqpRetryOptions retryOptions, List<SessionLinkCount> sessionLinkCounts) { final List<MockEndpoint> mockEndpoints = new ArrayList<>(sessionLinkCounts.size()); int conId = 1; for (SessionLinkCount slc : sessionLinkCounts) { mockEndpoints.add(MockEndpoint.create(String.valueOf(conId), queueName, retryOptions, slc.sessionsCnt, slc.linksPerSession)); conId++; } return new MockEndpoints(mockEndpoints); } MockEndpoint get(int index) { if (index >= mockEndpointsCnt) { throw new IndexOutOfBoundsException("index:" + index + " maxIndex: " + (mockEndpointsCnt - 1)); } return mockEndpoints.get(index); } @Override public void close() { for (MockEndpoint mockEndpoint : mockEndpoints) { mockEndpoint.close(); } } } private static final class MockEndpoint implements Closeable { private final String connectionId; private final String queueName; private final AmqpRetryOptions retryOptions; private final MockSendSessions mockSendSessions; private final ConnectionOptions connectionOptions; private final Connection connection; private final Reactor reactor; private final ReactorDispatcher reactorDispatcher; private final ReactorExecutor reactorExecutor; private final 
ReactorProvider reactorProvider; private final ConnectionHandler connectionHandler; private final Sinks.Many<EndpointState> connectionStateSink; private final ReactorHandlerProvider handlerProvider; private final ServiceBusAmqpLinkProvider linkProvider; private final TokenManagerProvider tokenManagerProvider; private final TokenManager tokenManager; private final MessageSerializer messageSerializer; private final AtomicBoolean arranged = new AtomicBoolean(false); private MockEndpoint(String connectionId, String queueName, AmqpRetryOptions retryOptions, MockSendSessions mockSendSessions, ConnectionOptions connectionOptions, Connection connection, Reactor reactor, ReactorDispatcher reactorDispatcher, ReactorExecutor reactorExecutor, ReactorProvider reactorProvider, ConnectionHandler connectionHandler, Sinks.Many<EndpointState> connectionStateSink, ReactorHandlerProvider handlerProvider, ServiceBusAmqpLinkProvider linkProvider, TokenManagerProvider tokenManagerProvider, TokenManager tokenManager, MessageSerializer messageSerializer) { this.connectionId = connectionId; this.queueName = queueName; this.retryOptions = retryOptions; this.mockSendSessions = mockSendSessions; this.connectionOptions = connectionOptions; this.connection = connection; this.reactor = reactor; this.reactorDispatcher = reactorDispatcher; this.reactorExecutor = reactorExecutor; this.reactorProvider = reactorProvider; this.connectionHandler = connectionHandler; this.connectionStateSink = connectionStateSink; this.handlerProvider = handlerProvider; this.linkProvider = linkProvider; this.tokenManagerProvider = tokenManagerProvider; this.tokenManager = tokenManager; this.messageSerializer = messageSerializer; } static MockEndpoint create(String connectionId, String queueName, AmqpRetryOptions retryOptions, int sessionsCnt, int[] linksPerSession) { Assertions.assertNotNull(retryOptions); Assertions.assertTrue(sessionsCnt > 0, "sessionsCnt must be > 0."); Assertions.assertEquals(sessionsCnt, 
linksPerSession.length); for (int linksCnt = 0; linksCnt < linksPerSession.length; linksCnt++) { Assertions.assertTrue(linksCnt >= 0, "links-count in linksPerSession must be >= 0."); } final MockSendSessions mockSendSessions = MockSendSessions.create(connectionId, sessionsCnt, linksPerSession); final ConnectionOptions connectionOptions = mock(ConnectionOptions.class); final Connection connection = mock(Connection.class); final Sinks.Many<EndpointState> connectionStateSink = Sinks.many().replay() .latestOrDefault(EndpointState.UNINITIALIZED); final ConnectionHandler connectionHandler = mock(ConnectionHandler.class); final Reactor reactor = mock(Reactor.class); final ReactorDispatcher reactorDispatcher = mock(ReactorDispatcher.class); final ReactorExecutor reactorExecutor = mock(ReactorExecutor.class); final ReactorProvider reactorProvider = mock(ReactorProvider.class); final ReactorHandlerProvider handlerProvider = mock(ReactorHandlerProvider.class); final ServiceBusAmqpLinkProvider linkProvider = mock(ServiceBusAmqpLinkProvider.class); final TokenManager tokenManager = mock(TokenManager.class); final TokenManagerProvider tokenManagerProvider = mock(TokenManagerProvider.class); final MessageSerializer messageSerializer = mock(MessageSerializer.class); return new MockEndpoint(connectionId, queueName, retryOptions, mockSendSessions, connectionOptions, connection, reactor, reactorDispatcher, reactorExecutor, reactorProvider, connectionHandler, connectionStateSink, handlerProvider, linkProvider, tokenManagerProvider, tokenManager, messageSerializer); } ServiceBusReactorAmqpConnection arrange() { if (arranged.getAndSet(true)) { throw new RuntimeException("Only one connection can be obtained from a MockEndpoint instance."); } mockSendSessions.arrange(handlerProvider, linkProvider, connection, connectionStateSink); when(connectionOptions.getRetry()).thenReturn(retryOptions); doNothing().when(connection).close(); connectionStateSink.emitNext(EndpointState.ACTIVE, 
Sinks.EmitFailureHandler.FAIL_FAST); when(connectionHandler.getEndpointStates()).thenReturn(connectionStateSink.asFlux().distinctUntilChanged()); doNothing().when(connectionHandler).close(); when(reactor.connectionToHost(any(), anyInt(), any())).thenReturn(connection); try { doAnswer(invocation -> { final Runnable work = invocation.getArgument(0); work.run(); return null; }).when(reactorDispatcher).invoke(any(Runnable.class)); } catch (IOException ioe) { throw new UncheckedIOException(ioe); } when(reactorDispatcher.getShutdownSignal()).thenReturn(Mono.empty()); doNothing().when(reactorExecutor).start(); when(reactorExecutor.closeAsync()).thenReturn(Mono.empty()); try { when(reactorProvider.createReactor(anyString(), anyInt())).thenReturn(reactor); } catch (IOException ioe) { throw new UncheckedIOException(ioe); } when(reactorProvider.getReactorDispatcher()).thenReturn(reactorDispatcher); when(reactorProvider.createExecutor(any(), anyString(), any(), any(), any())).thenReturn(reactorExecutor); when(handlerProvider.createConnectionHandler(anyString(), any())).thenReturn(connectionHandler); when(tokenManager.authorize()).thenReturn(Mono.just(Duration.ofHours(1).toMillis())); when(tokenManagerProvider.getTokenManager(any(), anyString())).thenReturn(tokenManager); final boolean isV2 = true; return new ServiceBusReactorAmqpConnection(connectionId, connectionOptions, reactorProvider, handlerProvider, linkProvider, tokenManagerProvider, messageSerializer, false, isV2); } AmqpSendLink getAmqpSendLink(int sessionIdx, int linkIdx) { return mockSendSessions.getAmqpSendLink(sessionIdx, linkIdx); } void emitConnectionError(Throwable throwable) { connectionStateSink.emitError(throwable, Sinks.EmitFailureHandler.FAIL_FAST); } void emitConnectionCompletion() { connectionStateSink.emitComplete(Sinks.EmitFailureHandler.FAIL_FAST); } void emitCurrentSessionState(EndpointState state) { final MockSendSession session = mockSendSessions.getCurrentSendSession(); 
session.emitSessionState(state); } void emitCurrentSessionError(Throwable throwable) { final MockSendSession session = mockSendSessions.getCurrentSendSession(); session.emitSessionError(throwable); } void emitCurrentSessionCompletion() { final MockSendSession session = mockSendSessions.getCurrentSendSession(); session.emitSessionCompletion(); } void emitCurrentSendLinkState(EndpointState state) { final MockSendLink sendLink = mockSendSessions.getCurrentSendLink(); sendLink.emitSendLinkState(state); } void emitCurrentSendLinkError(Throwable throwable) { final MockSendLink sendLink = mockSendSessions.getCurrentSendLink(); sendLink.emitSendLinkError(throwable); } void emitCurrentSendLinkCompletion() { final MockSendLink sendLink = mockSendSessions.getCurrentSendLink(); sendLink.emitSendLinkCompletion(); } @Override public void close() { Mockito.framework().clearInlineMock(connectionOptions); Mockito.framework().clearInlineMock(connection); Mockito.framework().clearInlineMock(connectionHandler); Mockito.framework().clearInlineMock(reactor); Mockito.framework().clearInlineMock(reactorDispatcher); Mockito.framework().clearInlineMock(reactorExecutor); Mockito.framework().clearInlineMock(reactorProvider); Mockito.framework().clearInlineMock(handlerProvider); Mockito.framework().clearInlineMock(linkProvider); Mockito.framework().clearInlineMock(tokenManager); Mockito.framework().clearInlineMock(tokenManagerProvider); Mockito.framework().clearInlineMock(messageSerializer); mockSendSessions.close(); } } private static final class MockSendSessions implements Closeable { private final Object lock = new Object(); private final List<MockSendSession> mockSendSessions; private final MockSendSession terminalMockSendSession; private final int sessionsCnt; private int sessionIdx; private MockSendSession currentMockSendSession; private MockSendSessions(List<MockSendSession> mockSendSessions, MockSendSession terminalMockSendSession) { this.mockSendSessions = mockSendSessions; 
this.terminalMockSendSession = terminalMockSendSession; this.sessionsCnt = this.mockSendSessions.size(); this.sessionIdx = 0; } static MockSendSessions create(String connectionId, int sessionsCnt, int[] linksPerSession) { final List<MockSendSession> mockSendSessions = new ArrayList<>(sessionsCnt); for (int i = 0; i < sessionsCnt; i++) { mockSendSessions.add(MockSendSession.create(connectionId, linksPerSession[i])); } final MockSendSession terminalMockSendSession = MockSendSession.create(connectionId, 0); return new MockSendSessions(Collections.unmodifiableList(mockSendSessions), terminalMockSendSession); } void arrange(ReactorHandlerProvider handlerProvider, AmqpLinkProvider linkProvider, Connection connection, Sinks.Many<EndpointState> connectionStateSink) { for (MockSendSession mockSession : mockSendSessions) { mockSession.arrange(); mockSession.emitSessionState(EndpointState.ACTIVE); } terminalMockSendSession.arrange(); terminalMockSendSession.emitSessionCompletion(); when(handlerProvider.createSessionHandler(anyString(), any(), anyString(), any())) .thenAnswer(invocation -> { final MockSendSession session = moveToNextSendSession(connectionStateSink); return session.getSessionHandler(); }); when(handlerProvider.createSendLinkHandler(anyString(), any(), anyString(), anyString())) .thenAnswer(invocation -> { final MockSendLink sendLink = moveToNextSendLinkInCurrentSession(); return sendLink.getSendLinkHandler(); }); when(linkProvider.createSendLink(any(ServiceBusReactorAmqpConnection.class), anyString(), any(Sender.class), any(SendLinkHandler.class), any(ReactorProvider.class), any(TokenManager.class), any(MessageSerializer.class), any(AmqpRetryOptions.class), any(Scheduler.class), any())) .thenAnswer(invocation -> { final SendLinkHandler sendLinkHandler = invocation.getArgument(3); final AmqpSendLink amqpSendLink = lookupAmqpSendLinkFor(sendLinkHandler); return amqpSendLink; }); final ArrayList<Session> qpidSessions = new ArrayList<>(sessionsCnt + 1); for 
(MockSendSession mockSendSession : mockSendSessions) { qpidSessions.add(mockSendSession.getQpidSession()); } qpidSessions.add(terminalMockSendSession.getQpidSession()); when(connection.session()) .thenReturn(qpidSessions.get(0), qpidSessions.subList(1, sessionsCnt + 1).toArray(new Session[0])); } AmqpSendLink getAmqpSendLink(int sessionIdx, int linkIdx) { Assertions.assertTrue(sessionIdx >= 0 && sessionIdx < sessionsCnt, "sessionIdx is not in range."); final MockSendSession session = mockSendSessions.get(sessionIdx); return session.getAmqpSendLink(linkIdx); } MockSendSession getCurrentSendSession() { final MockSendSession session; synchronized (lock) { session = Objects.requireNonNull(currentMockSendSession, "Current Session is null"); } return session; } MockSendLink getCurrentSendLink() { final MockSendLink sendLink; synchronized (lock) { final MockSendSession session = getCurrentSendSession(); sendLink = session.getCurrentSendLink(); } return sendLink; } private MockSendSession moveToNextSendSession(Sinks.Many<EndpointState> connectionStateSink) { final MockSendSession nextSession; synchronized (lock) { if (sessionIdx >= sessionsCnt) { nextSession = terminalMockSendSession; } else { nextSession = mockSendSessions.get(sessionIdx); sessionIdx++; } this.currentMockSendSession = nextSession; } if (isTerminalSession(nextSession)) { connectionStateSink.emitComplete(Sinks.EmitFailureHandler.FAIL_FAST); } return nextSession; } private MockSendLink moveToNextSendLinkInCurrentSession() { final MockSendSession session; final MockSendLink nextSendLink; synchronized (lock) { session = Objects.requireNonNull(currentMockSendSession, "Current Session is null"); nextSendLink = session.moveToNextSendLink(); } if (session.isTerminalSendLink(nextSendLink)) { session.emitSessionCompletion(); } return nextSendLink; } private AmqpSendLink lookupAmqpSendLinkFor(SendLinkHandler sendLinkHandler) { for (MockSendSession mockSendSession : mockSendSessions) { final AmqpSendLink amqpSendLink 
= mockSendSession.lookupAmqpSendLinkFor(sendLinkHandler); if (amqpSendLink != null) { return amqpSendLink; } } final AmqpSendLink amqpSendLink = terminalMockSendSession.lookupAmqpSendLinkFor(sendLinkHandler); if (amqpSendLink != null) { return amqpSendLink; } throw new NullPointerException("Lookup for AmqpSendLink failed."); } private boolean isTerminalSession(MockSendSession session) { return session == terminalMockSendSession; } @Override public void close() { for (MockSendSession mockSendSession : mockSendSessions) { mockSendSession.close(); } terminalMockSendSession.close(); } } private static final class MockSendSession implements Closeable { private final String connectionId; private final Session session; private final Record sessionAttachments; private final SessionHandler sessionHandler; private final Sinks.Many<EndpointState> sessionStateSink; private final List<MockSendLink> mockSendLinks; private final MockSendLink terminalMockSendLink; private final int sendLinkCnt; private int sendLinkIdx; private MockSendLink currentMockSendLink; private MockSendSession(String connectionId, Session session, Record sessionAttachments, SessionHandler sessionHandler, Sinks.Many<EndpointState> sessionStateSink, List<MockSendLink> mockSendLinks, MockSendLink terminalMockSendLink) { this.connectionId = connectionId; this.session = session; this.sessionAttachments = sessionAttachments; this.sessionHandler = sessionHandler; this.sessionStateSink = sessionStateSink; this.mockSendLinks = mockSendLinks; this.terminalMockSendLink = terminalMockSendLink; this.sendLinkCnt = this.mockSendLinks.size(); this.sendLinkIdx = 0; } static MockSendSession create(String connectionId, int sendLinkCnt) { final List<MockSendLink> mockSendLinks = new ArrayList<>(sendLinkCnt); for (int i = 0; i < sendLinkCnt; i++) { mockSendLinks.add(MockSendLink.create()); } final MockSendLink terminalMockSendLink = MockSendLink.create(); final Record sessionAttachments = mock(Record.class); final Session 
session = mock(Session.class); final SessionHandler sessionHandler = mock(SessionHandler.class); final Sinks.Many<EndpointState> sessionStateSink = Sinks.many().replay() .latestOrDefault(EndpointState.UNINITIALIZED); return new MockSendSession(connectionId, session, sessionAttachments, sessionHandler, sessionStateSink, Collections.unmodifiableList(mockSendLinks), terminalMockSendLink); } void arrange() { for (MockSendLink mockSendLink : mockSendLinks) { mockSendLink.arrange(); mockSendLink.emitSendLinkState(EndpointState.ACTIVE); } terminalMockSendLink.arrange(); terminalMockSendLink.emitSendLinkCompletion(); final Answer<Mono<Void>> terminalSendAnswer = new Answer<Mono<Void>>() { @Override public Mono<Void> answer(InvocationOnMock invocation) { return Mono.error(new AmqpException(true, "terminal-send-link-result", null)); } }; when(terminalMockSendLink.getAmqpSendLink().send(anyList())).then(terminalSendAnswer); when(terminalMockSendLink.getAmqpSendLink().send(any(Message.class))).then(terminalSendAnswer); when(terminalMockSendLink.getAmqpSendLink().send(any(Message.class), any(DeliveryState.class))).then(terminalSendAnswer); when(terminalMockSendLink.getAmqpSendLink().send(anyList(), any(DeliveryState.class))).then(terminalSendAnswer); when(terminalMockSendLink.getAmqpSendLink().send(any(), anyInt(), anyInt(), any(DeliveryState.class))).then(terminalSendAnswer); doNothing().when(sessionAttachments).set(any(), any(), anyString()); when(session.attachments()).thenReturn(sessionAttachments); doNothing().when(session).open(); doNothing().when(session).setCondition(any()); when(sessionHandler.getConnectionId()).thenReturn(connectionId); when(sessionHandler.getEndpointStates()).thenReturn(sessionStateSink.asFlux().distinctUntilChanged()); doNothing().when(sessionHandler).close(); final ArrayList<Sender> qpidSenders = new ArrayList<>(sendLinkCnt + 1); for (MockSendLink mockSendLink : mockSendLinks) { qpidSenders.add(mockSendLink.getQpidSender()); } 
qpidSenders.add(terminalMockSendLink.getQpidSender()); when(session.sender(any())) .thenReturn(qpidSenders.get(0), qpidSenders.subList(1, sendLinkCnt + 1).toArray(new Sender[0])); } Session getQpidSession() { return session; } SessionHandler getSessionHandler() { return sessionHandler; } AmqpSendLink getAmqpSendLink(int linkIdx) { Assertions.assertTrue(linkIdx >= 0 && linkIdx < sendLinkCnt, "linkIdx is not in range."); return mockSendLinks.get(linkIdx).getAmqpSendLink(); } MockSendLink moveToNextSendLink() { final MockSendLink nextSendLink; if (sendLinkIdx >= sendLinkCnt) { nextSendLink = terminalMockSendLink; } else { nextSendLink = mockSendLinks.get(sendLinkIdx); sendLinkIdx++; } currentMockSendLink = nextSendLink; return nextSendLink; } boolean isTerminalSendLink(MockSendLink link) { return this.terminalMockSendLink == link; } MockSendLink getCurrentSendLink() { return Objects.requireNonNull(currentMockSendLink, "Current Link is null"); } AmqpSendLink lookupAmqpSendLinkFor(SendLinkHandler sendLinkHandler) { for (MockSendLink sendLink : mockSendLinks) { if (sendLink.getSendLinkHandler() == sendLinkHandler) { return sendLink.getAmqpSendLink(); } } if (terminalMockSendLink.getSendLinkHandler() == sendLinkHandler) { return terminalMockSendLink.getAmqpSendLink(); } return null; } void emitSessionState(EndpointState state) { this.sessionStateSink.emitNext(state, Sinks.EmitFailureHandler.FAIL_FAST); } void emitSessionError(Throwable error) { this.sessionStateSink.emitError(error, Sinks.EmitFailureHandler.FAIL_FAST); } void emitSessionCompletion() { this.sessionStateSink.emitComplete(Sinks.EmitFailureHandler.FAIL_FAST); } @Override public void close() { Mockito.framework().clearInlineMock(session); Mockito.framework().clearInlineMock(sessionAttachments); Mockito.framework().clearInlineMock(sessionHandler); for (MockSendLink sendLink : mockSendLinks) { sendLink.close(); } terminalMockSendLink.close(); } } private static final class MockSendLink implements Closeable { 
private final Sender sender; private final Record senderAttachments; private final AmqpSendLink amqpSendLink; private final SendLinkHandler sendLinkHandler; private final Sinks.Many<EndpointState> sendLinkStateSink; private MockSendLink(Sender sender, Record senderAttachments, AmqpSendLink amqpSendLink, SendLinkHandler sendLinkHandler, Sinks.Many<EndpointState> sendLinkStateSink) { this.sender = sender; this.senderAttachments = senderAttachments; this.amqpSendLink = amqpSendLink; this.sendLinkHandler = sendLinkHandler; this.sendLinkStateSink = sendLinkStateSink; } static MockSendLink create() { final Record senderAttachments = mock(Record.class); final Sender sender = mock(Sender.class); final AmqpSendLink amqpSendLink = mock(AmqpSendLink.class); final SendLinkHandler sendLinkHandler = mock(SendLinkHandler.class); final Sinks.Many<EndpointState> sendLinkStateSink = Sinks.many().replay() .latestOrDefault(EndpointState.UNINITIALIZED); return new MockSendLink(sender, senderAttachments, amqpSendLink, sendLinkHandler, sendLinkStateSink); } void arrange() { doNothing().when(senderAttachments).set(any(), any(), anyString()); when(sender.attachments()).thenReturn(senderAttachments); doNothing().when(sender).setTarget(any()); doNothing().when(sender).setSenderSettleMode(any()); doNothing().when(sender).setProperties(any()); doNothing().when(sender).setSource(any()); doNothing().when(sender).open(); when(amqpSendLink.getLinkSize()).thenReturn(Mono.just(ServiceBusSenderAsyncClient.MAX_MESSAGE_LENGTH_BYTES)); when(amqpSendLink.getEndpointStates()) .thenReturn(sendLinkStateSink.asFlux().distinctUntilChanged().map(state -> toAmqpEndpointState(state))); when(sendLinkHandler.getEndpointStates()).thenReturn(sendLinkStateSink.asFlux().distinctUntilChanged()); doNothing().when(sendLinkHandler).close(); } Sender getQpidSender() { return sender; } AmqpSendLink getAmqpSendLink() { return amqpSendLink; } SendLinkHandler getSendLinkHandler() { return sendLinkHandler; } void 
emitSendLinkState(EndpointState state) { this.sendLinkStateSink.emitNext(state, Sinks.EmitFailureHandler.FAIL_FAST); } void emitSendLinkError(Throwable error) { this.sendLinkStateSink.emitError(error, Sinks.EmitFailureHandler.FAIL_FAST); } void emitSendLinkCompletion() { this.sendLinkStateSink.emitComplete(Sinks.EmitFailureHandler.FAIL_FAST); } private static AmqpEndpointState toAmqpEndpointState(EndpointState state) { switch (state) { case ACTIVE: return AmqpEndpointState.ACTIVE; case UNINITIALIZED: return AmqpEndpointState.UNINITIALIZED; case CLOSED: return AmqpEndpointState.CLOSED; default: throw new IllegalArgumentException("This endpoint state is not supported. State:" + state); } } @Override public void close() { Mockito.framework().clearInlineMock(sender); Mockito.framework().clearInlineMock(senderAttachments); Mockito.framework().clearInlineMock(amqpSendLink); Mockito.framework().clearInlineMock(sendLinkHandler); } } }
I agree, it’s easier to read the lambda form and less lines. But the reason for choosing the anonymous form is – the function needs to use a counter which has to be outside the local scope of the function. I was trying to avoid declaring that counter as atomic or array with one element.
void shouldRecoverFromRetriableSendLinkError() { final int sessionsCnt = 1; final int[] linksPerSession = new int[] { 2 }; try (MockEndpoint endpoint = createMockEndpoint(sessionsCnt, linksPerSession)) { final Supplier<ServiceBusReactorAmqpConnection> connectionSupplier = singleConnectionSupplier(endpoint); final Answer<Mono<Void>> sendAnswer = new Answer<Mono<Void>>() { private int invocationCount = -1; @Override public Mono<Void> answer(InvocationOnMock invocation) { invocationCount++; switch (invocationCount) { case 0: endpoint.emitCurrentSendLinkError(RETRIABLE_LINK_ERROR); return Mono.error(RETRIABLE_LINK_ERROR); case 1: return Mono.empty(); default: throw new RuntimeException("More than two invocations of send-answer is not expected."); } } }; when(endpoint.getAmqpSendLink(0, 0).send(anyList())).thenAnswer(sendAnswer); when(endpoint.getAmqpSendLink(0, 1).send(anyList())).thenAnswer(sendAnswer); final int messagesCount = 4; final List<ServiceBusMessage> messagesToSend = createMessagesToSend(messagesCount); final ReactorConnectionCache<ServiceBusReactorAmqpConnection> connectionCache = createConnectionCache(connectionSupplier); final ServiceBusSenderAsyncClient sender = createSenderAsyncClient(connectionCache, false); try { try (VirtualTimeStepVerifier verifier = new VirtualTimeStepVerifier()) { verifier.create(() -> sender.sendMessages(messagesToSend)) .thenAwait(VIRTUAL_TIME_SHIFT) .verifyComplete(); } verify(endpoint.getAmqpSendLink(0, 0)).send(sendMessagesCaptor0.capture()); final List<Message> messagesSendInSession0Link0 = sendMessagesCaptor0.getValue(); Assertions.assertEquals(messagesCount, messagesSendInSession0Link0.size()); verify(endpoint.getAmqpSendLink(0, 1)).send(sendMessagesCaptor1.capture()); final List<Message> messagesSendInSession0Link1 = sendMessagesCaptor1.getValue(); Assertions.assertEquals(messagesCount, messagesSendInSession0Link1.size()); } finally { sender.close(); connectionCache.dispose(); } } }
final Answer<Mono<Void>> sendAnswer = new Answer<Mono<Void>>() {
void shouldRecoverFromRetriableSendLinkError() { final int sessionsCnt = 1; final int[] linksPerSession = new int[] { 2 }; try (MockEndpoint endpoint = createMockEndpoint(sessionsCnt, linksPerSession)) { final Supplier<ServiceBusReactorAmqpConnection> connectionSupplier = singleConnectionSupplier(endpoint); final Answer<Mono<Void>> sendAnswer = new Answer<Mono<Void>>() { private int invocationCount = -1; @Override public Mono<Void> answer(InvocationOnMock invocation) { invocationCount++; switch (invocationCount) { case 0: endpoint.emitCurrentSendLinkError(RETRIABLE_LINK_ERROR); return Mono.error(RETRIABLE_LINK_ERROR); case 1: return Mono.empty(); default: throw new RuntimeException("More than two invocations of send-answer is not expected."); } } }; when(endpoint.getAmqpSendLink(0, 0).send(anyList())).thenAnswer(sendAnswer); when(endpoint.getAmqpSendLink(0, 1).send(anyList())).thenAnswer(sendAnswer); final int messagesCount = 4; final List<ServiceBusMessage> messagesToSend = createMessagesToSend(messagesCount); final ReactorConnectionCache<ServiceBusReactorAmqpConnection> connectionCache = createConnectionCache(connectionSupplier); final ServiceBusSenderAsyncClient sender = createSenderAsyncClient(connectionCache, false); try { try (VirtualTimeStepVerifier verifier = new VirtualTimeStepVerifier()) { verifier.create(() -> sender.sendMessages(messagesToSend)) .thenAwait(VIRTUAL_TIME_SHIFT) .verifyComplete(); } verify(endpoint.getAmqpSendLink(0, 0)).send(sendMessagesCaptor0.capture()); final List<Message> messagesSendInSession0Link0 = sendMessagesCaptor0.getValue(); Assertions.assertEquals(messagesCount, messagesSendInSession0Link0.size()); verify(endpoint.getAmqpSendLink(0, 1)).send(sendMessagesCaptor1.capture()); final List<Message> messagesSendInSession0Link1 = sendMessagesCaptor1.getValue(); Assertions.assertEquals(messagesCount, messagesSendInSession0Link1.size()); } finally { sender.close(); connectionCache.dispose(); } } }
class ServiceBusSenderAsyncClientRecoveryIsolatedTest { private static final BinaryData TEST_CONTENTS = BinaryData.fromString("My message for service bus queue!"); private static final String FQDN = "contoso-shopping.servicebus.windows.net"; private static final String QUEUE_NAME = "orders"; private static final String CLIENT_IDENTIFIER = "client-identifier"; private static final ServiceBusSenderInstrumentation DEFAULT_INSTRUMENTATION = new ServiceBusSenderInstrumentation( null, null, FQDN, QUEUE_NAME); private static final Duration VIRTUAL_TIME_SHIFT = OPERATION_TIMEOUT.plusSeconds(30); private static final AmqpException RETRIABLE_LINK_ERROR = new AmqpException(true, AmqpErrorCondition.LINK_DETACH_FORCED, "detach-link-error", new AmqpErrorContext(FQDN)); private static final AmqpException RETRIABLE_SESSION_ERROR = new AmqpException(true, "session-error", new AmqpErrorContext(FQDN)); private static final AmqpException RETRIABLE_CONNECTION_ERROR = new AmqpException(true, AmqpErrorCondition.CONNECTION_FORCED, "connection-forced-error", new AmqpErrorContext(FQDN)); private static final AmqpException NON_RETRIABLE_ERROR_1 = new AmqpException(false, AmqpErrorCondition.NOT_ALLOWED, "not-allowed-error-1", new AmqpErrorContext(FQDN)); private static final AmqpException NON_RETRIABLE_ERROR_2 = new AmqpException(false, AmqpErrorCondition.NOT_ALLOWED, "not-allowed-error-2", new AmqpErrorContext(FQDN)); private static final AmqpRetryOptions RETRY_OPTIONS = new AmqpRetryOptions() .setMode(AmqpRetryMode.FIXED) .setMaxRetries(10) .setMaxDelay(Duration.ofSeconds(5)) .setDelay(Duration.ofSeconds(1)) .setTryTimeout(OPERATION_TIMEOUT); private final MessageSerializer messageSerializer = new ServiceBusMessageSerializer(); @Mock private Runnable onClientClosed; @Captor private ArgumentCaptor<List<Message>> sendMessagesCaptor0; @Captor private ArgumentCaptor<List<Message>> sendMessagesCaptor1; private AutoCloseable mocksCloseable; @BeforeEach void setup() { mocksCloseable = 
MockitoAnnotations.openMocks(this); } @AfterEach void teardown() throws Exception { Mockito.framework().clearInlineMock(this); if (mocksCloseable != null) { mocksCloseable.close(); } } @Test @Execution(ExecutionMode.SAME_THREAD) @Test @Execution(ExecutionMode.SAME_THREAD) void shouldBubbleUpNonRetriableSendLinkError() { final int sessionsCnt = 1; final int[] linksPerSession = new int[] { 2 }; try (MockEndpoint endpoint = createMockEndpoint(sessionsCnt, linksPerSession)) { final Supplier<ServiceBusReactorAmqpConnection> connectionSupplier = singleConnectionSupplier(endpoint); final Answer<Mono<Void>> sendAnswer = new Answer<Mono<Void>>() { private int invocationCount = -1; @Override public Mono<Void> answer(InvocationOnMock invocation) { invocationCount++; switch (invocationCount) { case 0: endpoint.emitCurrentSendLinkError(RETRIABLE_LINK_ERROR); return Mono.error(RETRIABLE_LINK_ERROR); case 1: endpoint.emitCurrentSendLinkError(NON_RETRIABLE_ERROR_1); return Mono.error(NON_RETRIABLE_ERROR_1); default: throw new RuntimeException("More than two invocations of send-answer is not expected."); } } }; when(endpoint.getAmqpSendLink(0, 0).send(anyList())).thenAnswer(sendAnswer); when(endpoint.getAmqpSendLink(0, 1).send(anyList())).thenAnswer(sendAnswer); final int messagesCount = 4; final List<ServiceBusMessage> messagesToSend = createMessagesToSend(messagesCount); final ReactorConnectionCache<ServiceBusReactorAmqpConnection> connectionCache = createConnectionCache(connectionSupplier); final ServiceBusSenderAsyncClient sender = createSenderAsyncClient(connectionCache, false); try { try (VirtualTimeStepVerifier verifier = new VirtualTimeStepVerifier()) { verifier.create(() -> sender.sendMessages(messagesToSend)) .thenAwait(VIRTUAL_TIME_SHIFT) .verifyErrorSatisfies(e -> { Assertions.assertTrue(e instanceof ServiceBusException); final ServiceBusException se = (ServiceBusException) e; Assertions.assertSame(NON_RETRIABLE_ERROR_1, se.getCause()); }); } 
verify(endpoint.getAmqpSendLink(0, 0)).send(sendMessagesCaptor0.capture()); final List<Message> messagesSendInSession0Link0 = sendMessagesCaptor0.getValue(); Assertions.assertEquals(messagesCount, messagesSendInSession0Link0.size()); verify(endpoint.getAmqpSendLink(0, 1)).send(sendMessagesCaptor1.capture()); final List<Message> messagesSendInSession0Link1 = sendMessagesCaptor1.getValue(); Assertions.assertEquals(messagesCount, messagesSendInSession0Link1.size()); verifyNoInteractions(onClientClosed); } finally { sender.close(); connectionCache.dispose(); } } } @Test @Execution(ExecutionMode.SAME_THREAD) void shouldRecoverFromRetriableSessionError() { final int sessionsCnt = 2; final int[] linksPerSession = new int[] { 1, 1 }; try (MockEndpoint endpoint = createMockEndpoint(sessionsCnt, linksPerSession)) { final Supplier<ServiceBusReactorAmqpConnection> connectionSupplier = singleConnectionSupplier(endpoint); final Answer<Mono<Void>> sendAnswer = new Answer<Mono<Void>>() { private int invocationCount = -1; @Override public Mono<Void> answer(InvocationOnMock invocation) { invocationCount++; switch (invocationCount) { case 0: endpoint.emitCurrentSessionError(RETRIABLE_SESSION_ERROR); return Mono.error(RETRIABLE_SESSION_ERROR); case 1: return Mono.empty(); default: throw new RuntimeException("More than two invocations of send-answer is not expected."); } } }; when(endpoint.getAmqpSendLink(0, 0).send(anyList())).thenAnswer(sendAnswer); when(endpoint.getAmqpSendLink(1, 0).send(anyList())).thenAnswer(sendAnswer); final int messagesCount = 4; final List<ServiceBusMessage> messagesToSend = createMessagesToSend(messagesCount); final ReactorConnectionCache<ServiceBusReactorAmqpConnection> connectionCache = createConnectionCache(connectionSupplier); final ServiceBusSenderAsyncClient sender = createSenderAsyncClient(connectionCache, false); try { try (VirtualTimeStepVerifier verifier = new VirtualTimeStepVerifier()) { verifier.create(() -> sender.sendMessages(messagesToSend)) 
.thenAwait(VIRTUAL_TIME_SHIFT) .verifyComplete(); } verify(endpoint.getAmqpSendLink(0, 0)).send(sendMessagesCaptor0.capture()); final List<Message> messagesSendInSession0Link0 = sendMessagesCaptor0.getValue(); Assertions.assertEquals(messagesCount, messagesSendInSession0Link0.size()); verify(endpoint.getAmqpSendLink(1, 0)).send(sendMessagesCaptor1.capture()); final List<Message> messagesSendInSession1Link0 = sendMessagesCaptor1.getValue(); Assertions.assertEquals(messagesCount, messagesSendInSession1Link0.size()); } finally { sender.close(); connectionCache.dispose(); } } } @Test @Execution(ExecutionMode.SAME_THREAD) void shouldBubbleUpNonRetriableSessionError() { final int sessionsCnt = 2; final int[] linksPerSession = new int[] { 1, 1 }; try (MockEndpoint endpoint = createMockEndpoint(sessionsCnt, linksPerSession)) { final Supplier<ServiceBusReactorAmqpConnection> connectionSupplier = singleConnectionSupplier(endpoint); final Answer<Mono<Void>> sendAnswer = new Answer<Mono<Void>>() { private int invocationCount = -1; @Override public Mono<Void> answer(InvocationOnMock invocation) { invocationCount++; switch (invocationCount) { case 0: endpoint.emitCurrentSessionError(RETRIABLE_SESSION_ERROR); return Mono.error(RETRIABLE_SESSION_ERROR); case 1: endpoint.emitCurrentSessionError(NON_RETRIABLE_ERROR_1); return Mono.error(NON_RETRIABLE_ERROR_1); default: throw new RuntimeException("More than two invocations of send-answer is not expected."); } } }; when(endpoint.getAmqpSendLink(0, 0).send(anyList())).thenAnswer(sendAnswer); when(endpoint.getAmqpSendLink(1, 0).send(anyList())).thenAnswer(sendAnswer); final int messagesCount = 4; final List<ServiceBusMessage> messagesToSend = createMessagesToSend(messagesCount); final ReactorConnectionCache<ServiceBusReactorAmqpConnection> connectionCache = createConnectionCache(connectionSupplier); final ServiceBusSenderAsyncClient sender = createSenderAsyncClient(connectionCache, false); try { try (VirtualTimeStepVerifier verifier = 
new VirtualTimeStepVerifier()) { verifier.create(() -> sender.sendMessages(messagesToSend)) .thenAwait(VIRTUAL_TIME_SHIFT) .verifyErrorSatisfies(e -> { Assertions.assertTrue(e instanceof ServiceBusException); final ServiceBusException se = (ServiceBusException) e; Assertions.assertSame(NON_RETRIABLE_ERROR_1, se.getCause()); }); } verify(endpoint.getAmqpSendLink(0, 0)).send(sendMessagesCaptor0.capture()); final List<Message> messagesSendInSession0Link0 = sendMessagesCaptor0.getValue(); Assertions.assertEquals(messagesCount, messagesSendInSession0Link0.size()); verify(endpoint.getAmqpSendLink(1, 0)).send(sendMessagesCaptor1.capture()); final List<Message> messagesSendInSession1Link0 = sendMessagesCaptor1.getValue(); Assertions.assertEquals(messagesCount, messagesSendInSession1Link0.size()); verifyNoInteractions(onClientClosed); } finally { sender.close(); connectionCache.dispose(); } } } @Test @Execution(ExecutionMode.SAME_THREAD) void shouldSenderReusableAfterNonRetriableLinkAndSessionError() { final int sessionsCnt = 3; final int[] linksPerSession = new int[] { 1, 1, 1 }; try (MockEndpoint endpoint = createMockEndpoint(sessionsCnt, linksPerSession)) { final Supplier<ServiceBusReactorAmqpConnection> connectionSupplier = new Supplier<ServiceBusReactorAmqpConnection>() { private int invocationCount = -1; @Override public ServiceBusReactorAmqpConnection get() { invocationCount++; switch (invocationCount) { case 0: final ServiceBusReactorAmqpConnection c = endpoint.arrange(); return c; default: throw new RuntimeException("More than one invocation of connection-supplier is not expected."); } } }; final Answer<Mono<Void>> sendAnswer = new Answer<Mono<Void>>() { private int invocationCount = -1; @Override public Mono<Void> answer(InvocationOnMock invocation) { invocationCount++; switch (invocationCount) { case 0: endpoint.emitCurrentSendLinkError(NON_RETRIABLE_ERROR_1); return Mono.error(NON_RETRIABLE_ERROR_1); case 1: 
endpoint.emitCurrentSessionError(NON_RETRIABLE_ERROR_2); return Mono.error(NON_RETRIABLE_ERROR_2); case 2: return Mono.empty(); default: throw new RuntimeException("More than three invocations of send-answer is not expected."); } } }; when(endpoint.getAmqpSendLink(0, 0).send(anyList())).thenAnswer(sendAnswer); when(endpoint.getAmqpSendLink(1, 0).send(anyList())).thenAnswer(sendAnswer); when(endpoint.getAmqpSendLink(2, 0).send(any(Message.class))).thenAnswer(sendAnswer); final int messagesCount = 4; final List<ServiceBusMessage> messagesToSend = createMessagesToSend(messagesCount); final ServiceBusMessage messageToSend = createMessageToSend(); final ReactorConnectionCache<ServiceBusReactorAmqpConnection> connectionCache = createConnectionCache(connectionSupplier); final ServiceBusSenderAsyncClient sender = createSenderAsyncClient(connectionCache, false); try { try (VirtualTimeStepVerifier verifier = new VirtualTimeStepVerifier()) { verifier.create(() -> sender.sendMessages(messagesToSend)) .thenAwait(VIRTUAL_TIME_SHIFT) .verifyErrorSatisfies(e -> { Assertions.assertTrue(e instanceof ServiceBusException); final ServiceBusException se = (ServiceBusException) e; Assertions.assertSame(NON_RETRIABLE_ERROR_1, se.getCause()); }); } try (VirtualTimeStepVerifier verifier = new VirtualTimeStepVerifier()) { verifier.create(() -> sender.sendMessages(messagesToSend)) .thenAwait(VIRTUAL_TIME_SHIFT) .verifyErrorSatisfies(e -> { Assertions.assertTrue(e instanceof ServiceBusException); final ServiceBusException se = (ServiceBusException) e; Assertions.assertSame(NON_RETRIABLE_ERROR_2, se.getCause()); }); } try (VirtualTimeStepVerifier verifier = new VirtualTimeStepVerifier()) { verifier.create(() -> sender.sendMessage(messageToSend)) .thenAwait(VIRTUAL_TIME_SHIFT) .verifyComplete(); } verify(endpoint.getAmqpSendLink(0, 0)).send(sendMessagesCaptor0.capture()); final List<Message> messagesSendInSession0Link0 = sendMessagesCaptor0.getValue(); Assertions.assertEquals(messagesCount, 
messagesSendInSession0Link0.size()); verify(endpoint.getAmqpSendLink(1, 0)).send(sendMessagesCaptor1.capture()); final List<Message> messagesSendInSession1Link0 = sendMessagesCaptor1.getValue(); Assertions.assertEquals(messagesCount, messagesSendInSession1Link0.size()); verify(endpoint.getAmqpSendLink(2, 0), times(1)).send(any(Message.class)); } finally { sender.close(); connectionCache.dispose(); } } } @Test @Execution(ExecutionMode.SAME_THREAD) void shouldRecoverFromRetriableConnectionError() { final int endpointsCount = 4; final List<SessionLinkCount> sessionLinkCountList = new ArrayList<>(endpointsCount); sessionLinkCountList.add(new SessionLinkCount(1, new int[] { 1 })); sessionLinkCountList.add(new SessionLinkCount(1, new int[] { 1 })); sessionLinkCountList.add(new SessionLinkCount(1, new int[] { 1 })); sessionLinkCountList.add(new SessionLinkCount(1, new int[] { 1 })); try (MockEndpoints endpoints = createMockEndpoints(sessionLinkCountList)) { final AtomicReference<MockEndpoint> currentEndpoint = new AtomicReference<>(); final Supplier<ServiceBusReactorAmqpConnection> connectionSupplier = new Supplier<ServiceBusReactorAmqpConnection>() { private int endpointIndex = -1; @Override public ServiceBusReactorAmqpConnection get() { endpointIndex++; if (endpointIndex >= endpointsCount) { throw new RuntimeException("More than " + endpointsCount + " invocation of connection-supplier is not expected."); } final MockEndpoint e = endpoints.get(endpointIndex); currentEndpoint.set(e); final ServiceBusReactorAmqpConnection c = e.arrange(); return c; } }; final Answer<Mono<Void>> sendAnswer = new Answer<Mono<Void>>() { private int invocationCount = -1; @Override public Mono<Void> answer(InvocationOnMock invocation) { invocationCount++; switch (invocationCount) { case 0: Assertions.assertEquals(endpoints.get(0), currentEndpoint.get()); endpoints.get(0).emitCurrentSendLinkError(RETRIABLE_LINK_ERROR); return Mono.error(RETRIABLE_LINK_ERROR); case 1: 
Assertions.assertEquals(endpoints.get(1), currentEndpoint.get()); endpoints.get(1).emitCurrentSessionError(RETRIABLE_SESSION_ERROR); return Mono.error(RETRIABLE_SESSION_ERROR); case 2: Assertions.assertEquals(endpoints.get(2), currentEndpoint.get()); endpoints.get(2).emitConnectionError(RETRIABLE_CONNECTION_ERROR); return Mono.error(RETRIABLE_CONNECTION_ERROR); case 3: Assertions.assertEquals(endpoints.get(3), currentEndpoint.get()); return Mono.empty(); default: throw new RuntimeException("More than three invocations of send-answer is not expected."); } } }; when(endpoints.get(0).getAmqpSendLink(0, 0).send(anyList())).thenAnswer(sendAnswer); when(endpoints.get(1).getAmqpSendLink(0, 0).send(anyList())).thenAnswer(sendAnswer); when(endpoints.get(2).getAmqpSendLink(0, 0).send(anyList())).thenAnswer(sendAnswer); when(endpoints.get(3).getAmqpSendLink(0, 0).send(anyList())).thenAnswer(sendAnswer); final int messagesCount = 4; final List<ServiceBusMessage> messagesToSend = createMessagesToSend(messagesCount); final ReactorConnectionCache<ServiceBusReactorAmqpConnection> connectionCache = createConnectionCache(connectionSupplier); final ServiceBusSenderAsyncClient sender = createSenderAsyncClient(connectionCache, false); try { try (VirtualTimeStepVerifier verifier = new VirtualTimeStepVerifier()) { verifier.create(() -> sender.sendMessages(messagesToSend)) .thenAwait(VIRTUAL_TIME_SHIFT) .verifyComplete(); } verify(endpoints.get(0).getAmqpSendLink(0, 0), times(1)).send(anyList()); verify(endpoints.get(1).getAmqpSendLink(0, 0), times(1)).send(anyList()); verify(endpoints.get(2).getAmqpSendLink(0, 0), times(1)).send(anyList()); verify(endpoints.get(3).getAmqpSendLink(0, 0), times(1)).send(anyList()); } finally { sender.close(); connectionCache.dispose(); } } } private MockEndpoint createMockEndpoint(int sessionsCnt, int[] linksPerSession) { final String connectionId = "1"; return MockEndpoint.create(connectionId, QUEUE_NAME, RETRY_OPTIONS, sessionsCnt, linksPerSession); } 
// ---------------------------------------------------------------------------
// Factory helpers shared by the tests above.
// ---------------------------------------------------------------------------

/** Creates the multi-endpoint arrangement used by connection-recovery tests. */
private MockEndpoints createMockEndpoints(List<SessionLinkCount> sessionLinkCountList) {
    return MockEndpoints.create(QUEUE_NAME, RETRY_OPTIONS, sessionLinkCountList);
}

/** Supplier that yields the endpoint's connection exactly once; any reuse is a test bug. */
private Supplier<ServiceBusReactorAmqpConnection> singleConnectionSupplier(MockEndpoint endpoint) {
    final Supplier<ServiceBusReactorAmqpConnection> connectionSupplier = new Supplier<ServiceBusReactorAmqpConnection>() {
        private int invocationCount = -1;

        @Override
        public ServiceBusReactorAmqpConnection get() {
            invocationCount++;
            switch (invocationCount) {
                case 0:
                    final ServiceBusReactorAmqpConnection c = endpoint.arrange();
                    return c;
                default:
                    throw new RuntimeException("More than one invocation of connection-supplier is not expected.");
            }
        }
    };
    return connectionSupplier;
}

/** Wraps the supplier in the production connection cache under the shared retry policy. */
private ReactorConnectionCache<ServiceBusReactorAmqpConnection> createConnectionCache(
    Supplier<ServiceBusReactorAmqpConnection> connectionSupplier) {
    return new ReactorConnectionCache<>(connectionSupplier, FQDN, QUEUE_NAME, getRetryPolicy(RETRY_OPTIONS),
        new HashMap<>());
}

/**
 * Builds the sender under test on top of the given connection cache.
 * NOTE(review): isSharedConnection is currently unused by this helper — confirm intended.
 */
private ServiceBusSenderAsyncClient createSenderAsyncClient(
    ReactorConnectionCache<ServiceBusReactorAmqpConnection> connectionCache, boolean isSharedConnection) {
    final ConnectionCacheWrapper connectionSupport = new ConnectionCacheWrapper(connectionCache);
    return new ServiceBusSenderAsyncClient(QUEUE_NAME, MessagingEntityType.QUEUE, connectionSupport, RETRY_OPTIONS,
        DEFAULT_INSTRUMENTATION, messageSerializer, onClientClosed, "", CLIENT_IDENTIFIER);
}

/** Returns {@code messagesCount} messages sharing the same test payload. */
private static List<ServiceBusMessage> createMessagesToSend(int messagesCount) {
    return IntStream.range(0, messagesCount)
        .mapToObj(__ -> new ServiceBusMessage(TEST_CONTENTS))
        .collect(Collectors.toList());
}

/** Returns a single message with the test payload. */
private static ServiceBusMessage createMessageToSend() {
    final ServiceBusMessage messageToSend = new ServiceBusMessage(TEST_CONTENTS);
    return messageToSend;
}

/** StepVerifier wrapper that owns (and disposes) a dedicated virtual-time scheduler. */
private static final class VirtualTimeStepVerifier implements AutoCloseable {
    private final VirtualTimeScheduler scheduler;

    VirtualTimeStepVerifier() {
        scheduler = VirtualTimeScheduler.create();
    }

    <T> StepVerifier.Step<T> create(Supplier<Mono<T>> scenarioSupplier) {
        // Demand of 0 so the scenario only progresses via explicit thenAwait/expect* steps.
        return StepVerifier.withVirtualTime(scenarioSupplier, () -> scheduler, 0);
    }

    @Override
    public void close() {
        scheduler.dispose();
    }
}

/** Topology descriptor: number of sessions and, per session, the number of send links. */
private static class SessionLinkCount {
    private final int sessionsCnt;
    private final int[] linksPerSession;

    SessionLinkCount(int sessionsCnt, int[] linksPerSession) {
        this.sessionsCnt = sessionsCnt;
        this.linksPerSession = linksPerSession;
    }
}

/** Ordered collection of {@link MockEndpoint} instances; closing it closes each endpoint. */
private static final class MockEndpoints implements Closeable {
    private final List<MockEndpoint> mockEndpoints;
    private final int mockEndpointsCnt;

    private MockEndpoints(List<MockEndpoint> mockEndpoints) {
        this.mockEndpoints = mockEndpoints;
        this.mockEndpointsCnt = this.mockEndpoints.size();
    }

    static MockEndpoints create(String queueName, AmqpRetryOptions retryOptions,
        List<SessionLinkCount> sessionLinkCounts) {
        final List<MockEndpoint> mockEndpoints = new ArrayList<>(sessionLinkCounts.size());
        int conId = 1; // connection ids are "1", "2", ... in creation order.
        for (SessionLinkCount slc : sessionLinkCounts) {
            mockEndpoints.add(MockEndpoint.create(String.valueOf(conId), queueName, retryOptions, slc.sessionsCnt,
                slc.linksPerSession));
            conId++;
        }
        return new MockEndpoints(mockEndpoints);
    }

    MockEndpoint get(int index) {
        if (index >= mockEndpointsCnt) {
            throw new IndexOutOfBoundsException("index:" + index + " maxIndex: " + (mockEndpointsCnt - 1));
        }
        return mockEndpoints.get(index);
    }

    @Override
    public void close() {
        for (MockEndpoint mockEndpoint : mockEndpoints) {
            mockEndpoint.close();
        }
    }
}

/**
 * One mocked broker endpoint: a qpid {@link Connection} plus the reactor plumbing needed so a real
 * {@link ServiceBusReactorAmqpConnection} can be built against it. {@link #arrange()} may be called
 * at most once per instance; emit* methods inject endpoint-state transitions into the handlers.
 */
private static final class MockEndpoint implements Closeable {
    private final String connectionId;
    private final String queueName;
    private final AmqpRetryOptions retryOptions;
    private final MockSendSessions mockSendSessions;
    private final ConnectionOptions connectionOptions;
    private final Connection connection;
    private final Reactor reactor;
    private final ReactorDispatcher reactorDispatcher;
    private final ReactorExecutor reactorExecutor;
    private final ReactorProvider reactorProvider;
    private final ConnectionHandler connectionHandler;
    private final Sinks.Many<EndpointState> connectionStateSink;
    private final ReactorHandlerProvider handlerProvider;
    private final ServiceBusAmqpLinkProvider linkProvider;
    private final TokenManagerProvider tokenManagerProvider;
    private final TokenManager tokenManager;
    private final MessageSerializer messageSerializer;
    // Guards the one-shot arrange() contract.
    private final AtomicBoolean arranged = new AtomicBoolean(false);

    private MockEndpoint(String connectionId, String queueName, AmqpRetryOptions retryOptions,
        MockSendSessions mockSendSessions, ConnectionOptions connectionOptions, Connection connection,
        Reactor reactor, ReactorDispatcher reactorDispatcher, ReactorExecutor reactorExecutor,
        ReactorProvider reactorProvider, ConnectionHandler connectionHandler,
        Sinks.Many<EndpointState> connectionStateSink, ReactorHandlerProvider handlerProvider,
        ServiceBusAmqpLinkProvider linkProvider, TokenManagerProvider tokenManagerProvider,
        TokenManager tokenManager, MessageSerializer messageSerializer) {
        this.connectionId = connectionId;
        this.queueName = queueName;
        this.retryOptions = retryOptions;
        this.mockSendSessions = mockSendSessions;
        this.connectionOptions = connectionOptions;
        this.connection = connection;
        this.reactor = reactor;
        this.reactorDispatcher = reactorDispatcher;
        this.reactorExecutor = reactorExecutor;
        this.reactorProvider = reactorProvider;
        this.connectionHandler = connectionHandler;
        this.connectionStateSink = connectionStateSink;
        this.handlerProvider = handlerProvider;
        this.linkProvider = linkProvider;
        this.tokenManagerProvider = tokenManagerProvider;
        this.tokenManager = tokenManager;
        this.messageSerializer = messageSerializer;
    }

    static MockEndpoint create(String connectionId, String queueName, AmqpRetryOptions retryOptions, int sessionsCnt,
        int[] linksPerSession) {
        Assertions.assertNotNull(retryOptions);
        Assertions.assertTrue(sessionsCnt > 0, "sessionsCnt must be > 0.");
        Assertions.assertEquals(sessionsCnt, linksPerSession.length);
        for (int linksCnt = 0; linksCnt < linksPerSession.length; linksCnt++) {
            // BUGFIX: previously asserted the loop index (linksCnt >= 0, trivially true) instead of
            // the array value, so the stated precondition was never actually validated.
            Assertions.assertTrue(linksPerSession[linksCnt] >= 0, "links-count in linksPerSession must be >= 0.");
        }
        final MockSendSessions mockSendSessions = MockSendSessions.create(connectionId, sessionsCnt, linksPerSession);
        final ConnectionOptions connectionOptions = mock(ConnectionOptions.class);
        final Connection connection = mock(Connection.class);
        final Sinks.Many<EndpointState> connectionStateSink = Sinks.many().replay()
            .latestOrDefault(EndpointState.UNINITIALIZED);
        final ConnectionHandler connectionHandler = mock(ConnectionHandler.class);
        final Reactor reactor = mock(Reactor.class);
        final ReactorDispatcher reactorDispatcher = mock(ReactorDispatcher.class);
        final ReactorExecutor reactorExecutor = mock(ReactorExecutor.class);
        final ReactorProvider reactorProvider = mock(ReactorProvider.class);
        final ReactorHandlerProvider handlerProvider = mock(ReactorHandlerProvider.class);
        final ServiceBusAmqpLinkProvider linkProvider = mock(ServiceBusAmqpLinkProvider.class);
        final TokenManager tokenManager = mock(TokenManager.class);
        final TokenManagerProvider tokenManagerProvider = mock(TokenManagerProvider.class);
        final MessageSerializer messageSerializer = mock(MessageSerializer.class);
        return new MockEndpoint(connectionId, queueName, retryOptions, mockSendSessions, connectionOptions,
            connection, reactor, reactorDispatcher, reactorExecutor, reactorProvider, connectionHandler,
            connectionStateSink, handlerProvider, linkProvider, tokenManagerProvider, tokenManager,
            messageSerializer);
    }

    /** Wires all mocks and returns a real connection object bound to them. One-shot. */
    ServiceBusReactorAmqpConnection arrange() {
        if (arranged.getAndSet(true)) {
            throw new RuntimeException("Only one connection can be obtained from a MockEndpoint instance.");
        }
        mockSendSessions.arrange(handlerProvider, linkProvider, connection, connectionStateSink);
        when(connectionOptions.getRetry()).thenReturn(retryOptions);
        doNothing().when(connection).close();
        connectionStateSink.emitNext(EndpointState.ACTIVE, Sinks.EmitFailureHandler.FAIL_FAST);
        when(connectionHandler.getEndpointStates()).thenReturn(connectionStateSink.asFlux().distinctUntilChanged());
        doNothing().when(connectionHandler).close();
        when(reactor.connectionToHost(any(), anyInt(), any())).thenReturn(connection);
        try {
            // Run dispatched work synchronously on the caller's thread.
            doAnswer(invocation -> {
                final Runnable work = invocation.getArgument(0);
                work.run();
                return null;
            }).when(reactorDispatcher).invoke(any(Runnable.class));
        } catch (IOException ioe) {
            throw new UncheckedIOException(ioe);
        }
        when(reactorDispatcher.getShutdownSignal()).thenReturn(Mono.empty());
        doNothing().when(reactorExecutor).start();
        when(reactorExecutor.closeAsync()).thenReturn(Mono.empty());
        try {
            when(reactorProvider.createReactor(anyString(), anyInt())).thenReturn(reactor);
        } catch (IOException ioe) {
            throw new UncheckedIOException(ioe);
        }
        when(reactorProvider.getReactorDispatcher()).thenReturn(reactorDispatcher);
        when(reactorProvider.createExecutor(any(), anyString(), any(), any(), any())).thenReturn(reactorExecutor);
        when(handlerProvider.createConnectionHandler(anyString(), any())).thenReturn(connectionHandler);
        // A one-hour token keeps auth refresh out of the picture for these tests.
        when(tokenManager.authorize()).thenReturn(Mono.just(Duration.ofHours(1).toMillis()));
        when(tokenManagerProvider.getTokenManager(any(), anyString())).thenReturn(tokenManager);
        final boolean isV2 = true;
        return new ServiceBusReactorAmqpConnection(connectionId, connectionOptions, reactorProvider, handlerProvider,
            linkProvider, tokenManagerProvider, messageSerializer, false, isV2);
    }

    AmqpSendLink getAmqpSendLink(int sessionIdx, int linkIdx) {
        return mockSendSessions.getAmqpSendLink(sessionIdx, linkIdx);
    }

    void emitConnectionError(Throwable throwable) {
        connectionStateSink.emitError(throwable, Sinks.EmitFailureHandler.FAIL_FAST);
    }

    void emitConnectionCompletion() {
        connectionStateSink.emitComplete(Sinks.EmitFailureHandler.FAIL_FAST);
    }

    void emitCurrentSessionState(EndpointState state) {
        final MockSendSession session = mockSendSessions.getCurrentSendSession();
        session.emitSessionState(state);
    }

    void emitCurrentSessionError(Throwable throwable) {
        final MockSendSession session = mockSendSessions.getCurrentSendSession();
        session.emitSessionError(throwable);
    }

    void emitCurrentSessionCompletion() {
        final MockSendSession session = mockSendSessions.getCurrentSendSession();
        session.emitSessionCompletion();
    }

    void emitCurrentSendLinkState(EndpointState state) {
        final MockSendLink sendLink = mockSendSessions.getCurrentSendLink();
        sendLink.emitSendLinkState(state);
    }

    void emitCurrentSendLinkError(Throwable throwable) {
        final MockSendLink sendLink = mockSendSessions.getCurrentSendLink();
        sendLink.emitSendLinkError(throwable);
    }

    void emitCurrentSendLinkCompletion() {
        final MockSendLink sendLink = mockSendSessions.getCurrentSendLink();
        sendLink.emitSendLinkCompletion();
    }

    @Override
    public void close() {
        Mockito.framework().clearInlineMock(connectionOptions);
        Mockito.framework().clearInlineMock(connection);
        Mockito.framework().clearInlineMock(connectionHandler);
        Mockito.framework().clearInlineMock(reactor);
        Mockito.framework().clearInlineMock(reactorDispatcher);
        Mockito.framework().clearInlineMock(reactorExecutor);
        Mockito.framework().clearInlineMock(reactorProvider);
        Mockito.framework().clearInlineMock(handlerProvider);
        Mockito.framework().clearInlineMock(linkProvider);
        Mockito.framework().clearInlineMock(tokenManager);
        Mockito.framework().clearInlineMock(tokenManagerProvider);
        Mockito.framework().clearInlineMock(messageSerializer);
        mockSendSessions.close();
    }
}

/**
 * Ordered set of mock sessions handed out one-by-one as the connection opens sessions. After the
 * scripted sessions are exhausted, a terminal session is returned and the connection completes.
 */
private static final class MockSendSessions implements Closeable {
    private final Object lock = new Object();
    private final List<MockSendSession> mockSendSessions;
    private final MockSendSession terminalMockSendSession;
    private final int sessionsCnt;
    private int sessionIdx;                        // guarded by lock
    private MockSendSession currentMockSendSession; // guarded by lock

    private MockSendSessions(List<MockSendSession> mockSendSessions, MockSendSession terminalMockSendSession) {
        this.mockSendSessions = mockSendSessions;
        this.terminalMockSendSession = terminalMockSendSession;
        this.sessionsCnt = this.mockSendSessions.size();
        this.sessionIdx = 0;
    }

    static MockSendSessions create(String connectionId, int sessionsCnt, int[] linksPerSession) {
        final List<MockSendSession> mockSendSessions = new ArrayList<>(sessionsCnt);
        for (int i = 0; i < sessionsCnt; i++) {
            mockSendSessions.add(MockSendSession.create(connectionId, linksPerSession[i]));
        }
        // Terminal session has no links; reaching it signals the end of the scripted topology.
        final MockSendSession terminalMockSendSession = MockSendSession.create(connectionId, 0);
        return new MockSendSessions(Collections.unmodifiableList(mockSendSessions), terminalMockSendSession);
    }

    void arrange(ReactorHandlerProvider handlerProvider, AmqpLinkProvider linkProvider, Connection connection,
        Sinks.Many<EndpointState> connectionStateSink) {
        for (MockSendSession mockSession : mockSendSessions) {
            mockSession.arrange();
            mockSession.emitSessionState(EndpointState.ACTIVE);
        }
        terminalMockSendSession.arrange();
        terminalMockSendSession.emitSessionCompletion();
        // Each session-handler request advances to the next scripted session.
        when(handlerProvider.createSessionHandler(anyString(), any(), anyString(), any()))
            .thenAnswer(invocation -> {
                final MockSendSession session = moveToNextSendSession(connectionStateSink);
                return session.getSessionHandler();
            });
        // Each link-handler request advances to the next scripted link within the current session.
        when(handlerProvider.createSendLinkHandler(anyString(), any(), anyString(), anyString()))
            .thenAnswer(invocation -> {
                final MockSendLink sendLink = moveToNextSendLinkInCurrentSession();
                return sendLink.getSendLinkHandler();
            });
        // Map the handler back to its pre-built AmqpSendLink mock.
        when(linkProvider.createSendLink(any(ServiceBusReactorAmqpConnection.class), anyString(), any(Sender.class),
            any(SendLinkHandler.class), any(ReactorProvider.class), any(TokenManager.class),
            any(MessageSerializer.class), any(AmqpRetryOptions.class), any(Scheduler.class), any()))
            .thenAnswer(invocation -> {
                final SendLinkHandler sendLinkHandler = invocation.getArgument(3);
                final AmqpSendLink amqpSendLink = lookupAmqpSendLinkFor(sendLinkHandler);
                return amqpSendLink;
            });
        final ArrayList<Session> qpidSessions = new ArrayList<>(sessionsCnt + 1);
        for (MockSendSession mockSendSession : mockSendSessions) {
            qpidSessions.add(mockSendSession.getQpidSession());
        }
        qpidSessions.add(terminalMockSendSession.getQpidSession());
        when(connection.session())
            .thenReturn(qpidSessions.get(0), qpidSessions.subList(1, sessionsCnt + 1).toArray(new Session[0]));
    }

    AmqpSendLink getAmqpSendLink(int sessionIdx, int linkIdx) {
        Assertions.assertTrue(sessionIdx >= 0 && sessionIdx < sessionsCnt, "sessionIdx is not in range.");
        final MockSendSession session = mockSendSessions.get(sessionIdx);
        return session.getAmqpSendLink(linkIdx);
    }

    MockSendSession getCurrentSendSession() {
        final MockSendSession session;
        synchronized (lock) {
            session = Objects.requireNonNull(currentMockSendSession, "Current Session is null");
        }
        return session;
    }

    MockSendLink getCurrentSendLink() {
        final MockSendLink sendLink;
        synchronized (lock) {
            final MockSendSession session = getCurrentSendSession();
            sendLink = session.getCurrentSendLink();
        }
        return sendLink;
    }

    private MockSendSession moveToNextSendSession(Sinks.Many<EndpointState> connectionStateSink) {
        final MockSendSession nextSession;
        synchronized (lock) {
            if (sessionIdx >= sessionsCnt) {
                nextSession = terminalMockSendSession;
            } else {
                nextSession = mockSendSessions.get(sessionIdx);
                sessionIdx++;
            }
            this.currentMockSendSession = nextSession;
        }
        // Exhausting the scripted sessions completes the whole connection.
        if (isTerminalSession(nextSession)) {
            connectionStateSink.emitComplete(Sinks.EmitFailureHandler.FAIL_FAST);
        }
        return nextSession;
    }

    private MockSendLink moveToNextSendLinkInCurrentSession() {
        final MockSendSession session;
        final MockSendLink nextSendLink;
        synchronized (lock) {
            session = Objects.requireNonNull(currentMockSendSession, "Current Session is null");
            nextSendLink = session.moveToNextSendLink();
        }
        // Exhausting a session's scripted links completes that session.
        if (session.isTerminalSendLink(nextSendLink)) {
            session.emitSessionCompletion();
        }
        return nextSendLink;
    }

    private AmqpSendLink lookupAmqpSendLinkFor(SendLinkHandler sendLinkHandler) {
        for (MockSendSession mockSendSession : mockSendSessions) {
            final AmqpSendLink amqpSendLink = mockSendSession.lookupAmqpSendLinkFor(sendLinkHandler);
            if (amqpSendLink != null) {
                return amqpSendLink;
            }
        }
        final AmqpSendLink amqpSendLink = terminalMockSendSession.lookupAmqpSendLinkFor(sendLinkHandler);
        if (amqpSendLink != null) {
            return amqpSendLink;
        }
        throw new NullPointerException("Lookup for AmqpSendLink failed.");
    }

    private boolean isTerminalSession(MockSendSession session) {
        return session == terminalMockSendSession;
    }

    @Override
    public void close() {
        for (MockSendSession mockSendSession : mockSendSessions) {
            mockSendSession.close();
        }
        terminalMockSendSession.close();
    }
}

/**
 * One mocked AMQP session plus its ordered send links; links are handed out one-by-one, then a
 * link-less terminal link. Link-cursor fields are guarded by the owning MockSendSessions lock.
 */
private static final class MockSendSession implements Closeable {
    private final String connectionId;
    private final Session session;
    private final Record sessionAttachments;
    private final SessionHandler sessionHandler;
    private final Sinks.Many<EndpointState> sessionStateSink;
    private final List<MockSendLink> mockSendLinks;
    private final MockSendLink terminalMockSendLink;
    private final int sendLinkCnt;
    private int sendLinkIdx;
    private MockSendLink currentMockSendLink;

    private MockSendSession(String connectionId, Session session, Record sessionAttachments,
        SessionHandler sessionHandler, Sinks.Many<EndpointState> sessionStateSink,
        List<MockSendLink> mockSendLinks, MockSendLink terminalMockSendLink) {
        this.connectionId = connectionId;
        this.session = session;
        this.sessionAttachments = sessionAttachments;
        this.sessionHandler = sessionHandler;
        this.sessionStateSink = sessionStateSink;
        this.mockSendLinks = mockSendLinks;
        this.terminalMockSendLink = terminalMockSendLink;
        this.sendLinkCnt = this.mockSendLinks.size();
        this.sendLinkIdx = 0;
    }

    static MockSendSession create(String connectionId, int sendLinkCnt) {
        final List<MockSendLink> mockSendLinks = new ArrayList<>(sendLinkCnt);
        for (int i = 0; i < sendLinkCnt; i++) {
            mockSendLinks.add(MockSendLink.create());
        }
        final MockSendLink terminalMockSendLink = MockSendLink.create();
        final Record sessionAttachments = mock(Record.class);
        final Session session = mock(Session.class);
        final SessionHandler sessionHandler = mock(SessionHandler.class);
        final Sinks.Many<EndpointState> sessionStateSink = Sinks.many().replay()
            .latestOrDefault(EndpointState.UNINITIALIZED);
        return new MockSendSession(connectionId, session, sessionAttachments, sessionHandler, sessionStateSink,
            Collections.unmodifiableList(mockSendLinks), terminalMockSendLink);
    }

    void arrange() {
        for (MockSendLink mockSendLink : mockSendLinks) {
            mockSendLink.arrange();
            mockSendLink.emitSendLinkState(EndpointState.ACTIVE);
        }
        terminalMockSendLink.arrange();
        terminalMockSendLink.emitSendLinkCompletion();
        // Any send on the terminal link errors retriably; tests should never rely on its payload.
        final Answer<Mono<Void>> terminalSendAnswer = new Answer<Mono<Void>>() {
            @Override
            public Mono<Void> answer(InvocationOnMock invocation) {
                return Mono.error(new AmqpException(true, "terminal-send-link-result", null));
            }
        };
        when(terminalMockSendLink.getAmqpSendLink().send(anyList())).then(terminalSendAnswer);
        when(terminalMockSendLink.getAmqpSendLink().send(any(Message.class))).then(terminalSendAnswer);
        when(terminalMockSendLink.getAmqpSendLink().send(any(Message.class), any(DeliveryState.class)))
            .then(terminalSendAnswer);
        when(terminalMockSendLink.getAmqpSendLink().send(anyList(), any(DeliveryState.class)))
            .then(terminalSendAnswer);
        when(terminalMockSendLink.getAmqpSendLink().send(any(), anyInt(), anyInt(), any(DeliveryState.class)))
            .then(terminalSendAnswer);
        doNothing().when(sessionAttachments).set(any(), any(), anyString());
        when(session.attachments()).thenReturn(sessionAttachments);
        doNothing().when(session).open();
        doNothing().when(session).setCondition(any());
        when(sessionHandler.getConnectionId()).thenReturn(connectionId);
        when(sessionHandler.getEndpointStates()).thenReturn(sessionStateSink.asFlux().distinctUntilChanged());
        doNothing().when(sessionHandler).close();
        final ArrayList<Sender> qpidSenders = new ArrayList<>(sendLinkCnt + 1);
        for (MockSendLink mockSendLink : mockSendLinks) {
            qpidSenders.add(mockSendLink.getQpidSender());
        }
        qpidSenders.add(terminalMockSendLink.getQpidSender());
        when(session.sender(any()))
            .thenReturn(qpidSenders.get(0), qpidSenders.subList(1, sendLinkCnt + 1).toArray(new Sender[0]));
    }

    Session getQpidSession() {
        return session;
    }

    SessionHandler getSessionHandler() {
        return sessionHandler;
    }

    AmqpSendLink getAmqpSendLink(int linkIdx) {
        Assertions.assertTrue(linkIdx >= 0 && linkIdx < sendLinkCnt, "linkIdx is not in range.");
        return mockSendLinks.get(linkIdx).getAmqpSendLink();
    }

    MockSendLink moveToNextSendLink() {
        final MockSendLink nextSendLink;
        if (sendLinkIdx >= sendLinkCnt) {
            nextSendLink = terminalMockSendLink;
        } else {
            nextSendLink = mockSendLinks.get(sendLinkIdx);
            sendLinkIdx++;
        }
        currentMockSendLink = nextSendLink;
        return nextSendLink;
    }

    boolean isTerminalSendLink(MockSendLink link) {
        return this.terminalMockSendLink == link;
    }

    MockSendLink getCurrentSendLink() {
        return Objects.requireNonNull(currentMockSendLink, "Current Link is null");
    }

    AmqpSendLink lookupAmqpSendLinkFor(SendLinkHandler sendLinkHandler) {
        for (MockSendLink sendLink : mockSendLinks) {
            if (sendLink.getSendLinkHandler() == sendLinkHandler) {
                return sendLink.getAmqpSendLink();
            }
        }
        if (terminalMockSendLink.getSendLinkHandler() == sendLinkHandler) {
            return terminalMockSendLink.getAmqpSendLink();
        }
        return null;
    }

    void emitSessionState(EndpointState state) {
        this.sessionStateSink.emitNext(state, Sinks.EmitFailureHandler.FAIL_FAST);
    }

    void emitSessionError(Throwable error) {
        this.sessionStateSink.emitError(error, Sinks.EmitFailureHandler.FAIL_FAST);
    }

    void emitSessionCompletion() {
        this.sessionStateSink.emitComplete(Sinks.EmitFailureHandler.FAIL_FAST);
    }

    @Override
    public void close() {
        Mockito.framework().clearInlineMock(session);
        Mockito.framework().clearInlineMock(sessionAttachments);
        Mockito.framework().clearInlineMock(sessionHandler);
        for (MockSendLink sendLink : mockSendLinks) {
            sendLink.close();
        }
        terminalMockSendLink.close();
    }
}

/** One mocked send link: qpid {@link Sender}, its handler, the AmqpSendLink, and a state sink. */
private static final class MockSendLink implements Closeable {
    private final Sender sender;
    private final Record senderAttachments;
    private final AmqpSendLink amqpSendLink;
    private final SendLinkHandler sendLinkHandler;
    private final Sinks.Many<EndpointState> sendLinkStateSink;

    private MockSendLink(Sender sender, Record senderAttachments, AmqpSendLink amqpSendLink,
        SendLinkHandler sendLinkHandler, Sinks.Many<EndpointState> sendLinkStateSink) {
        this.sender = sender;
        this.senderAttachments = senderAttachments;
        this.amqpSendLink = amqpSendLink;
        this.sendLinkHandler = sendLinkHandler;
        this.sendLinkStateSink = sendLinkStateSink;
    }

    static MockSendLink create() {
        final Record senderAttachments = mock(Record.class);
        final Sender sender = mock(Sender.class);
        final AmqpSendLink amqpSendLink = mock(AmqpSendLink.class);
        final SendLinkHandler sendLinkHandler = mock(SendLinkHandler.class);
        final Sinks.Many<EndpointState> sendLinkStateSink = Sinks.many().replay()
            .latestOrDefault(EndpointState.UNINITIALIZED);
        return new MockSendLink(sender, senderAttachments, amqpSendLink, sendLinkHandler, sendLinkStateSink);
    }

    void arrange() {
        doNothing().when(senderAttachments).set(any(), any(), anyString());
        when(sender.attachments()).thenReturn(senderAttachments);
        doNothing().when(sender).setTarget(any());
        doNothing().when(sender).setSenderSettleMode(any());
        doNothing().when(sender).setProperties(any());
        doNothing().when(sender).setSource(any());
        doNothing().when(sender).open();
        when(amqpSendLink.getLinkSize()).thenReturn(Mono.just(ServiceBusSenderAsyncClient.MAX_MESSAGE_LENGTH_BYTES));
        // Both the AmqpSendLink and its handler observe the same state sink.
        when(amqpSendLink.getEndpointStates())
            .thenReturn(sendLinkStateSink.asFlux().distinctUntilChanged().map(state -> toAmqpEndpointState(state)));
        when(sendLinkHandler.getEndpointStates()).thenReturn(sendLinkStateSink.asFlux().distinctUntilChanged());
        doNothing().when(sendLinkHandler).close();
    }

    Sender getQpidSender() {
        return sender;
    }

    AmqpSendLink getAmqpSendLink() {
        return amqpSendLink;
    }

    SendLinkHandler getSendLinkHandler() {
        return sendLinkHandler;
    }

    void emitSendLinkState(EndpointState state) {
        this.sendLinkStateSink.emitNext(state, Sinks.EmitFailureHandler.FAIL_FAST);
    }

    void emitSendLinkError(Throwable error) {
        this.sendLinkStateSink.emitError(error, Sinks.EmitFailureHandler.FAIL_FAST);
    }

    void emitSendLinkCompletion() {
        this.sendLinkStateSink.emitComplete(Sinks.EmitFailureHandler.FAIL_FAST);
    }

    private static AmqpEndpointState toAmqpEndpointState(EndpointState state) {
        switch (state) {
            case ACTIVE:
                return AmqpEndpointState.ACTIVE;
            case UNINITIALIZED:
                return AmqpEndpointState.UNINITIALIZED;
            case CLOSED:
                return AmqpEndpointState.CLOSED;
            default:
                throw new IllegalArgumentException("This endpoint state is not supported. State:" + state);
        }
    }

    @Override
    public void close() {
        Mockito.framework().clearInlineMock(sender);
        Mockito.framework().clearInlineMock(senderAttachments);
        Mockito.framework().clearInlineMock(amqpSendLink);
        Mockito.framework().clearInlineMock(sendLinkHandler);
    }
}
}
class ServiceBusSenderAsyncClientRecoveryIsolatedTest { private static final BinaryData TEST_CONTENTS = BinaryData.fromString("My message for service bus queue!"); private static final String FQDN = "contoso-shopping.servicebus.windows.net"; private static final String QUEUE_NAME = "orders"; private static final String CLIENT_IDENTIFIER = "client-identifier"; private static final ServiceBusSenderInstrumentation DEFAULT_INSTRUMENTATION = new ServiceBusSenderInstrumentation( null, null, FQDN, QUEUE_NAME); private static final Duration VIRTUAL_TIME_SHIFT = OPERATION_TIMEOUT.plusSeconds(30); private static final AmqpException RETRIABLE_LINK_ERROR = new AmqpException(true, AmqpErrorCondition.LINK_DETACH_FORCED, "detach-link-error", new AmqpErrorContext(FQDN)); private static final AmqpException RETRIABLE_SESSION_ERROR = new AmqpException(true, "session-error", new AmqpErrorContext(FQDN)); private static final AmqpException RETRIABLE_CONNECTION_ERROR = new AmqpException(true, AmqpErrorCondition.CONNECTION_FORCED, "connection-forced-error", new AmqpErrorContext(FQDN)); private static final AmqpException NON_RETRIABLE_ERROR_1 = new AmqpException(false, AmqpErrorCondition.NOT_ALLOWED, "not-allowed-error-1", new AmqpErrorContext(FQDN)); private static final AmqpException NON_RETRIABLE_ERROR_2 = new AmqpException(false, AmqpErrorCondition.NOT_ALLOWED, "not-allowed-error-2", new AmqpErrorContext(FQDN)); private static final AmqpRetryOptions RETRY_OPTIONS = new AmqpRetryOptions() .setMode(AmqpRetryMode.FIXED) .setMaxRetries(10) .setMaxDelay(Duration.ofSeconds(5)) .setDelay(Duration.ofSeconds(1)) .setTryTimeout(OPERATION_TIMEOUT); private final MessageSerializer messageSerializer = new ServiceBusMessageSerializer(); @Mock private Runnable onClientClosed; @Captor private ArgumentCaptor<List<Message>> sendMessagesCaptor0; @Captor private ArgumentCaptor<List<Message>> sendMessagesCaptor1; private AutoCloseable mocksCloseable; @BeforeEach void setup() { mocksCloseable = 
MockitoAnnotations.openMocks(this); } @AfterEach void teardown() throws Exception { Mockito.framework().clearInlineMock(this); if (mocksCloseable != null) { mocksCloseable.close(); } } @Test @Execution(ExecutionMode.SAME_THREAD) @Test @Execution(ExecutionMode.SAME_THREAD) void shouldBubbleUpNonRetriableSendLinkError() { final int sessionsCnt = 1; final int[] linksPerSession = new int[] { 2 }; try (MockEndpoint endpoint = createMockEndpoint(sessionsCnt, linksPerSession)) { final Supplier<ServiceBusReactorAmqpConnection> connectionSupplier = singleConnectionSupplier(endpoint); final Answer<Mono<Void>> sendAnswer = new Answer<Mono<Void>>() { private int invocationCount = -1; @Override public Mono<Void> answer(InvocationOnMock invocation) { invocationCount++; switch (invocationCount) { case 0: endpoint.emitCurrentSendLinkError(RETRIABLE_LINK_ERROR); return Mono.error(RETRIABLE_LINK_ERROR); case 1: endpoint.emitCurrentSendLinkError(NON_RETRIABLE_ERROR_1); return Mono.error(NON_RETRIABLE_ERROR_1); default: throw new RuntimeException("More than two invocations of send-answer is not expected."); } } }; when(endpoint.getAmqpSendLink(0, 0).send(anyList())).thenAnswer(sendAnswer); when(endpoint.getAmqpSendLink(0, 1).send(anyList())).thenAnswer(sendAnswer); final int messagesCount = 4; final List<ServiceBusMessage> messagesToSend = createMessagesToSend(messagesCount); final ReactorConnectionCache<ServiceBusReactorAmqpConnection> connectionCache = createConnectionCache(connectionSupplier); final ServiceBusSenderAsyncClient sender = createSenderAsyncClient(connectionCache, false); try { try (VirtualTimeStepVerifier verifier = new VirtualTimeStepVerifier()) { verifier.create(() -> sender.sendMessages(messagesToSend)) .thenAwait(VIRTUAL_TIME_SHIFT) .verifyErrorSatisfies(e -> { Assertions.assertTrue(e instanceof ServiceBusException); final ServiceBusException se = (ServiceBusException) e; Assertions.assertSame(NON_RETRIABLE_ERROR_1, se.getCause()); }); } 
verify(endpoint.getAmqpSendLink(0, 0)).send(sendMessagesCaptor0.capture()); final List<Message> messagesSendInSession0Link0 = sendMessagesCaptor0.getValue(); Assertions.assertEquals(messagesCount, messagesSendInSession0Link0.size()); verify(endpoint.getAmqpSendLink(0, 1)).send(sendMessagesCaptor1.capture()); final List<Message> messagesSendInSession0Link1 = sendMessagesCaptor1.getValue(); Assertions.assertEquals(messagesCount, messagesSendInSession0Link1.size()); verifyNoInteractions(onClientClosed); } finally { sender.close(); connectionCache.dispose(); } } } @Test @Execution(ExecutionMode.SAME_THREAD) void shouldRecoverFromRetriableSessionError() { final int sessionsCnt = 2; final int[] linksPerSession = new int[] { 1, 1 }; try (MockEndpoint endpoint = createMockEndpoint(sessionsCnt, linksPerSession)) { final Supplier<ServiceBusReactorAmqpConnection> connectionSupplier = singleConnectionSupplier(endpoint); final Answer<Mono<Void>> sendAnswer = new Answer<Mono<Void>>() { private int invocationCount = -1; @Override public Mono<Void> answer(InvocationOnMock invocation) { invocationCount++; switch (invocationCount) { case 0: endpoint.emitCurrentSessionError(RETRIABLE_SESSION_ERROR); return Mono.error(RETRIABLE_SESSION_ERROR); case 1: return Mono.empty(); default: throw new RuntimeException("More than two invocations of send-answer is not expected."); } } }; when(endpoint.getAmqpSendLink(0, 0).send(anyList())).thenAnswer(sendAnswer); when(endpoint.getAmqpSendLink(1, 0).send(anyList())).thenAnswer(sendAnswer); final int messagesCount = 4; final List<ServiceBusMessage> messagesToSend = createMessagesToSend(messagesCount); final ReactorConnectionCache<ServiceBusReactorAmqpConnection> connectionCache = createConnectionCache(connectionSupplier); final ServiceBusSenderAsyncClient sender = createSenderAsyncClient(connectionCache, false); try { try (VirtualTimeStepVerifier verifier = new VirtualTimeStepVerifier()) { verifier.create(() -> sender.sendMessages(messagesToSend)) 
.thenAwait(VIRTUAL_TIME_SHIFT) .verifyComplete(); } verify(endpoint.getAmqpSendLink(0, 0)).send(sendMessagesCaptor0.capture()); final List<Message> messagesSendInSession0Link0 = sendMessagesCaptor0.getValue(); Assertions.assertEquals(messagesCount, messagesSendInSession0Link0.size()); verify(endpoint.getAmqpSendLink(1, 0)).send(sendMessagesCaptor1.capture()); final List<Message> messagesSendInSession1Link0 = sendMessagesCaptor1.getValue(); Assertions.assertEquals(messagesCount, messagesSendInSession1Link0.size()); } finally { sender.close(); connectionCache.dispose(); } } } @Test @Execution(ExecutionMode.SAME_THREAD) void shouldBubbleUpNonRetriableSessionError() { final int sessionsCnt = 2; final int[] linksPerSession = new int[] { 1, 1 }; try (MockEndpoint endpoint = createMockEndpoint(sessionsCnt, linksPerSession)) { final Supplier<ServiceBusReactorAmqpConnection> connectionSupplier = singleConnectionSupplier(endpoint); final Answer<Mono<Void>> sendAnswer = new Answer<Mono<Void>>() { private int invocationCount = -1; @Override public Mono<Void> answer(InvocationOnMock invocation) { invocationCount++; switch (invocationCount) { case 0: endpoint.emitCurrentSessionError(RETRIABLE_SESSION_ERROR); return Mono.error(RETRIABLE_SESSION_ERROR); case 1: endpoint.emitCurrentSessionError(NON_RETRIABLE_ERROR_1); return Mono.error(NON_RETRIABLE_ERROR_1); default: throw new RuntimeException("More than two invocations of send-answer is not expected."); } } }; when(endpoint.getAmqpSendLink(0, 0).send(anyList())).thenAnswer(sendAnswer); when(endpoint.getAmqpSendLink(1, 0).send(anyList())).thenAnswer(sendAnswer); final int messagesCount = 4; final List<ServiceBusMessage> messagesToSend = createMessagesToSend(messagesCount); final ReactorConnectionCache<ServiceBusReactorAmqpConnection> connectionCache = createConnectionCache(connectionSupplier); final ServiceBusSenderAsyncClient sender = createSenderAsyncClient(connectionCache, false); try { try (VirtualTimeStepVerifier verifier = 
new VirtualTimeStepVerifier()) { verifier.create(() -> sender.sendMessages(messagesToSend)) .thenAwait(VIRTUAL_TIME_SHIFT) .verifyErrorSatisfies(e -> { Assertions.assertTrue(e instanceof ServiceBusException); final ServiceBusException se = (ServiceBusException) e; Assertions.assertSame(NON_RETRIABLE_ERROR_1, se.getCause()); }); } verify(endpoint.getAmqpSendLink(0, 0)).send(sendMessagesCaptor0.capture()); final List<Message> messagesSendInSession0Link0 = sendMessagesCaptor0.getValue(); Assertions.assertEquals(messagesCount, messagesSendInSession0Link0.size()); verify(endpoint.getAmqpSendLink(1, 0)).send(sendMessagesCaptor1.capture()); final List<Message> messagesSendInSession1Link0 = sendMessagesCaptor1.getValue(); Assertions.assertEquals(messagesCount, messagesSendInSession1Link0.size()); verifyNoInteractions(onClientClosed); } finally { sender.close(); connectionCache.dispose(); } } } @Test @Execution(ExecutionMode.SAME_THREAD) void shouldSenderReusableAfterNonRetriableLinkAndSessionError() { final int sessionsCnt = 3; final int[] linksPerSession = new int[] { 1, 1, 1 }; try (MockEndpoint endpoint = createMockEndpoint(sessionsCnt, linksPerSession)) { final Supplier<ServiceBusReactorAmqpConnection> connectionSupplier = new Supplier<ServiceBusReactorAmqpConnection>() { private int invocationCount = -1; @Override public ServiceBusReactorAmqpConnection get() { invocationCount++; switch (invocationCount) { case 0: final ServiceBusReactorAmqpConnection c = endpoint.arrange(); return c; default: throw new RuntimeException("More than one invocation of connection-supplier is not expected."); } } }; final Answer<Mono<Void>> sendAnswer = new Answer<Mono<Void>>() { private int invocationCount = -1; @Override public Mono<Void> answer(InvocationOnMock invocation) { invocationCount++; switch (invocationCount) { case 0: endpoint.emitCurrentSendLinkError(NON_RETRIABLE_ERROR_1); return Mono.error(NON_RETRIABLE_ERROR_1); case 1: 
endpoint.emitCurrentSessionError(NON_RETRIABLE_ERROR_2); return Mono.error(NON_RETRIABLE_ERROR_2); case 2: return Mono.empty(); default: throw new RuntimeException("More than three invocations of send-answer is not expected."); } } }; when(endpoint.getAmqpSendLink(0, 0).send(anyList())).thenAnswer(sendAnswer); when(endpoint.getAmqpSendLink(1, 0).send(anyList())).thenAnswer(sendAnswer); when(endpoint.getAmqpSendLink(2, 0).send(any(Message.class))).thenAnswer(sendAnswer); final int messagesCount = 4; final List<ServiceBusMessage> messagesToSend = createMessagesToSend(messagesCount); final ServiceBusMessage messageToSend = createMessageToSend(); final ReactorConnectionCache<ServiceBusReactorAmqpConnection> connectionCache = createConnectionCache(connectionSupplier); final ServiceBusSenderAsyncClient sender = createSenderAsyncClient(connectionCache, false); try { try (VirtualTimeStepVerifier verifier = new VirtualTimeStepVerifier()) { verifier.create(() -> sender.sendMessages(messagesToSend)) .thenAwait(VIRTUAL_TIME_SHIFT) .verifyErrorSatisfies(e -> { Assertions.assertTrue(e instanceof ServiceBusException); final ServiceBusException se = (ServiceBusException) e; Assertions.assertSame(NON_RETRIABLE_ERROR_1, se.getCause()); }); } try (VirtualTimeStepVerifier verifier = new VirtualTimeStepVerifier()) { verifier.create(() -> sender.sendMessages(messagesToSend)) .thenAwait(VIRTUAL_TIME_SHIFT) .verifyErrorSatisfies(e -> { Assertions.assertTrue(e instanceof ServiceBusException); final ServiceBusException se = (ServiceBusException) e; Assertions.assertSame(NON_RETRIABLE_ERROR_2, se.getCause()); }); } try (VirtualTimeStepVerifier verifier = new VirtualTimeStepVerifier()) { verifier.create(() -> sender.sendMessage(messageToSend)) .thenAwait(VIRTUAL_TIME_SHIFT) .verifyComplete(); } verify(endpoint.getAmqpSendLink(0, 0)).send(sendMessagesCaptor0.capture()); final List<Message> messagesSendInSession0Link0 = sendMessagesCaptor0.getValue(); Assertions.assertEquals(messagesCount, 
messagesSendInSession0Link0.size()); verify(endpoint.getAmqpSendLink(1, 0)).send(sendMessagesCaptor1.capture()); final List<Message> messagesSendInSession1Link0 = sendMessagesCaptor1.getValue(); Assertions.assertEquals(messagesCount, messagesSendInSession1Link0.size()); verify(endpoint.getAmqpSendLink(2, 0), times(1)).send(any(Message.class)); } finally { sender.close(); connectionCache.dispose(); } } } @Test @Execution(ExecutionMode.SAME_THREAD) void shouldRecoverFromRetriableConnectionError() { final int endpointsCount = 4; final List<SessionLinkCount> sessionLinkCountList = new ArrayList<>(endpointsCount); sessionLinkCountList.add(new SessionLinkCount(1, new int[] { 1 })); sessionLinkCountList.add(new SessionLinkCount(1, new int[] { 1 })); sessionLinkCountList.add(new SessionLinkCount(1, new int[] { 1 })); sessionLinkCountList.add(new SessionLinkCount(1, new int[] { 1 })); try (MockEndpoints endpoints = createMockEndpoints(sessionLinkCountList)) { final AtomicReference<MockEndpoint> currentEndpoint = new AtomicReference<>(); final Supplier<ServiceBusReactorAmqpConnection> connectionSupplier = new Supplier<ServiceBusReactorAmqpConnection>() { private int endpointIndex = -1; @Override public ServiceBusReactorAmqpConnection get() { endpointIndex++; if (endpointIndex >= endpointsCount) { throw new RuntimeException("More than " + endpointsCount + " invocation of connection-supplier is not expected."); } final MockEndpoint e = endpoints.get(endpointIndex); currentEndpoint.set(e); final ServiceBusReactorAmqpConnection c = e.arrange(); return c; } }; final Answer<Mono<Void>> sendAnswer = new Answer<Mono<Void>>() { private int invocationCount = -1; @Override public Mono<Void> answer(InvocationOnMock invocation) { invocationCount++; switch (invocationCount) { case 0: Assertions.assertEquals(endpoints.get(0), currentEndpoint.get()); endpoints.get(0).emitCurrentSendLinkError(RETRIABLE_LINK_ERROR); return Mono.error(RETRIABLE_LINK_ERROR); case 1: 
Assertions.assertEquals(endpoints.get(1), currentEndpoint.get()); endpoints.get(1).emitCurrentSessionError(RETRIABLE_SESSION_ERROR); return Mono.error(RETRIABLE_SESSION_ERROR); case 2: Assertions.assertEquals(endpoints.get(2), currentEndpoint.get()); endpoints.get(2).emitConnectionError(RETRIABLE_CONNECTION_ERROR); return Mono.error(RETRIABLE_CONNECTION_ERROR); case 3: Assertions.assertEquals(endpoints.get(3), currentEndpoint.get()); return Mono.empty(); default: throw new RuntimeException("More than three invocations of send-answer is not expected."); } } }; when(endpoints.get(0).getAmqpSendLink(0, 0).send(anyList())).thenAnswer(sendAnswer); when(endpoints.get(1).getAmqpSendLink(0, 0).send(anyList())).thenAnswer(sendAnswer); when(endpoints.get(2).getAmqpSendLink(0, 0).send(anyList())).thenAnswer(sendAnswer); when(endpoints.get(3).getAmqpSendLink(0, 0).send(anyList())).thenAnswer(sendAnswer); final int messagesCount = 4; final List<ServiceBusMessage> messagesToSend = createMessagesToSend(messagesCount); final ReactorConnectionCache<ServiceBusReactorAmqpConnection> connectionCache = createConnectionCache(connectionSupplier); final ServiceBusSenderAsyncClient sender = createSenderAsyncClient(connectionCache, false); try { try (VirtualTimeStepVerifier verifier = new VirtualTimeStepVerifier()) { verifier.create(() -> sender.sendMessages(messagesToSend)) .thenAwait(VIRTUAL_TIME_SHIFT) .verifyComplete(); } verify(endpoints.get(0).getAmqpSendLink(0, 0), times(1)).send(anyList()); verify(endpoints.get(1).getAmqpSendLink(0, 0), times(1)).send(anyList()); verify(endpoints.get(2).getAmqpSendLink(0, 0), times(1)).send(anyList()); verify(endpoints.get(3).getAmqpSendLink(0, 0), times(1)).send(anyList()); } finally { sender.close(); connectionCache.dispose(); } } } private MockEndpoint createMockEndpoint(int sessionsCnt, int[] linksPerSession) { final String connectionId = "1"; return MockEndpoint.create(connectionId, QUEUE_NAME, RETRY_OPTIONS, sessionsCnt, linksPerSession); } 
private MockEndpoints createMockEndpoints(List<SessionLinkCount> sessionLinkCountList) { return MockEndpoints.create(QUEUE_NAME, RETRY_OPTIONS, sessionLinkCountList); } private Supplier<ServiceBusReactorAmqpConnection> singleConnectionSupplier(MockEndpoint endpoint) { final Supplier<ServiceBusReactorAmqpConnection> connectionSupplier = new Supplier<ServiceBusReactorAmqpConnection>() { private int invocationCount = -1; @Override public ServiceBusReactorAmqpConnection get() { invocationCount++; switch (invocationCount) { case 0: final ServiceBusReactorAmqpConnection c = endpoint.arrange(); return c; default: throw new RuntimeException("More than one invocation of connection-supplier is not expected."); } } }; return connectionSupplier; } private ReactorConnectionCache<ServiceBusReactorAmqpConnection> createConnectionCache( Supplier<ServiceBusReactorAmqpConnection> connectionSupplier) { return new ReactorConnectionCache<>(connectionSupplier, FQDN, QUEUE_NAME, getRetryPolicy(RETRY_OPTIONS), new HashMap<>()); } private ServiceBusSenderAsyncClient createSenderAsyncClient( ReactorConnectionCache<ServiceBusReactorAmqpConnection> connectionCache, boolean isSharedConnection) { final ConnectionCacheWrapper connectionSupport = new ConnectionCacheWrapper(connectionCache); return new ServiceBusSenderAsyncClient(QUEUE_NAME, MessagingEntityType.QUEUE, connectionSupport, RETRY_OPTIONS, DEFAULT_INSTRUMENTATION, messageSerializer, onClientClosed, "", CLIENT_IDENTIFIER); } private static List<ServiceBusMessage> createMessagesToSend(int messagesCount) { return IntStream.range(0, messagesCount) .mapToObj(__ -> new ServiceBusMessage(TEST_CONTENTS)) .collect(Collectors.toList()); } private static ServiceBusMessage createMessageToSend() { final ServiceBusMessage messageToSend = new ServiceBusMessage(TEST_CONTENTS); return messageToSend; } private static final class VirtualTimeStepVerifier implements AutoCloseable { private final VirtualTimeScheduler scheduler; VirtualTimeStepVerifier() { 
scheduler = VirtualTimeScheduler.create(); } <T> StepVerifier.Step<T> create(Supplier<Mono<T>> scenarioSupplier) { return StepVerifier.withVirtualTime(scenarioSupplier, () -> scheduler, 0); } @Override public void close() { scheduler.dispose(); } } private static class SessionLinkCount { private final int sessionsCnt; private final int[] linksPerSession; SessionLinkCount(int sessionsCnt, int[] linksPerSession) { this.sessionsCnt = sessionsCnt; this.linksPerSession = linksPerSession; } } private static final class MockEndpoints implements Closeable { private final List<MockEndpoint> mockEndpoints; private final int mockEndpointsCnt; private MockEndpoints(List<MockEndpoint> mockEndpoints) { this.mockEndpoints = mockEndpoints; this.mockEndpointsCnt = this.mockEndpoints.size(); } static MockEndpoints create(String queueName, AmqpRetryOptions retryOptions, List<SessionLinkCount> sessionLinkCounts) { final List<MockEndpoint> mockEndpoints = new ArrayList<>(sessionLinkCounts.size()); int conId = 1; for (SessionLinkCount slc : sessionLinkCounts) { mockEndpoints.add(MockEndpoint.create(String.valueOf(conId), queueName, retryOptions, slc.sessionsCnt, slc.linksPerSession)); conId++; } return new MockEndpoints(mockEndpoints); } MockEndpoint get(int index) { if (index >= mockEndpointsCnt) { throw new IndexOutOfBoundsException("index:" + index + " maxIndex: " + (mockEndpointsCnt - 1)); } return mockEndpoints.get(index); } @Override public void close() { for (MockEndpoint mockEndpoint : mockEndpoints) { mockEndpoint.close(); } } } private static final class MockEndpoint implements Closeable { private final String connectionId; private final String queueName; private final AmqpRetryOptions retryOptions; private final MockSendSessions mockSendSessions; private final ConnectionOptions connectionOptions; private final Connection connection; private final Reactor reactor; private final ReactorDispatcher reactorDispatcher; private final ReactorExecutor reactorExecutor; private final 
ReactorProvider reactorProvider; private final ConnectionHandler connectionHandler; private final Sinks.Many<EndpointState> connectionStateSink; private final ReactorHandlerProvider handlerProvider; private final ServiceBusAmqpLinkProvider linkProvider; private final TokenManagerProvider tokenManagerProvider; private final TokenManager tokenManager; private final MessageSerializer messageSerializer; private final AtomicBoolean arranged = new AtomicBoolean(false); private MockEndpoint(String connectionId, String queueName, AmqpRetryOptions retryOptions, MockSendSessions mockSendSessions, ConnectionOptions connectionOptions, Connection connection, Reactor reactor, ReactorDispatcher reactorDispatcher, ReactorExecutor reactorExecutor, ReactorProvider reactorProvider, ConnectionHandler connectionHandler, Sinks.Many<EndpointState> connectionStateSink, ReactorHandlerProvider handlerProvider, ServiceBusAmqpLinkProvider linkProvider, TokenManagerProvider tokenManagerProvider, TokenManager tokenManager, MessageSerializer messageSerializer) { this.connectionId = connectionId; this.queueName = queueName; this.retryOptions = retryOptions; this.mockSendSessions = mockSendSessions; this.connectionOptions = connectionOptions; this.connection = connection; this.reactor = reactor; this.reactorDispatcher = reactorDispatcher; this.reactorExecutor = reactorExecutor; this.reactorProvider = reactorProvider; this.connectionHandler = connectionHandler; this.connectionStateSink = connectionStateSink; this.handlerProvider = handlerProvider; this.linkProvider = linkProvider; this.tokenManagerProvider = tokenManagerProvider; this.tokenManager = tokenManager; this.messageSerializer = messageSerializer; } static MockEndpoint create(String connectionId, String queueName, AmqpRetryOptions retryOptions, int sessionsCnt, int[] linksPerSession) { Assertions.assertNotNull(retryOptions); Assertions.assertTrue(sessionsCnt > 0, "sessionsCnt must be > 0."); Assertions.assertEquals(sessionsCnt, 
linksPerSession.length); for (int linksCnt = 0; linksCnt < linksPerSession.length; linksCnt++) { Assertions.assertTrue(linksCnt >= 0, "links-count in linksPerSession must be >= 0."); } final MockSendSessions mockSendSessions = MockSendSessions.create(connectionId, sessionsCnt, linksPerSession); final ConnectionOptions connectionOptions = mock(ConnectionOptions.class); final Connection connection = mock(Connection.class); final Sinks.Many<EndpointState> connectionStateSink = Sinks.many().replay() .latestOrDefault(EndpointState.UNINITIALIZED); final ConnectionHandler connectionHandler = mock(ConnectionHandler.class); final Reactor reactor = mock(Reactor.class); final ReactorDispatcher reactorDispatcher = mock(ReactorDispatcher.class); final ReactorExecutor reactorExecutor = mock(ReactorExecutor.class); final ReactorProvider reactorProvider = mock(ReactorProvider.class); final ReactorHandlerProvider handlerProvider = mock(ReactorHandlerProvider.class); final ServiceBusAmqpLinkProvider linkProvider = mock(ServiceBusAmqpLinkProvider.class); final TokenManager tokenManager = mock(TokenManager.class); final TokenManagerProvider tokenManagerProvider = mock(TokenManagerProvider.class); final MessageSerializer messageSerializer = mock(MessageSerializer.class); return new MockEndpoint(connectionId, queueName, retryOptions, mockSendSessions, connectionOptions, connection, reactor, reactorDispatcher, reactorExecutor, reactorProvider, connectionHandler, connectionStateSink, handlerProvider, linkProvider, tokenManagerProvider, tokenManager, messageSerializer); } ServiceBusReactorAmqpConnection arrange() { if (arranged.getAndSet(true)) { throw new RuntimeException("Only one connection can be obtained from a MockEndpoint instance."); } mockSendSessions.arrange(handlerProvider, linkProvider, connection, connectionStateSink); when(connectionOptions.getRetry()).thenReturn(retryOptions); doNothing().when(connection).close(); connectionStateSink.emitNext(EndpointState.ACTIVE, 
Sinks.EmitFailureHandler.FAIL_FAST); when(connectionHandler.getEndpointStates()).thenReturn(connectionStateSink.asFlux().distinctUntilChanged()); doNothing().when(connectionHandler).close(); when(reactor.connectionToHost(any(), anyInt(), any())).thenReturn(connection); try { doAnswer(invocation -> { final Runnable work = invocation.getArgument(0); work.run(); return null; }).when(reactorDispatcher).invoke(any(Runnable.class)); } catch (IOException ioe) { throw new UncheckedIOException(ioe); } when(reactorDispatcher.getShutdownSignal()).thenReturn(Mono.empty()); doNothing().when(reactorExecutor).start(); when(reactorExecutor.closeAsync()).thenReturn(Mono.empty()); try { when(reactorProvider.createReactor(anyString(), anyInt())).thenReturn(reactor); } catch (IOException ioe) { throw new UncheckedIOException(ioe); } when(reactorProvider.getReactorDispatcher()).thenReturn(reactorDispatcher); when(reactorProvider.createExecutor(any(), anyString(), any(), any(), any())).thenReturn(reactorExecutor); when(handlerProvider.createConnectionHandler(anyString(), any())).thenReturn(connectionHandler); when(tokenManager.authorize()).thenReturn(Mono.just(Duration.ofHours(1).toMillis())); when(tokenManagerProvider.getTokenManager(any(), anyString())).thenReturn(tokenManager); final boolean isV2 = true; return new ServiceBusReactorAmqpConnection(connectionId, connectionOptions, reactorProvider, handlerProvider, linkProvider, tokenManagerProvider, messageSerializer, false, isV2); } AmqpSendLink getAmqpSendLink(int sessionIdx, int linkIdx) { return mockSendSessions.getAmqpSendLink(sessionIdx, linkIdx); } void emitConnectionError(Throwable throwable) { connectionStateSink.emitError(throwable, Sinks.EmitFailureHandler.FAIL_FAST); } void emitConnectionCompletion() { connectionStateSink.emitComplete(Sinks.EmitFailureHandler.FAIL_FAST); } void emitCurrentSessionState(EndpointState state) { final MockSendSession session = mockSendSessions.getCurrentSendSession(); 
session.emitSessionState(state); } void emitCurrentSessionError(Throwable throwable) { final MockSendSession session = mockSendSessions.getCurrentSendSession(); session.emitSessionError(throwable); } void emitCurrentSessionCompletion() { final MockSendSession session = mockSendSessions.getCurrentSendSession(); session.emitSessionCompletion(); } void emitCurrentSendLinkState(EndpointState state) { final MockSendLink sendLink = mockSendSessions.getCurrentSendLink(); sendLink.emitSendLinkState(state); } void emitCurrentSendLinkError(Throwable throwable) { final MockSendLink sendLink = mockSendSessions.getCurrentSendLink(); sendLink.emitSendLinkError(throwable); } void emitCurrentSendLinkCompletion() { final MockSendLink sendLink = mockSendSessions.getCurrentSendLink(); sendLink.emitSendLinkCompletion(); } @Override public void close() { Mockito.framework().clearInlineMock(connectionOptions); Mockito.framework().clearInlineMock(connection); Mockito.framework().clearInlineMock(connectionHandler); Mockito.framework().clearInlineMock(reactor); Mockito.framework().clearInlineMock(reactorDispatcher); Mockito.framework().clearInlineMock(reactorExecutor); Mockito.framework().clearInlineMock(reactorProvider); Mockito.framework().clearInlineMock(handlerProvider); Mockito.framework().clearInlineMock(linkProvider); Mockito.framework().clearInlineMock(tokenManager); Mockito.framework().clearInlineMock(tokenManagerProvider); Mockito.framework().clearInlineMock(messageSerializer); mockSendSessions.close(); } } private static final class MockSendSessions implements Closeable { private final Object lock = new Object(); private final List<MockSendSession> mockSendSessions; private final MockSendSession terminalMockSendSession; private final int sessionsCnt; private int sessionIdx; private MockSendSession currentMockSendSession; private MockSendSessions(List<MockSendSession> mockSendSessions, MockSendSession terminalMockSendSession) { this.mockSendSessions = mockSendSessions; 
this.terminalMockSendSession = terminalMockSendSession; this.sessionsCnt = this.mockSendSessions.size(); this.sessionIdx = 0; } static MockSendSessions create(String connectionId, int sessionsCnt, int[] linksPerSession) { final List<MockSendSession> mockSendSessions = new ArrayList<>(sessionsCnt); for (int i = 0; i < sessionsCnt; i++) { mockSendSessions.add(MockSendSession.create(connectionId, linksPerSession[i])); } final MockSendSession terminalMockSendSession = MockSendSession.create(connectionId, 0); return new MockSendSessions(Collections.unmodifiableList(mockSendSessions), terminalMockSendSession); } void arrange(ReactorHandlerProvider handlerProvider, AmqpLinkProvider linkProvider, Connection connection, Sinks.Many<EndpointState> connectionStateSink) { for (MockSendSession mockSession : mockSendSessions) { mockSession.arrange(); mockSession.emitSessionState(EndpointState.ACTIVE); } terminalMockSendSession.arrange(); terminalMockSendSession.emitSessionCompletion(); when(handlerProvider.createSessionHandler(anyString(), any(), anyString(), any())) .thenAnswer(invocation -> { final MockSendSession session = moveToNextSendSession(connectionStateSink); return session.getSessionHandler(); }); when(handlerProvider.createSendLinkHandler(anyString(), any(), anyString(), anyString())) .thenAnswer(invocation -> { final MockSendLink sendLink = moveToNextSendLinkInCurrentSession(); return sendLink.getSendLinkHandler(); }); when(linkProvider.createSendLink(any(ServiceBusReactorAmqpConnection.class), anyString(), any(Sender.class), any(SendLinkHandler.class), any(ReactorProvider.class), any(TokenManager.class), any(MessageSerializer.class), any(AmqpRetryOptions.class), any(Scheduler.class), any())) .thenAnswer(invocation -> { final SendLinkHandler sendLinkHandler = invocation.getArgument(3); final AmqpSendLink amqpSendLink = lookupAmqpSendLinkFor(sendLinkHandler); return amqpSendLink; }); final ArrayList<Session> qpidSessions = new ArrayList<>(sessionsCnt + 1); for 
(MockSendSession mockSendSession : mockSendSessions) { qpidSessions.add(mockSendSession.getQpidSession()); } qpidSessions.add(terminalMockSendSession.getQpidSession()); when(connection.session()) .thenReturn(qpidSessions.get(0), qpidSessions.subList(1, sessionsCnt + 1).toArray(new Session[0])); } AmqpSendLink getAmqpSendLink(int sessionIdx, int linkIdx) { Assertions.assertTrue(sessionIdx >= 0 && sessionIdx < sessionsCnt, "sessionIdx is not in range."); final MockSendSession session = mockSendSessions.get(sessionIdx); return session.getAmqpSendLink(linkIdx); } MockSendSession getCurrentSendSession() { final MockSendSession session; synchronized (lock) { session = Objects.requireNonNull(currentMockSendSession, "Current Session is null"); } return session; } MockSendLink getCurrentSendLink() { final MockSendLink sendLink; synchronized (lock) { final MockSendSession session = getCurrentSendSession(); sendLink = session.getCurrentSendLink(); } return sendLink; } private MockSendSession moveToNextSendSession(Sinks.Many<EndpointState> connectionStateSink) { final MockSendSession nextSession; synchronized (lock) { if (sessionIdx >= sessionsCnt) { nextSession = terminalMockSendSession; } else { nextSession = mockSendSessions.get(sessionIdx); sessionIdx++; } this.currentMockSendSession = nextSession; } if (isTerminalSession(nextSession)) { connectionStateSink.emitComplete(Sinks.EmitFailureHandler.FAIL_FAST); } return nextSession; } private MockSendLink moveToNextSendLinkInCurrentSession() { final MockSendSession session; final MockSendLink nextSendLink; synchronized (lock) { session = Objects.requireNonNull(currentMockSendSession, "Current Session is null"); nextSendLink = session.moveToNextSendLink(); } if (session.isTerminalSendLink(nextSendLink)) { session.emitSessionCompletion(); } return nextSendLink; } private AmqpSendLink lookupAmqpSendLinkFor(SendLinkHandler sendLinkHandler) { for (MockSendSession mockSendSession : mockSendSessions) { final AmqpSendLink amqpSendLink 
= mockSendSession.lookupAmqpSendLinkFor(sendLinkHandler); if (amqpSendLink != null) { return amqpSendLink; } } final AmqpSendLink amqpSendLink = terminalMockSendSession.lookupAmqpSendLinkFor(sendLinkHandler); if (amqpSendLink != null) { return amqpSendLink; } throw new NullPointerException("Lookup for AmqpSendLink failed."); } private boolean isTerminalSession(MockSendSession session) { return session == terminalMockSendSession; } @Override public void close() { for (MockSendSession mockSendSession : mockSendSessions) { mockSendSession.close(); } terminalMockSendSession.close(); } } private static final class MockSendSession implements Closeable { private final String connectionId; private final Session session; private final Record sessionAttachments; private final SessionHandler sessionHandler; private final Sinks.Many<EndpointState> sessionStateSink; private final List<MockSendLink> mockSendLinks; private final MockSendLink terminalMockSendLink; private final int sendLinkCnt; private int sendLinkIdx; private MockSendLink currentMockSendLink; private MockSendSession(String connectionId, Session session, Record sessionAttachments, SessionHandler sessionHandler, Sinks.Many<EndpointState> sessionStateSink, List<MockSendLink> mockSendLinks, MockSendLink terminalMockSendLink) { this.connectionId = connectionId; this.session = session; this.sessionAttachments = sessionAttachments; this.sessionHandler = sessionHandler; this.sessionStateSink = sessionStateSink; this.mockSendLinks = mockSendLinks; this.terminalMockSendLink = terminalMockSendLink; this.sendLinkCnt = this.mockSendLinks.size(); this.sendLinkIdx = 0; } static MockSendSession create(String connectionId, int sendLinkCnt) { final List<MockSendLink> mockSendLinks = new ArrayList<>(sendLinkCnt); for (int i = 0; i < sendLinkCnt; i++) { mockSendLinks.add(MockSendLink.create()); } final MockSendLink terminalMockSendLink = MockSendLink.create(); final Record sessionAttachments = mock(Record.class); final Session 
session = mock(Session.class); final SessionHandler sessionHandler = mock(SessionHandler.class); final Sinks.Many<EndpointState> sessionStateSink = Sinks.many().replay() .latestOrDefault(EndpointState.UNINITIALIZED); return new MockSendSession(connectionId, session, sessionAttachments, sessionHandler, sessionStateSink, Collections.unmodifiableList(mockSendLinks), terminalMockSendLink); } void arrange() { for (MockSendLink mockSendLink : mockSendLinks) { mockSendLink.arrange(); mockSendLink.emitSendLinkState(EndpointState.ACTIVE); } terminalMockSendLink.arrange(); terminalMockSendLink.emitSendLinkCompletion(); final Answer<Mono<Void>> terminalSendAnswer = new Answer<Mono<Void>>() { @Override public Mono<Void> answer(InvocationOnMock invocation) { return Mono.error(new AmqpException(true, "terminal-send-link-result", null)); } }; when(terminalMockSendLink.getAmqpSendLink().send(anyList())).then(terminalSendAnswer); when(terminalMockSendLink.getAmqpSendLink().send(any(Message.class))).then(terminalSendAnswer); when(terminalMockSendLink.getAmqpSendLink().send(any(Message.class), any(DeliveryState.class))).then(terminalSendAnswer); when(terminalMockSendLink.getAmqpSendLink().send(anyList(), any(DeliveryState.class))).then(terminalSendAnswer); when(terminalMockSendLink.getAmqpSendLink().send(any(), anyInt(), anyInt(), any(DeliveryState.class))).then(terminalSendAnswer); doNothing().when(sessionAttachments).set(any(), any(), anyString()); when(session.attachments()).thenReturn(sessionAttachments); doNothing().when(session).open(); doNothing().when(session).setCondition(any()); when(sessionHandler.getConnectionId()).thenReturn(connectionId); when(sessionHandler.getEndpointStates()).thenReturn(sessionStateSink.asFlux().distinctUntilChanged()); doNothing().when(sessionHandler).close(); final ArrayList<Sender> qpidSenders = new ArrayList<>(sendLinkCnt + 1); for (MockSendLink mockSendLink : mockSendLinks) { qpidSenders.add(mockSendLink.getQpidSender()); } 
qpidSenders.add(terminalMockSendLink.getQpidSender()); when(session.sender(any())) .thenReturn(qpidSenders.get(0), qpidSenders.subList(1, sendLinkCnt + 1).toArray(new Sender[0])); } Session getQpidSession() { return session; } SessionHandler getSessionHandler() { return sessionHandler; } AmqpSendLink getAmqpSendLink(int linkIdx) { Assertions.assertTrue(linkIdx >= 0 && linkIdx < sendLinkCnt, "linkIdx is not in range."); return mockSendLinks.get(linkIdx).getAmqpSendLink(); } MockSendLink moveToNextSendLink() { final MockSendLink nextSendLink; if (sendLinkIdx >= sendLinkCnt) { nextSendLink = terminalMockSendLink; } else { nextSendLink = mockSendLinks.get(sendLinkIdx); sendLinkIdx++; } currentMockSendLink = nextSendLink; return nextSendLink; } boolean isTerminalSendLink(MockSendLink link) { return this.terminalMockSendLink == link; } MockSendLink getCurrentSendLink() { return Objects.requireNonNull(currentMockSendLink, "Current Link is null"); } AmqpSendLink lookupAmqpSendLinkFor(SendLinkHandler sendLinkHandler) { for (MockSendLink sendLink : mockSendLinks) { if (sendLink.getSendLinkHandler() == sendLinkHandler) { return sendLink.getAmqpSendLink(); } } if (terminalMockSendLink.getSendLinkHandler() == sendLinkHandler) { return terminalMockSendLink.getAmqpSendLink(); } return null; } void emitSessionState(EndpointState state) { this.sessionStateSink.emitNext(state, Sinks.EmitFailureHandler.FAIL_FAST); } void emitSessionError(Throwable error) { this.sessionStateSink.emitError(error, Sinks.EmitFailureHandler.FAIL_FAST); } void emitSessionCompletion() { this.sessionStateSink.emitComplete(Sinks.EmitFailureHandler.FAIL_FAST); } @Override public void close() { Mockito.framework().clearInlineMock(session); Mockito.framework().clearInlineMock(sessionAttachments); Mockito.framework().clearInlineMock(sessionHandler); for (MockSendLink sendLink : mockSendLinks) { sendLink.close(); } terminalMockSendLink.close(); } } private static final class MockSendLink implements Closeable { 
private final Sender sender; private final Record senderAttachments; private final AmqpSendLink amqpSendLink; private final SendLinkHandler sendLinkHandler; private final Sinks.Many<EndpointState> sendLinkStateSink; private MockSendLink(Sender sender, Record senderAttachments, AmqpSendLink amqpSendLink, SendLinkHandler sendLinkHandler, Sinks.Many<EndpointState> sendLinkStateSink) { this.sender = sender; this.senderAttachments = senderAttachments; this.amqpSendLink = amqpSendLink; this.sendLinkHandler = sendLinkHandler; this.sendLinkStateSink = sendLinkStateSink; } static MockSendLink create() { final Record senderAttachments = mock(Record.class); final Sender sender = mock(Sender.class); final AmqpSendLink amqpSendLink = mock(AmqpSendLink.class); final SendLinkHandler sendLinkHandler = mock(SendLinkHandler.class); final Sinks.Many<EndpointState> sendLinkStateSink = Sinks.many().replay() .latestOrDefault(EndpointState.UNINITIALIZED); return new MockSendLink(sender, senderAttachments, amqpSendLink, sendLinkHandler, sendLinkStateSink); } void arrange() { doNothing().when(senderAttachments).set(any(), any(), anyString()); when(sender.attachments()).thenReturn(senderAttachments); doNothing().when(sender).setTarget(any()); doNothing().when(sender).setSenderSettleMode(any()); doNothing().when(sender).setProperties(any()); doNothing().when(sender).setSource(any()); doNothing().when(sender).open(); when(amqpSendLink.getLinkSize()).thenReturn(Mono.just(ServiceBusSenderAsyncClient.MAX_MESSAGE_LENGTH_BYTES)); when(amqpSendLink.getEndpointStates()) .thenReturn(sendLinkStateSink.asFlux().distinctUntilChanged().map(state -> toAmqpEndpointState(state))); when(sendLinkHandler.getEndpointStates()).thenReturn(sendLinkStateSink.asFlux().distinctUntilChanged()); doNothing().when(sendLinkHandler).close(); } Sender getQpidSender() { return sender; } AmqpSendLink getAmqpSendLink() { return amqpSendLink; } SendLinkHandler getSendLinkHandler() { return sendLinkHandler; } void 
emitSendLinkState(EndpointState state) { this.sendLinkStateSink.emitNext(state, Sinks.EmitFailureHandler.FAIL_FAST); } void emitSendLinkError(Throwable error) { this.sendLinkStateSink.emitError(error, Sinks.EmitFailureHandler.FAIL_FAST); } void emitSendLinkCompletion() { this.sendLinkStateSink.emitComplete(Sinks.EmitFailureHandler.FAIL_FAST); } private static AmqpEndpointState toAmqpEndpointState(EndpointState state) { switch (state) { case ACTIVE: return AmqpEndpointState.ACTIVE; case UNINITIALIZED: return AmqpEndpointState.UNINITIALIZED; case CLOSED: return AmqpEndpointState.CLOSED; default: throw new IllegalArgumentException("This endpoint state is not supported. State:" + state); } } @Override public void close() { Mockito.framework().clearInlineMock(sender); Mockito.framework().clearInlineMock(senderAttachments); Mockito.framework().clearInlineMock(amqpSendLink); Mockito.framework().clearInlineMock(sendLinkHandler); } } }
uncomment or delete?
public void gatewayDiagnostics() throws Exception { Thread.sleep(2000); InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = containerGateway.createItem(internalObjectNode); String diagnostics = createResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"connectionMode\":\"GATEWAY\""); assertThat(diagnostics).contains("\"userAgent\":\"" + this.gatewayClientUserAgent + "\""); assertThat(diagnostics).doesNotContain(("\"gatewayStatistics\":null")); assertThat(diagnostics).contains("\"operationType\":\"Create\""); assertThat(diagnostics).contains("\"metaDataName\":\"CONTAINER_LOOK_UP\""); assertThat(diagnostics).contains("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\""); assertThat(diagnostics).contains("\"userAgent\":\"" + this.gatewayClientUserAgent + "\""); assertThat(diagnostics).containsAnyOf( "\"machineId\":\"" + tempMachineId + "\"", "\"machineId\":\"" + ClientTelemetry.getMachineId(null) + "\"" ); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); assertThat(createResponse.getDiagnostics().getDuration()).isNotNull(); assertThat(createResponse.getDiagnostics().getContactedRegionNames()).isNotNull(); validateTransportRequestTimelineGateway(diagnostics); validateRegionContacted(createResponse.getDiagnostics(), gatewayClient.asyncClient()); isValidJSON(diagnostics); }
public void gatewayDiagnostics() throws Exception { Thread.sleep(2000); InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = containerGateway.createItem(internalObjectNode); String diagnostics = createResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"connectionMode\":\"GATEWAY\""); assertThat(diagnostics).contains("\"userAgent\":\"" + this.gatewayClientUserAgent + "\""); assertThat(diagnostics).doesNotContain(("\"gatewayStatistics\":null")); assertThat(diagnostics).contains("\"operationType\":\"Create\""); assertThat(diagnostics).contains("\"metaDataName\":\"CONTAINER_LOOK_UP\""); assertThat(diagnostics).contains("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\""); assertThat(diagnostics).contains("\"userAgent\":\"" + this.gatewayClientUserAgent + "\""); assertThat(diagnostics).containsAnyOf( "\"machineId\":\"" + tempMachineId + "\"", "\"machineId\":\"" + ClientTelemetry.getMachineId(null) + "\"" ); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); assertThat(createResponse.getDiagnostics().getDuration()).isNotNull(); assertThat(createResponse.getDiagnostics().getContactedRegionNames()).isNotNull(); assertThat(createResponse.getDiagnostics().getRegionsContacted()).isNotEmpty(); validateTransportRequestTimelineGateway(diagnostics); validateRegionContacted(createResponse.getDiagnostics(), gatewayClient.asyncClient()); isValidJSON(diagnostics); }
class CosmosDiagnosticsTest extends TestSuiteBase { private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); private static final String USER_AGENT_SUFFIX_GATEWAY_CLIENT = "gatewayClientSuffix"; private static final String USER_AGENT_SUFFIX_DIRECT_CLIENT = "directClientSuffix"; private static final DateTimeFormatter RESPONSE_TIME_FORMATTER = DateTimeFormatter.ISO_INSTANT; private static final String tempMachineId = getTempMachineId(); private CosmosClient gatewayClient; private CosmosClient directClient; private CosmosAsyncDatabase cosmosAsyncDatabase; private CosmosContainer containerGateway; private CosmosContainer containerDirect; private CosmosAsyncContainer cosmosAsyncContainer; private String gatewayClientUserAgent; private String directClientUserAgent; private static String getTempMachineId() { Field field = null; try { field = RxDocumentClientImpl.class.getDeclaredField("tempMachineId"); } catch (NoSuchFieldException e) { fail(e.toString()); } field.setAccessible(true); try { return (String)field.get(null); } catch (IllegalAccessException e) { fail(e.toString()); return null; } } @BeforeClass(groups = {"simple"}, timeOut = SETUP_TIMEOUT) public void beforeClass() { assertThat(this.gatewayClient).isNull(); gatewayClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .userAgentSuffix(USER_AGENT_SUFFIX_GATEWAY_CLIENT) .gatewayMode() .buildClient(); UserAgentContainer userAgentContainer = new UserAgentContainer(); userAgentContainer.setSuffix(USER_AGENT_SUFFIX_GATEWAY_CLIENT); this.gatewayClientUserAgent = userAgentContainer.getUserAgent(); directClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .userAgentSuffix(USER_AGENT_SUFFIX_DIRECT_CLIENT) .directMode() .buildClient(); userAgentContainer.setSuffix(USER_AGENT_SUFFIX_DIRECT_CLIENT); this.directClientUserAgent = 
userAgentContainer.getUserAgent(); cosmosAsyncContainer = getSharedMultiPartitionCosmosContainer(this.gatewayClient.asyncClient()); cosmosAsyncDatabase = directClient.asyncClient().getDatabase(cosmosAsyncContainer.getDatabase().getId()); containerGateway = gatewayClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); containerDirect = directClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); } @AfterClass(groups = {"simple"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() { if (this.gatewayClient != null) { this.gatewayClient.close(); } if (this.directClient != null) { this.directClient.close(); } } @DataProvider(name = "query") private Object[][] query() { return new Object[][]{ new Object[] { "Select * from c where c.id = 'wrongId'", true }, new Object[] { "Select top 1 * from c where c.id = 'wrongId'", true }, new Object[] { "Select * from c where c.id = 'wrongId' order by c.id", true }, new Object[] { "Select count(1) from c where c.id = 'wrongId' group by c.pk", true }, new Object[] { "Select distinct c.pk from c where c.id = 'wrongId'", true }, new Object[] { "Select * from c where c.id = 'wrongId'", false }, new Object[] { "Select top 1 * from c where c.id = 'wrongId'", false }, new Object[] { "Select * from c where c.id = 'wrongId' order by c.id", false }, new Object[] { "Select count(1) from c where c.id = 'wrongId' group by c.pk", false }, new Object[] { "Select distinct c.pk from c where c.id = 'wrongId'", false }, new Object[] { "Select * from c where c.id = 'wrongId'", false }, new Object[] { "Select top 1 * from c where c.id = 'wrongId'", false }, new Object[] { "Select * from c where c.id = 'wrongId' order by c.id", false }, new Object[] { "Select count(1) from c where c.id = 'wrongId' group by c.pk", false }, new Object[] { "Select distinct c.pk from c where c.id = 'wrongId'", false }, }; } @DataProvider(name = 
"readAllItemsOfLogicalPartition") private Object[][] readAllItemsOfLogicalPartition() { return new Object[][]{ new Object[] { 1, true }, new Object[] { 5, null }, new Object[] { 20, null }, new Object[] { 1, false }, new Object[] { 5, false }, new Object[] { 20, false }, }; } @DataProvider(name = "connectionStateListenerArgProvider") public Object[][] connectionStateListenerArgProvider() { return new Object[][]{ {true}, {false} }; } @Test(groups = {"simple"}, timeOut = TIMEOUT) @Test(groups = {"simple"}, timeOut = TIMEOUT) public void gatewayDiagnosticsOnException() throws Exception { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = null; try { createResponse = this.containerGateway.createItem(internalObjectNode); CosmosItemRequestOptions cosmosItemRequestOptions = new CosmosItemRequestOptions(); ModelBridgeInternal.setPartitionKey(cosmosItemRequestOptions, new PartitionKey("wrongPartitionKey")); CosmosItemResponse<InternalObjectNode> readResponse = this.containerGateway.readItem(BridgeInternal.getProperties(createResponse).getId(), new PartitionKey("wrongPartitionKey"), InternalObjectNode.class); fail("request should fail as partition key is wrong"); } catch (CosmosException exception) { isValidJSON(exception.toString()); isValidJSON(exception.getMessage()); String diagnostics = exception.getDiagnostics().toString(); assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND); assertThat(diagnostics).contains("\"connectionMode\":\"GATEWAY\""); assertThat(diagnostics).contains("\"userAgent\":\"" + this.gatewayClientUserAgent + "\""); assertThat(diagnostics).doesNotContain(("\"gatewayStatistics\":null")); assertThat(diagnostics).contains("\"statusCode\":404"); assertThat(diagnostics).contains("\"operationType\":\"Read\""); assertThat(diagnostics).contains("\"userAgent\":\"" + this.gatewayClientUserAgent + "\""); 
assertThat(diagnostics).contains("\"exceptionMessage\":\"Entity with the specified id does not exist in the system."); assertThat(diagnostics).contains("\"exceptionResponseHeaders\""); assertThat(diagnostics).doesNotContain("\"exceptionResponseHeaders\": \"{}\""); assertThat(diagnostics).containsAnyOf( "\"machineId\":\"" + tempMachineId + "\"", "\"machineId\":\"" + ClientTelemetry.getMachineId(null) + "\"" ); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); assertThat(diagnostics).doesNotContain(("\"resourceAddress\":null")); assertThat(createResponse.getDiagnostics().getContactedRegionNames()).isNotNull(); validateRegionContacted(createResponse.getDiagnostics(), this.containerGateway.asyncContainer.getDatabase().getClient()); assertThat(exception.getDiagnostics().getDuration()).isNotNull(); validateTransportRequestTimelineGateway(diagnostics); isValidJSON(diagnostics); } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void systemDiagnosticsForSystemStateInformation() { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = this.containerGateway.createItem(internalObjectNode); String diagnostics = createResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("systemInformation"); assertThat(diagnostics).contains("usedMemory"); assertThat(diagnostics).contains("availableMemory"); assertThat(diagnostics).contains("systemCpuLoad"); assertThat(diagnostics).contains("\"userAgent\":\"" + this.gatewayClientUserAgent + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); assertThat(createResponse.getDiagnostics().getDuration()).isNotNull(); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void directDiagnostics() throws Exception { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = containerDirect.createItem(internalObjectNode); 
validateDirectModeDiagnosticsOnSuccess(createResponse.getDiagnostics(), directClient, this.directClientUserAgent); try { containerDirect.createItem(internalObjectNode); fail("expected 409"); } catch (CosmosException e) { validateDirectModeDiagnosticsOnException(e.getDiagnostics(), this.directClientUserAgent); } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void requestSessionTokenDiagnostics() { CosmosClient testSessionTokenClient = null; try { testSessionTokenClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .consistencyLevel(ConsistencyLevel.SESSION) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); CosmosContainer cosmosContainer = testSessionTokenClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode); String diagnostics = createResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"requestSessionToken\":null"); String sessionToken = createResponse.getSessionToken(); CosmosItemResponse<InternalObjectNode> readResponse = cosmosContainer.readItem(BridgeInternal.getProperties(createResponse).getId(), new PartitionKey(BridgeInternal.getProperties(createResponse).getId()), InternalObjectNode.class); diagnostics = readResponse.getDiagnostics().toString(); assertThat(diagnostics).contains(String.format("\"requestSessionToken\":\"%s\"", sessionToken)); CosmosBatch batch = CosmosBatch.createCosmosBatch(new PartitionKey( BridgeInternal.getProperties(createResponse).getId())); internalObjectNode = getInternalObjectNode(); batch.createItemOperation(internalObjectNode); CosmosBatchResponse batchResponse = cosmosContainer.executeCosmosBatch(batch, new CosmosBatchRequestOptions().setSessionToken(readResponse.getSessionToken())); diagnostics = 
batchResponse.getDiagnostics().toString(); assertThat(diagnostics).contains(String.format("\"requestSessionToken\":\"%s\"", readResponse.getSessionToken())); } finally { if (testSessionTokenClient != null) { testSessionTokenClient.close(); } } } @Test(groups = {"simple"}) public void databaseAccountToClients() { CosmosClient testClient = null; try { testClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); CosmosContainer cosmosContainer = testClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode); String diagnostics = createResponse.getDiagnostics().toString(); assertThat(diagnostics).contains(String.format("\"clientEndpoints\"" + ":{\"%s\"", TestConfigurations.HOST)); int clientsIndex = diagnostics.indexOf("\"clientEndpoints\":"); String[] substrings = diagnostics.substring(clientsIndex, clientsIndex + 120) .split("}")[0].split(":"); String intString = substrings[substrings.length-1]; int intValue = Integer.parseInt(intString); CosmosClient testClient2 = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); internalObjectNode = getInternalObjectNode(); createResponse = cosmosContainer.createItem(internalObjectNode); diagnostics = createResponse.getDiagnostics().toString(); assertThat(diagnostics).contains(String.format("\"clientEndpoints\"" + ":{\"%s\"", TestConfigurations.HOST)); clientsIndex = diagnostics.indexOf("\"clientEndpoints\":"); substrings = diagnostics.substring(clientsIndex, clientsIndex + 120) .split("}")[0].split(":"); intString = substrings[substrings.length-1]; 
assertThat(Integer.parseInt(intString)).isEqualTo(intValue+1); testClient2.close(); } finally { if (testClient != null) { testClient.close(); } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void queryPlanDiagnostics() throws JsonProcessingException { List<String> itemIdList = new ArrayList<>(); for(int i = 0; i< 100; i++) { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = containerDirect.createItem(internalObjectNode); if(i%20 == 0) { itemIdList.add(internalObjectNode.getId()); } } String queryDiagnostics = null; List<String> queryList = new ArrayList<>(); queryList.add("Select * from c"); StringBuilder queryBuilder = new StringBuilder("SELECT * from c where c.mypk in ("); for(int i = 0 ; i < itemIdList.size(); i++){ queryBuilder.append("'").append(itemIdList.get(i)).append("'"); if(i < (itemIdList.size()-1)) { queryBuilder.append(","); } else { queryBuilder.append(")"); } } queryList.add(queryBuilder.toString()); queryList.add("Select * from c where c.id = 'wrongId'"); for(String query : queryList) { int feedResponseCounter = 0; CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); options.setQueryMetricsEnabled(true); Iterator<FeedResponse<InternalObjectNode>> iterator = containerDirect.queryItems(query, options, InternalObjectNode.class).iterableByPage().iterator(); while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); if (feedResponseCounter == 0) { assertThat(queryDiagnostics).contains("QueryPlan Start Time (UTC)="); assertThat(queryDiagnostics).contains("QueryPlan End Time (UTC)="); assertThat(queryDiagnostics).contains("QueryPlan Duration (ms)="); String requestTimeLine = OBJECT_MAPPER.writeValueAsString(feedResponse.getCosmosDiagnostics().getFeedResponseDiagnostics().getQueryPlanDiagnosticsContext().getRequestTimeline()); 
assertThat(requestTimeLine).contains("connectionConfigured"); assertThat(requestTimeLine).contains("requestSent"); assertThat(requestTimeLine).contains("transitTime"); assertThat(requestTimeLine).contains("received"); } else { assertThat(queryDiagnostics).doesNotContain("QueryPlan Start Time (UTC)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan End Time (UTC)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan Duration (ms)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan RequestTimeline ="); } feedResponseCounter++; } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void readManyDiagnostics() { String pkValue = UUID.randomUUID().toString(); PartitionKey partitionKey = new PartitionKey(pkValue); List<CosmosItemIdentity> itemIdList = new ArrayList<>(); for(int i = 0; i< 100; i++) { InternalObjectNode internalObjectNode = getInternalObjectNode(pkValue); CosmosItemResponse<InternalObjectNode> createResponse = containerDirect.createItem(internalObjectNode); if(i%20 == 0) { itemIdList.add(new CosmosItemIdentity(partitionKey, internalObjectNode.getId())); } } FeedResponse<InternalObjectNode> response = containerDirect.readMany(itemIdList, InternalObjectNode.class); FeedResponseDiagnostics diagnostics = response.getCosmosDiagnostics().getFeedResponseDiagnostics(); assertThat(diagnostics.getClientSideRequestStatistics().size()).isEqualTo(1); assertThat(diagnostics.getQueryMetricsMap().values().iterator().next().getRetrievedDocumentCount()).isEqualTo(itemIdList.size()); String cosmosDiagnosticsString = response.getCosmosDiagnostics().toString(); assertThat(cosmosDiagnosticsString).contains("\"userAgent\":\"" + this.directClientUserAgent + "\""); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void queryMetricsWithIndexMetrics() { List<String> itemIdList = new ArrayList<>(); for(int i = 0; i< 100; i++) { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = 
containerDirect.createItem(internalObjectNode); if(i%20 == 0) { itemIdList.add(internalObjectNode.getId()); } } String queryDiagnostics = null; List<String> queryList = new ArrayList<>(); StringBuilder queryBuilder = new StringBuilder("SELECT * from c where c.mypk in ("); for(int i = 0 ; i < itemIdList.size(); i++){ queryBuilder.append("'").append(itemIdList.get(i)).append("'"); if(i < (itemIdList.size()-1)) { queryBuilder.append(","); } else { queryBuilder.append(")"); } } queryList.add(queryBuilder.toString()); for (String query : queryList) { CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); options.setQueryMetricsEnabled(true); options.setIndexMetricsEnabled(true); Iterator<FeedResponse<InternalObjectNode>> iterator = containerDirect.queryItems(query, options, InternalObjectNode.class).iterableByPage().iterator(); while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); logger.info("This is query diagnostics {}", queryDiagnostics); if (feedResponse.getResponseHeaders().containsKey(HttpConstants.HttpHeaders.INDEX_UTILIZATION)) { assertThat(feedResponse.getResponseHeaders().get(HttpConstants.HttpHeaders.INDEX_UTILIZATION)).isNotNull(); assertThat(createFromJSONString(Utils.decodeBase64String(feedResponse.getResponseHeaders().get(HttpConstants.HttpHeaders.INDEX_UTILIZATION))).getUtilizedSingleIndexes()).isNotNull(); } } } } @Test(groups = {"simple"}, dataProvider = "query", timeOut = TIMEOUT) public void queryMetrics(String query, Boolean qmEnabled) { CosmosContainer directContainer = this.directClient.getDatabase(this.cosmosAsyncContainer.getDatabase().getId()) .getContainer(this.cosmosAsyncContainer.getId()); CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); if (qmEnabled != null) { options.setQueryMetricsEnabled(qmEnabled); } boolean qroupByFirstResponse = true; Iterator<FeedResponse<InternalObjectNode>> iterator = 
directContainer.queryItems(query, options, InternalObjectNode.class).iterableByPage().iterator(); assertThat(iterator.hasNext()).isTrue(); while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); String queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); assertThat(feedResponse.getResults().size()).isEqualTo(0); if (!query.contains("group by") || qroupByFirstResponse) { validateQueryDiagnostics(queryDiagnostics, qmEnabled, true); validateDirectModeQueryDiagnostics(queryDiagnostics, this.directClientUserAgent); if (query.contains("group by")) { qroupByFirstResponse = false; } } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void queryDiagnosticsOnOrderBy() { String containerId = "testcontainer"; cosmosAsyncDatabase.createContainer(containerId, "/mypk", ThroughputProperties.createManualThroughput(40000)).block(); CosmosAsyncContainer testcontainer = cosmosAsyncDatabase.getContainer(containerId); CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); options.setConsistencyLevel(ConsistencyLevel.EVENTUAL); testcontainer.createItem(getInternalObjectNode()).block(); options.setMaxDegreeOfParallelism(-1); String query = "SELECT * from c ORDER BY c._ts DESC"; CosmosPagedFlux<InternalObjectNode> cosmosPagedFlux = testcontainer.queryItems(query, options, InternalObjectNode.class); Set<String> partitionKeyRangeIds = new HashSet<>(); Set<String> pkRids = new HashSet<>(); cosmosPagedFlux.byPage().flatMap(feedResponse -> { String cosmosDiagnosticsString = feedResponse.getCosmosDiagnostics().toString(); Pattern pattern = Pattern.compile("(\"partitionKeyRangeId\":\")(\\d)"); Matcher matcher = pattern.matcher(cosmosDiagnosticsString); while (matcher.find()) { String group = matcher.group(2); partitionKeyRangeIds.add(group); } pattern = Pattern.compile("(pkrId:)(\\d)"); matcher = pattern.matcher(cosmosDiagnosticsString); while (matcher.find()) { String group = matcher.group(2); pkRids.add(group); } return 
Flux.just(feedResponse); }).blockLast(); assertThat(pkRids).isNotEmpty(); assertThat(pkRids).isEqualTo(partitionKeyRangeIds); deleteCollection(testcontainer); } private void validateDirectModeDiagnosticsOnSuccess( CosmosDiagnostics cosmosDiagnostics, CosmosClient testDirectClient, String userAgent) throws Exception { String diagnostics = cosmosDiagnostics.toString(); assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\""); assertThat(diagnostics).contains("supplementalResponseStatisticsList"); assertThat(diagnostics).contains("\"gatewayStatistics\":null"); assertThat(diagnostics).contains("addressResolutionStatistics"); assertThat(diagnostics).contains("\"metaDataName\":\"CONTAINER_LOOK_UP\""); assertThat(diagnostics).contains("\"metaDataName\":\"PARTITION_KEY_RANGE_LOOK_UP\""); assertThat(diagnostics).contains("\"metaDataName\":\"SERVER_ADDRESS_LOOKUP\""); assertThat(diagnostics).contains("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\""); assertThat(diagnostics).contains("\"userAgent\":\"" + userAgent + "\""); assertThat(diagnostics).containsAnyOf( "\"machineId\":\"" + tempMachineId + "\"", "\"machineId\":\"" + ClientTelemetry.getMachineId(null) + "\"" ); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); assertThat(diagnostics).contains("\"backendLatencyInMs\""); assertThat(diagnostics).contains("\"retryAfterInMs\""); assertThat(diagnostics).contains("\"channelStatistics\""); assertThat(cosmosDiagnostics.getContactedRegionNames()).isNotEmpty(); assertThat(cosmosDiagnostics.getDuration()).isNotNull(); validateTransportRequestTimelineDirect(diagnostics); validateRegionContacted(cosmosDiagnostics, testDirectClient.asyncClient()); validateChannelStatistics(cosmosDiagnostics); isValidJSON(diagnostics); } private void validateDirectModeDiagnosticsOnException(CosmosDiagnostics cosmosDiagnostics, String userAgent) { String diagnosticsString = cosmosDiagnostics.toString(); 
assertThat(diagnosticsString).contains("\"backendLatencyInMs\""); assertThat(diagnosticsString).contains("\"userAgent\":\"" + userAgent + "\""); assertThat(diagnosticsString).contains("\"retryAfterInMs\""); assertThat(diagnosticsString).contains("\"exceptionMessage\":\"[\\\"Resource with specified id or name already exists.\\\"]\""); assertThat(diagnosticsString).contains("\"exceptionResponseHeaders\""); assertThat(diagnosticsString).doesNotContain("\"exceptionResponseHeaders\": \"{}\""); validateTransportRequestTimelineDirect(diagnosticsString); validateChannelStatistics(cosmosDiagnostics); } private void validateDirectModeQueryDiagnostics(String diagnostics, String userAgent) { assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\""); assertThat(diagnostics).contains("supplementalResponseStatisticsList"); assertThat(diagnostics).contains("responseStatisticsList"); assertThat(diagnostics).contains("\"gatewayStatistics\":null"); assertThat(diagnostics).contains("addressResolutionStatistics"); assertThat(diagnostics).contains("\"userAgent\":\"" + userAgent + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); } private void validateGatewayModeQueryDiagnostics(String diagnostics, String userAgent) { assertThat(diagnostics).contains("\"connectionMode\":\"GATEWAY\""); assertThat(diagnostics).doesNotContain(("\"gatewayStatistics\":null")); assertThat(diagnostics).contains("\"operationType\":\"Query\""); assertThat(diagnostics).contains("\"userAgent\":\"" + userAgent + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); assertThat(diagnostics).contains("\"regionsContacted\""); } @Test(groups = {"simple"}, dataProvider = "query", timeOut = TIMEOUT*2) public void queryDiagnosticsGatewayMode(String query, Boolean qmEnabled) { CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); List<String> itemIdList = new ArrayList<>(); for (int i = 0; i < 100; i++) { InternalObjectNode 
internalObjectNode = getInternalObjectNode(); containerGateway.createItem(internalObjectNode); if (i % 20 == 0) { itemIdList.add(internalObjectNode.getId()); } } boolean qroupByFirstResponse = true; if (qmEnabled != null) { options.setQueryMetricsEnabled(qmEnabled); } Iterator<FeedResponse<InternalObjectNode>> iterator = containerGateway .queryItems(query, options, InternalObjectNode.class) .iterableByPage() .iterator(); assertThat(iterator.hasNext()).isTrue(); while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); String queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); assertThat(feedResponse.getResults().size()).isEqualTo(0); if (!query.contains("group by") || qroupByFirstResponse) { validateQueryDiagnostics(queryDiagnostics, qmEnabled, true); validateGatewayModeQueryDiagnostics(queryDiagnostics, this.gatewayClientUserAgent); if (query.contains("group by")) { qroupByFirstResponse = false; } } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void queryMetricsWithADifferentLocale() { Locale.setDefault(Locale.GERMAN); String query = "select * from root where root.id= \"someid\""; CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); Iterator<FeedResponse<InternalObjectNode>> iterator = this.containerGateway.queryItems(query, options, InternalObjectNode.class) .iterableByPage().iterator(); double requestCharge = 0; while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); requestCharge += feedResponse.getRequestCharge(); } assertThat(requestCharge).isGreaterThan(0); Locale.setDefault(Locale.ROOT); } private static void validateQueryDiagnostics( String queryDiagnostics, Boolean qmEnabled, boolean expectQueryPlanDiagnostics) { if (qmEnabled == null || qmEnabled) { assertThat(queryDiagnostics).contains("Retrieved Document Count"); assertThat(queryDiagnostics).contains("Query Preparation Times"); assertThat(queryDiagnostics).contains("Runtime Execution 
Times"); assertThat(queryDiagnostics).contains("Partition Execution Timeline"); } else { assertThat(queryDiagnostics).doesNotContain("Retrieved Document Count"); assertThat(queryDiagnostics).doesNotContain("Query Preparation Times"); assertThat(queryDiagnostics).doesNotContain("Runtime Execution Times"); assertThat(queryDiagnostics).doesNotContain("Partition Execution Timeline"); } if (expectQueryPlanDiagnostics) { assertThat(queryDiagnostics).contains("QueryPlan Start Time (UTC)="); assertThat(queryDiagnostics).contains("QueryPlan End Time (UTC)="); assertThat(queryDiagnostics).contains("QueryPlan Duration (ms)="); } else { assertThat(queryDiagnostics).doesNotContain("QueryPlan Start Time (UTC)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan End Time (UTC)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan Duration (ms)="); } } @Test(groups = {"simple"}, dataProvider = "readAllItemsOfLogicalPartition", timeOut = TIMEOUT) public void queryMetricsForReadAllItemsOfLogicalPartition(Integer expectedItemCount, Boolean qmEnabled) { String pkValue = UUID.randomUUID().toString(); for (int i = 0; i < expectedItemCount; i++) { InternalObjectNode internalObjectNode = getInternalObjectNode(pkValue); CosmosItemResponse<InternalObjectNode> createResponse = containerGateway.createItem(internalObjectNode); } CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); if (qmEnabled != null) { options = options.setQueryMetricsEnabled(qmEnabled); } ModelBridgeInternal.setQueryRequestOptionsMaxItemCount(options, 5); Iterator<FeedResponse<InternalObjectNode>> iterator = this.containerGateway .readAllItems( new PartitionKey(pkValue), options, InternalObjectNode.class) .iterableByPage().iterator(); assertThat(iterator.hasNext()).isTrue(); int actualItemCount = 0; while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); String queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); actualItemCount += 
feedResponse.getResults().size(); validateQueryDiagnostics(queryDiagnostics, qmEnabled, false); } assertThat(actualItemCount).isEqualTo(expectedItemCount); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void directDiagnosticsOnException() throws Exception { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = null; try { createResponse = containerDirect.createItem(internalObjectNode); CosmosItemRequestOptions cosmosItemRequestOptions = new CosmosItemRequestOptions(); ModelBridgeInternal.setPartitionKey(cosmosItemRequestOptions, new PartitionKey("wrongPartitionKey")); CosmosItemResponse<InternalObjectNode> readResponse = containerDirect.readItem(BridgeInternal.getProperties(createResponse).getId(), new PartitionKey("wrongPartitionKey"), InternalObjectNode.class); fail("request should fail as partition key is wrong"); } catch (CosmosException exception) { isValidJSON(exception.toString()); isValidJSON(exception.getMessage()); String diagnostics = exception.getDiagnostics().toString(); assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND); assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\""); assertThat(diagnostics).contains("\"userAgent\":\"" + this.directClientUserAgent + "\""); assertThat(diagnostics).doesNotContain(("\"resourceAddress\":null")); assertThat(exception.getDiagnostics().getContactedRegionNames()).isNotEmpty(); assertThat(exception.getDiagnostics().getDuration()).isNotNull(); assertThat(diagnostics).contains("\"backendLatencyInMs\""); assertThat(diagnostics).contains("\"retryAfterInMs\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); assertThat(diagnostics).contains("\"exceptionMessage\":\"[\\\"Resource Not Found."); assertThat(diagnostics).contains("\"exceptionResponseHeaders\""); assertThat(diagnostics).doesNotContain("\"exceptionResponseHeaders\":null"); isValidJSON(diagnostics); 
validateTransportRequestTimelineDirect(diagnostics); validateRegionContacted(createResponse.getDiagnostics(), directClient.asyncClient()); ObjectNode diagnosticsNode = (ObjectNode) OBJECT_MAPPER.readTree(diagnostics); JsonNode responseStatisticsList = diagnosticsNode.get("responseStatisticsList"); assertThat(responseStatisticsList.isArray()).isTrue(); assertThat(responseStatisticsList.size()).isGreaterThan(0); JsonNode storeResult = responseStatisticsList.get(0).get("storeResult"); assertThat(storeResult).isNotNull(); JsonNode replicaStatusList = storeResult.get("replicaStatusList"); assertThat(replicaStatusList.isArray()).isTrue(); assertThat(replicaStatusList.size()).isGreaterThan(0); } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void directDiagnosticsOnMetadataException() { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosClient client = null; try { client = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); CosmosContainer container = client.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); HttpClient mockHttpClient = Mockito.mock(HttpClient.class); Mockito.when(mockHttpClient.send(Mockito.any(HttpRequest.class), Mockito.any(Duration.class))) .thenReturn(Mono.error(new CosmosException(400, "TestBadRequest"))); RxStoreModel rxGatewayStoreModel = rxGatewayStoreModel = ReflectionUtils.getGatewayProxy((RxDocumentClientImpl) client.asyncClient().getDocClientWrapper()); ReflectionUtils.setGatewayHttpClient(rxGatewayStoreModel, mockHttpClient); container.createItem(internalObjectNode); fail("request should fail as bad request"); } catch (CosmosException exception) { isValidJSON(exception.toString()); isValidJSON(exception.getMessage()); String diagnostics = exception.getDiagnostics().toString(); 
assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.BADREQUEST); assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\""); assertThat(diagnostics).contains("\"exceptionMessage\":\"TestBadRequest\""); assertThat(diagnostics).doesNotContain(("\"resourceAddress\":null")); assertThat(diagnostics).contains("\"resourceType\":\"DocumentCollection\""); assertThat(exception.getDiagnostics().getContactedRegionNames()).isNotEmpty(); assertThat(exception.getDiagnostics().getDuration()).isNotNull(); isValidJSON(diagnostics); } finally { if (client != null) { client.close(); } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void supplementalResponseStatisticsList() throws Exception { ClientSideRequestStatistics clientSideRequestStatistics = new ClientSideRequestStatistics(mockDiagnosticsClientContext()); for (int i = 0; i < 15; i++) { RxDocumentServiceRequest rxDocumentServiceRequest = RxDocumentServiceRequest.create(mockDiagnosticsClientContext(), OperationType.Head, ResourceType.Document); clientSideRequestStatistics.recordResponse(rxDocumentServiceRequest, null, null); } List<ClientSideRequestStatistics.StoreResponseStatistics> storeResponseStatistics = getStoreResponseStatistics(clientSideRequestStatistics); ObjectMapper objectMapper = new ObjectMapper(); String diagnostics = objectMapper.writeValueAsString(clientSideRequestStatistics); JsonNode jsonNode = objectMapper.readTree(diagnostics); ArrayNode supplementalResponseStatisticsListNode = (ArrayNode) jsonNode.get("supplementalResponseStatisticsList"); assertThat(storeResponseStatistics.size()).isEqualTo(15); assertThat(supplementalResponseStatisticsListNode.size()).isEqualTo(10); clearStoreResponseStatistics(clientSideRequestStatistics); storeResponseStatistics = getStoreResponseStatistics(clientSideRequestStatistics); assertThat(storeResponseStatistics.size()).isEqualTo(0); for (int i = 0; i < 7; i++) { RxDocumentServiceRequest rxDocumentServiceRequest = 
    /**
     * Verifies that the serialization diagnostics record the expected serialization type for each
     * kind of operation: database read, container read, item create with/without an explicit
     * partition key, and item read/deserialization.
     */
    @Test(groups = {"simple"}, timeOut = TIMEOUT)
    public void serializationOnVariousScenarios() {
        // Database read -> database deserialization.
        CosmosDatabaseResponse cosmosDatabase =
            gatewayClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).read();
        String diagnostics = cosmosDatabase.getDiagnostics().toString();
        assertThat(diagnostics).contains("\"serializationType\":\"DATABASE_DESERIALIZATION\"");

        // Container read -> container deserialization.
        CosmosContainerResponse containerResponse = this.containerGateway.read();
        diagnostics = containerResponse.getDiagnostics().toString();
        assertThat(diagnostics).contains("\"serializationType\":\"CONTAINER_DESERIALIZATION\"");

        // Create without an explicit partition key -> SDK must fetch/serialize the PK itself.
        TestItem testItem = new TestItem();
        testItem.id = "TestId";
        testItem.mypk = "TestPk";
        CosmosItemResponse<TestItem> itemResponse = this.containerGateway.createItem(testItem);
        diagnostics = itemResponse.getDiagnostics().toString();
        assertThat(diagnostics).contains("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\"");

        // Create with an explicit partition key -> no PK fetch; item not yet deserialized.
        testItem.id = "TestId2";
        testItem.mypk = "TestPk";
        itemResponse = this.containerGateway.createItem(testItem, new PartitionKey("TestPk"), null);
        diagnostics = itemResponse.getDiagnostics().toString();
        assertThat(diagnostics).doesNotContain("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\"");
        assertThat(diagnostics).doesNotContain("\"serializationType\":\"ITEM_DESERIALIZATION\"");

        // Accessing the item lazily triggers deserialization, which must then show up.
        TestItem readTestItem = itemResponse.getItem();
        diagnostics = itemResponse.getDiagnostics().toString();
        assertThat(diagnostics).contains("\"serializationType\":\"ITEM_DESERIALIZATION\"");

        // Item read -> item deserialization plus the gateway user agent and an activity id.
        CosmosItemResponse<InternalObjectNode> readItemResponse =
            this.containerGateway.readItem(testItem.id, new PartitionKey(testItem.mypk), null, InternalObjectNode.class);
        InternalObjectNode properties = readItemResponse.getItem();
        diagnostics = readItemResponse.getDiagnostics().toString();
        assertThat(diagnostics).contains("\"serializationType\":\"ITEM_DESERIALIZATION\"");
        assertThat(diagnostics).contains("\"userAgent\":\"" + this.gatewayClientUserAgent + "\"");
        assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*");
    }
to 409"); } catch (CosmosException e) { logger.info("Diagnostics are : {}", e.getDiagnostics()); String diagnostics = e.getDiagnostics().toString(); assertThat(diagnostics).contains("\"exceptionMessage\":\"[\\\"Resource with specified id or name already exists.\\\"]\""); assertThat(diagnostics).contains("\"exceptionResponseHeaders\""); assertThat(diagnostics).doesNotContain("\"exceptionResponseHeaders\": \"{}\""); validate(e.getDiagnostics(), testItemLength, 0); } CosmosItemResponse<TestItem> readItemResponse = container.readItem(testItem.id, new PartitionKey(testItem.mypk), TestItem.class); validate(readItemResponse.getDiagnostics(), 0, ModelBridgeInternal.getPayloadLength(readItemResponse)); CosmosItemResponse<Object> deleteItemResponse = container.deleteItem(testItem, null); validate(deleteItemResponse.getDiagnostics(), 0, 0); } @Test(groups = {"simple"}, dataProvider = "connectionStateListenerArgProvider", timeOut = TIMEOUT) public void rntbdStatistics(boolean connectionStateListenerEnabled) throws Exception { Instant beforeClientInitialization = Instant.now(); CosmosClient client1 = null; try { DirectConnectionConfig connectionConfig = DirectConnectionConfig.getDefaultConfig(); connectionConfig.setConnectionEndpointRediscoveryEnabled(connectionStateListenerEnabled); client1 = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .directMode(connectionConfig) .buildClient(); TestItem testItem = new TestItem(); testItem.id = UUID.randomUUID().toString(); testItem.mypk = UUID.randomUUID().toString(); int testItemLength = OBJECT_MAPPER.writeValueAsBytes(testItem).length; CosmosContainer container = client1.getDatabase(this.cosmosAsyncContainer.getDatabase().getId()).getContainer(this.cosmosAsyncContainer.getId()); Thread.sleep(1000); Instant beforeInitializingRntbdServiceEndpoint = Instant.now(); CosmosItemResponse<TestItem> operation1Response = container.upsertItem(testItem); Instant afterInitializingRntbdServiceEndpoint 
= Instant.now(); Thread.sleep(1000); Instant beforeOperation2 = Instant.now(); CosmosItemResponse<TestItem> operation2Response = container.upsertItem(testItem); Instant afterOperation2 = Instant.now(); Thread.sleep(1000); Instant beforeOperation3 = Instant.now(); CosmosItemResponse<TestItem> operation3Response = container.upsertItem(testItem); Instant afterOperation3 = Instant.now(); validateRntbdStatistics(operation3Response.getDiagnostics(), beforeClientInitialization, beforeInitializingRntbdServiceEndpoint, afterInitializingRntbdServiceEndpoint, beforeOperation2, afterOperation2, beforeOperation3, afterOperation3, connectionStateListenerEnabled); CosmosItemResponse<TestItem> readItemResponse = container.readItem(testItem.id, new PartitionKey(testItem.mypk), TestItem.class); validate(readItemResponse.getDiagnostics(), 0, ModelBridgeInternal.getPayloadLength(readItemResponse)); CosmosItemResponse<Object> deleteItemResponse = container.deleteItem(testItem, null); validate(deleteItemResponse.getDiagnostics(), 0, 0); } finally { LifeCycleUtils.closeQuietly(client1); } } private void validateRntbdStatistics(CosmosDiagnostics cosmosDiagnostics, Instant clientInitializationTime, Instant beforeInitializingRntbdServiceEndpoint, Instant afterInitializingRntbdServiceEndpoint, Instant beforeOperation2, Instant afterOperation2, Instant beforeOperation3, Instant afterOperation3, boolean connectionStateListenerEnabled) throws Exception { ObjectNode diagnostics = (ObjectNode) OBJECT_MAPPER.readTree(cosmosDiagnostics.toString()); JsonNode responseStatisticsList = diagnostics.get("responseStatisticsList"); assertThat(responseStatisticsList.isArray()).isTrue(); assertThat(responseStatisticsList.size()).isGreaterThan(0); JsonNode storeResult = responseStatisticsList.get(0).get("storeResult"); assertThat(storeResult).isNotNull(); JsonNode replicaStatusList = storeResult.get("replicaStatusList"); assertThat(replicaStatusList.isArray()).isTrue(); 
assertThat(replicaStatusList.size()).isGreaterThan(0); JsonNode serviceEndpointStatistics = storeResult.get("serviceEndpointStatistics"); assertThat(serviceEndpointStatistics).isNotNull(); assertThat(serviceEndpointStatistics.get("availableChannels").asInt(-1)).isGreaterThan(0); assertThat(serviceEndpointStatistics.get("acquiredChannels").asInt(-1)).isEqualTo(0); assertThat(serviceEndpointStatistics.get("inflightRequests").asInt(-1)).isEqualTo(1); assertThat(serviceEndpointStatistics.get("isClosed").asBoolean()).isEqualTo(false); JsonNode channelStatistics = storeResult.get("channelStatistics"); assertThat(channelStatistics).isNotNull(); assertThat(channelStatistics.get("channelId").asText()).isNotEmpty(); assertThat(channelStatistics.get("channelTaskQueueSize").asInt(-1)).isGreaterThanOrEqualTo(0); assertThat(channelStatistics.get("pendingRequestsCount").asInt(-1)).isGreaterThanOrEqualTo(0); assertThat(channelStatistics.get("lastReadTime").asText()).isNotEmpty(); assertThat(channelStatistics.get("waitForConnectionInit").asText()).isNotEmpty(); JsonNode connectionStateListenerMetrics = serviceEndpointStatistics.get("cerMetrics"); if (connectionStateListenerEnabled) { assertThat(connectionStateListenerMetrics).isNotNull(); assertThat(connectionStateListenerMetrics.get("lastCallTimestamp")).isNull(); assertThat(connectionStateListenerMetrics.get("lastActionableContext")).isNull(); } else { assertThat(connectionStateListenerMetrics).isNull(); } Instant beforeInitializationThreshold = beforeInitializingRntbdServiceEndpoint.minusMillis(1); assertThat(Instant.parse(serviceEndpointStatistics.get("createdTime").asText())) .isAfterOrEqualTo(beforeInitializationThreshold); Instant afterInitializationThreshold = afterInitializingRntbdServiceEndpoint.plusMillis(2); assertThat(Instant.parse(serviceEndpointStatistics.get("createdTime").asText())) .isBeforeOrEqualTo(afterInitializationThreshold); Instant afterOperation2Threshold = afterOperation2.plusMillis(2); Instant 
beforeOperation2Threshold = beforeOperation2.minusMillis(2); assertThat(Instant.parse(serviceEndpointStatistics.get("lastRequestTime").asText())) .isAfterOrEqualTo(beforeOperation2Threshold.toString()) .isBeforeOrEqualTo(afterOperation2Threshold.toString()); assertThat(Instant.parse(serviceEndpointStatistics.get("lastSuccessfulRequestTime").asText())) .isAfterOrEqualTo(beforeOperation2Threshold.toString()) .isBeforeOrEqualTo(afterOperation2Threshold.toString()); } private void validate(CosmosDiagnostics cosmosDiagnostics, int expectedRequestPayloadSize, int expectedResponsePayloadSize) throws Exception { ObjectNode diagnostics = (ObjectNode) OBJECT_MAPPER.readTree(cosmosDiagnostics.toString()); JsonNode responseStatisticsList = diagnostics.get("responseStatisticsList"); assertThat(responseStatisticsList.isArray()).isTrue(); assertThat(responseStatisticsList.size()).isGreaterThan(0); JsonNode storeResult = responseStatisticsList.get(0).get("storeResult"); boolean hasPayload = storeResult.get("exceptionMessage") == null; assertThat(storeResult).isNotNull(); assertThat(storeResult.get("rntbdRequestLengthInBytes").asInt(-1)).isGreaterThan(expectedRequestPayloadSize); assertThat(storeResult.get("rntbdRequestLengthInBytes").asInt(-1)).isGreaterThan(expectedRequestPayloadSize); assertThat(storeResult.get("requestPayloadLengthInBytes").asInt(-1)).isEqualTo(expectedRequestPayloadSize); if (hasPayload) { assertThat(storeResult.get("responsePayloadLengthInBytes").asInt(-1)).isEqualTo(expectedResponsePayloadSize); } assertThat(storeResult.get("rntbdResponseLengthInBytes").asInt(-1)).isGreaterThan(expectedResponsePayloadSize); } @Test(groups = {"emulator"}, timeOut = TIMEOUT) public void addressResolutionStatistics() { CosmosClient client1 = null; CosmosClient client2 = null; String databaseId = DatabaseForTest.generateId(); String containerId = UUID.randomUUID().toString(); CosmosDatabase cosmosDatabase = null; CosmosContainer cosmosContainer = null; try { client1 = new 
CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); client1.createDatabase(databaseId); cosmosDatabase = client1.getDatabase(databaseId); cosmosDatabase.createContainer(containerId, "/mypk"); InternalObjectNode internalObjectNode = getInternalObjectNode(); cosmosContainer = cosmosDatabase.getContainer(containerId); CosmosItemResponse<InternalObjectNode> writeResourceResponse = cosmosContainer.createItem(internalObjectNode); assertThat(writeResourceResponse.getDiagnostics().toString()).contains("addressResolutionStatistics"); assertThat(writeResourceResponse.getDiagnostics().toString()).contains("\"inflightRequest\":false"); assertThat(writeResourceResponse.getDiagnostics().toString()).doesNotContain("endTime=\"null\""); assertThat(writeResourceResponse.getDiagnostics().toString()).contains("\"exceptionMessage\":null"); client2 = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); cosmosDatabase = client2.getDatabase(databaseId); cosmosContainer = cosmosDatabase.getContainer(containerId); AsyncDocumentClient asyncDocumentClient = client2.asyncClient().getContextClient(); GlobalAddressResolver addressResolver = (GlobalAddressResolver) FieldUtils.readField(asyncDocumentClient, "addressResolver", true); @SuppressWarnings("rawtypes") Map addressCacheByEndpoint = (Map) FieldUtils.readField(addressResolver, "addressCacheByEndpoint", true); Object endpointCache = addressCacheByEndpoint.values().toArray()[0]; GatewayAddressCache addressCache = (GatewayAddressCache) FieldUtils.readField(endpointCache, "addressCache", true); HttpClient httpClient = httpClient(true); FieldUtils.writeField(addressCache, "httpClient", httpClient, true); new Thread(() -> { try { Thread.sleep(5000); HttpClient httpClient1 = httpClient(false); FieldUtils.writeField(addressCache, 
    // Builds a test document whose id and partition key ("mypk") are the same fresh UUID.
    private InternalObjectNode getInternalObjectNode() {
        InternalObjectNode internalObjectNode = new InternalObjectNode();
        String uuid = UUID.randomUUID().toString();
        internalObjectNode.setId(uuid);
        BridgeInternal.setProperty(internalObjectNode, "mypk", uuid);
        return internalObjectNode;
    }

    // Builds a test document with a fresh UUID id and the caller-supplied partition key value,
    // so several documents can share one logical partition.
    private InternalObjectNode getInternalObjectNode(String pkValue) {
        InternalObjectNode internalObjectNode = new InternalObjectNode();
        String uuid = UUID.randomUUID().toString();
        internalObjectNode.setId(uuid);
        BridgeInternal.setProperty(internalObjectNode, "mypk", pkValue);
        return internalObjectNode;
    }
    // Resets the private 'supplementalResponseStatisticsList' field to an empty list via
    // reflection, so a fresh accumulation can be asserted on the same statistics instance.
    private void clearStoreResponseStatistics(ClientSideRequestStatistics requestStatistics) throws Exception {
        Field storeResponseStatisticsField =
            ClientSideRequestStatistics.class.getDeclaredField("supplementalResponseStatisticsList");
        storeResponseStatisticsField.setAccessible(true);
        storeResponseStatisticsField.set(requestStatistics,
            new ArrayList<ClientSideRequestStatistics.StoreResponseStatistics>());
    }

    // Asserts the gateway-mode transport timeline contains all expected request lifecycle events.
    private void validateTransportRequestTimelineGateway(String diagnostics) {
        assertThat(diagnostics).contains("\"eventName\":\"connectionConfigured\"");
        assertThat(diagnostics).contains("\"eventName\":\"requestSent\"");
        assertThat(diagnostics).contains("\"eventName\":\"transitTime\"");
        assertThat(diagnostics).contains("\"eventName\":\"received\"");
    }

    // Asserts the direct (RNTBD) transport timeline contains every stage of the request
    // pipeline plus timing fields.
    private void validateTransportRequestTimelineDirect(String diagnostics) {
        assertThat(diagnostics).contains("\"eventName\":\"created\"");
        assertThat(diagnostics).contains("\"eventName\":\"queued\"");
        assertThat(diagnostics).contains("\"eventName\":\"channelAcquisitionStarted\"");
        assertThat(diagnostics).contains("\"eventName\":\"pipelined\"");
        assertThat(diagnostics).contains("\"eventName\":\"transitTime\"");
        assertThat(diagnostics).contains("\"eventName\":\"decodeTime");
        assertThat(diagnostics).contains("\"eventName\":\"received\"");
        assertThat(diagnostics).contains("\"eventName\":\"completed\"");
        assertThat(diagnostics).contains("\"startTimeUTC\"");
        assertThat(diagnostics).contains("\"durationInMilliSecs\"");
    }

    // Fails the test if the given string is not parseable JSON. Streaming the tokens to the
    // end is enough — JsonParser throws on malformed input.
    public void isValidJSON(final String json) {
        try {
            final JsonParser parser = new JsonFactory().createParser(json);
            while (parser.nextToken() != null) {
            }
        } catch (IOException ex) {
            fail("Diagnostic string is not in json format ", ex);
        }
    }
    /**
     * Creates an HTTP client for address-cache tests. With {@code fakeProxy} set, the client is
     * routed through a non-existent local proxy (localhost:8888) so that address resolution
     * requests fail with a connection-refused error; otherwise a plain client is returned.
     */
    private HttpClient httpClient(boolean fakeProxy) {
        HttpClientConfig httpClientConfig;
        if(fakeProxy) {
            httpClientConfig = new HttpClientConfig(new Configs())
                .withProxy(new ProxyOptions(ProxyOptions.Type.HTTP, new InetSocketAddress("localhost", 8888)));
        } else {
            httpClientConfig = new HttpClientConfig(new Configs());
        }
        return HttpClient.createFixed(httpClientConfig);
    }

    // Deserializes an IndexUtilizationInfo from JSON; returns null (after logging) when the
    // string is malformed rather than failing the caller.
    private IndexUtilizationInfo createFromJSONString(String jsonString) {
        ObjectMapper indexUtilizationInfoObjectMapper = new ObjectMapper();
        IndexUtilizationInfo indexUtilizationInfo = null;
        try {
            indexUtilizationInfo = indexUtilizationInfoObjectMapper.readValue(jsonString, IndexUtilizationInfo.class);
        } catch (JsonProcessingException e) {
            logger.error("Json not correctly formed ", e);
        }
        return indexUtilizationInfo;
    }
    /**
     * Walks every store response in the diagnostics and validates its RNTBD channel statistics:
     * the serialized form must carry channel id, task-queue size, pending request count, read time
     * and connection-init flag; the transit-timeout fields must be present exactly when the
     * timeout count is positive (they are omitted from JSON otherwise).
     */
    private void validateChannelStatistics(CosmosDiagnostics cosmosDiagnostics) {
        for (ClientSideRequestStatistics clientSideRequestStatistics :
                cosmosDiagnostics.getClientSideRequestStatistics()) {
            for (ClientSideRequestStatistics.StoreResponseStatistics storeResponseStatistics :
                    clientSideRequestStatistics.getResponseStatisticsList()) {
                assertThat(storeResponseStatistics).isNotNull();
                RntbdChannelStatistics rntbdChannelStatistics =
                    storeResponseStatistics
                        .getStoreResult()
                        .getStoreResponseDiagnostics()
                        .getRntbdChannelStatistics();
                assertThat(rntbdChannelStatistics).isNotNull();
                try {
                    // Cross-check the serialized JSON against the getters of the same object.
                    String rntbdChannelStatisticsString =
                        Utils.getSimpleObjectMapper().writeValueAsString(rntbdChannelStatistics);
                    assertThat(rntbdChannelStatisticsString)
                        .contains("\"channelId\":\"" + rntbdChannelStatistics.getChannelId() + "\"");
                    assertThat(rntbdChannelStatisticsString)
                        .contains("\"channelTaskQueueSize\":" + rntbdChannelStatistics.getChannelTaskQueueSize());
                    assertThat(rntbdChannelStatisticsString)
                        .contains("\"pendingRequestsCount\":" + rntbdChannelStatistics.getPendingRequestsCount());
                    assertThat(rntbdChannelStatisticsString)
                        .contains("\"lastReadTime\":\"" + rntbdChannelStatistics.getLastReadTime() + "\"");
                    if (rntbdChannelStatistics.getTransitTimeoutCount() > 0) {
                        // Timeout fields are serialized only when at least one timeout occurred.
                        assertThat(rntbdChannelStatisticsString)
                            .contains("\"transitTimeoutCount\":" + rntbdChannelStatistics.getTransitTimeoutCount());
                        assertThat(rntbdChannelStatisticsString)
                            .contains("\"transitTimeoutStartingTime\":\""
                                + rntbdChannelStatistics.getTransitTimeoutStartingTime() + "\"");
                    } else {
                        assertThat(rntbdChannelStatisticsString)
                            .doesNotContain("\"transitTimeoutCount\":" + rntbdChannelStatistics.getTransitTimeoutCount());
                        assertThat(rntbdChannelStatisticsString)
                            .doesNotContain("\"transitTimeoutStartingTime\":\""
                                + rntbdChannelStatistics.getTransitTimeoutStartingTime() + "\"");
                    }
                    assertThat(rntbdChannelStatisticsString)
                        .contains("\"waitForConnectionInit\":" + rntbdChannelStatistics.isWaitForConnectionInit());
                } catch (JsonProcessingException e) {
                    fail("Failed to parse RntbdChannelStatistics");
                }
            }
        }
    }

    // Minimal POJO used as a test document; 'mypk' is the partition key property.
    public static class TestItem {
        public String id;
        public String mypk;

        public TestItem() {
        }
    }
}
class CosmosDiagnosticsTest extends TestSuiteBase { private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); private static final String USER_AGENT_SUFFIX_GATEWAY_CLIENT = "gatewayClientSuffix"; private static final String USER_AGENT_SUFFIX_DIRECT_CLIENT = "directClientSuffix"; private static final DateTimeFormatter RESPONSE_TIME_FORMATTER = DateTimeFormatter.ISO_INSTANT; private static final String tempMachineId = getTempMachineId(); private CosmosClient gatewayClient; private CosmosClient directClient; private CosmosAsyncDatabase cosmosAsyncDatabase; private CosmosContainer containerGateway; private CosmosContainer containerDirect; private CosmosAsyncContainer cosmosAsyncContainer; private String gatewayClientUserAgent; private String directClientUserAgent; private static String getTempMachineId() { Field field = null; try { field = RxDocumentClientImpl.class.getDeclaredField("tempMachineId"); } catch (NoSuchFieldException e) { fail(e.toString()); } field.setAccessible(true); try { return (String)field.get(null); } catch (IllegalAccessException e) { fail(e.toString()); return null; } } @BeforeClass(groups = {"simple"}, timeOut = SETUP_TIMEOUT) public void beforeClass() { assertThat(this.gatewayClient).isNull(); gatewayClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .userAgentSuffix(USER_AGENT_SUFFIX_GATEWAY_CLIENT) .gatewayMode() .buildClient(); UserAgentContainer userAgentContainer = new UserAgentContainer(); userAgentContainer.setSuffix(USER_AGENT_SUFFIX_GATEWAY_CLIENT); this.gatewayClientUserAgent = userAgentContainer.getUserAgent(); directClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .userAgentSuffix(USER_AGENT_SUFFIX_DIRECT_CLIENT) .directMode() .buildClient(); userAgentContainer.setSuffix(USER_AGENT_SUFFIX_DIRECT_CLIENT); this.directClientUserAgent = 
userAgentContainer.getUserAgent(); cosmosAsyncContainer = getSharedMultiPartitionCosmosContainer(this.gatewayClient.asyncClient()); cosmosAsyncDatabase = directClient.asyncClient().getDatabase(cosmosAsyncContainer.getDatabase().getId()); containerGateway = gatewayClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); containerDirect = directClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); } @AfterClass(groups = {"simple"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() { if (this.gatewayClient != null) { this.gatewayClient.close(); } if (this.directClient != null) { this.directClient.close(); } } @DataProvider(name = "query") private Object[][] query() { return new Object[][]{ new Object[] { "Select * from c where c.id = 'wrongId'", true }, new Object[] { "Select top 1 * from c where c.id = 'wrongId'", true }, new Object[] { "Select * from c where c.id = 'wrongId' order by c.id", true }, new Object[] { "Select count(1) from c where c.id = 'wrongId' group by c.pk", true }, new Object[] { "Select distinct c.pk from c where c.id = 'wrongId'", true }, new Object[] { "Select * from c where c.id = 'wrongId'", false }, new Object[] { "Select top 1 * from c where c.id = 'wrongId'", false }, new Object[] { "Select * from c where c.id = 'wrongId' order by c.id", false }, new Object[] { "Select count(1) from c where c.id = 'wrongId' group by c.pk", false }, new Object[] { "Select distinct c.pk from c where c.id = 'wrongId'", false }, new Object[] { "Select * from c where c.id = 'wrongId'", false }, new Object[] { "Select top 1 * from c where c.id = 'wrongId'", false }, new Object[] { "Select * from c where c.id = 'wrongId' order by c.id", false }, new Object[] { "Select count(1) from c where c.id = 'wrongId' group by c.pk", false }, new Object[] { "Select distinct c.pk from c where c.id = 'wrongId'", false }, }; } @DataProvider(name = 
"readAllItemsOfLogicalPartition") private Object[][] readAllItemsOfLogicalPartition() { return new Object[][]{ new Object[] { 1, true }, new Object[] { 5, null }, new Object[] { 20, null }, new Object[] { 1, false }, new Object[] { 5, false }, new Object[] { 20, false }, }; } @DataProvider(name = "connectionStateListenerArgProvider") public Object[][] connectionStateListenerArgProvider() { return new Object[][]{ {true}, {false} }; } @Test(groups = {"simple"}, timeOut = TIMEOUT) @Test(groups = {"simple"}, timeOut = TIMEOUT) public void gatewayDiagnosticsOnException() throws Exception { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = null; try { createResponse = this.containerGateway.createItem(internalObjectNode); CosmosItemRequestOptions cosmosItemRequestOptions = new CosmosItemRequestOptions(); ModelBridgeInternal.setPartitionKey(cosmosItemRequestOptions, new PartitionKey("wrongPartitionKey")); CosmosItemResponse<InternalObjectNode> readResponse = this.containerGateway.readItem(BridgeInternal.getProperties(createResponse).getId(), new PartitionKey("wrongPartitionKey"), InternalObjectNode.class); fail("request should fail as partition key is wrong"); } catch (CosmosException exception) { isValidJSON(exception.toString()); isValidJSON(exception.getMessage()); String diagnostics = exception.getDiagnostics().toString(); assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND); assertThat(diagnostics).contains("\"connectionMode\":\"GATEWAY\""); assertThat(diagnostics).contains("\"userAgent\":\"" + this.gatewayClientUserAgent + "\""); assertThat(diagnostics).doesNotContain(("\"gatewayStatistics\":null")); assertThat(diagnostics).contains("\"statusCode\":404"); assertThat(diagnostics).contains("\"operationType\":\"Read\""); assertThat(diagnostics).contains("\"userAgent\":\"" + this.gatewayClientUserAgent + "\""); 
assertThat(diagnostics).contains("\"exceptionMessage\":\"Entity with the specified id does not exist in the system."); assertThat(diagnostics).contains("\"exceptionResponseHeaders\""); assertThat(diagnostics).doesNotContain("\"exceptionResponseHeaders\": \"{}\""); assertThat(diagnostics).containsAnyOf( "\"machineId\":\"" + tempMachineId + "\"", "\"machineId\":\"" + ClientTelemetry.getMachineId(null) + "\"" ); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); assertThat(diagnostics).doesNotContain(("\"resourceAddress\":null")); assertThat(createResponse.getDiagnostics().getContactedRegionNames()).isNotNull(); validateRegionContacted(createResponse.getDiagnostics(), this.containerGateway.asyncContainer.getDatabase().getClient()); assertThat(createResponse.getDiagnostics().getRegionsContacted()).isNotEmpty(); assertThat(exception.getDiagnostics().getDuration()).isNotNull(); validateTransportRequestTimelineGateway(diagnostics); isValidJSON(diagnostics); } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void systemDiagnosticsForSystemStateInformation() { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = this.containerGateway.createItem(internalObjectNode); String diagnostics = createResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("systemInformation"); assertThat(diagnostics).contains("usedMemory"); assertThat(diagnostics).contains("availableMemory"); assertThat(diagnostics).contains("systemCpuLoad"); assertThat(diagnostics).contains("\"userAgent\":\"" + this.gatewayClientUserAgent + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); assertThat(createResponse.getDiagnostics().getDuration()).isNotNull(); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void directDiagnostics() throws Exception { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> 
createResponse = containerDirect.createItem(internalObjectNode); validateDirectModeDiagnosticsOnSuccess(createResponse.getDiagnostics(), directClient, this.directClientUserAgent); try { containerDirect.createItem(internalObjectNode); fail("expected 409"); } catch (CosmosException e) { validateDirectModeDiagnosticsOnException(e.getDiagnostics(), this.directClientUserAgent); } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void requestSessionTokenDiagnostics() { CosmosClient testSessionTokenClient = null; try { testSessionTokenClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .consistencyLevel(ConsistencyLevel.SESSION) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); CosmosContainer cosmosContainer = testSessionTokenClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode); String diagnostics = createResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"requestSessionToken\":null"); String sessionToken = createResponse.getSessionToken(); CosmosItemResponse<InternalObjectNode> readResponse = cosmosContainer.readItem(BridgeInternal.getProperties(createResponse).getId(), new PartitionKey(BridgeInternal.getProperties(createResponse).getId()), InternalObjectNode.class); diagnostics = readResponse.getDiagnostics().toString(); assertThat(diagnostics).contains(String.format("\"requestSessionToken\":\"%s\"", sessionToken)); CosmosBatch batch = CosmosBatch.createCosmosBatch(new PartitionKey( BridgeInternal.getProperties(createResponse).getId())); internalObjectNode = getInternalObjectNode(); batch.createItemOperation(internalObjectNode); CosmosBatchResponse batchResponse = cosmosContainer.executeCosmosBatch(batch, new 
CosmosBatchRequestOptions().setSessionToken(readResponse.getSessionToken())); diagnostics = batchResponse.getDiagnostics().toString(); assertThat(diagnostics).contains(String.format("\"requestSessionToken\":\"%s\"", readResponse.getSessionToken())); } finally { if (testSessionTokenClient != null) { testSessionTokenClient.close(); } } } @Test(groups = {"simple"}) public void databaseAccountToClients() { CosmosClient testClient = null; try { testClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); CosmosContainer cosmosContainer = testClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode); String diagnostics = createResponse.getDiagnostics().toString(); assertThat(diagnostics).contains(String.format("\"clientEndpoints\"" + ":{\"%s\"", TestConfigurations.HOST)); int clientsIndex = diagnostics.indexOf("\"clientEndpoints\":"); String[] substrings = diagnostics.substring(clientsIndex, clientsIndex + 120) .split("}")[0].split(":"); String intString = substrings[substrings.length-1]; int intValue = Integer.parseInt(intString); CosmosClient testClient2 = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); internalObjectNode = getInternalObjectNode(); createResponse = cosmosContainer.createItem(internalObjectNode); diagnostics = createResponse.getDiagnostics().toString(); assertThat(diagnostics).contains(String.format("\"clientEndpoints\"" + ":{\"%s\"", TestConfigurations.HOST)); clientsIndex = diagnostics.indexOf("\"clientEndpoints\":"); substrings = diagnostics.substring(clientsIndex, clientsIndex + 120) 
.split("}")[0].split(":"); intString = substrings[substrings.length-1]; assertThat(Integer.parseInt(intString)).isEqualTo(intValue+1); testClient2.close(); } finally { if (testClient != null) { testClient.close(); } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void queryPlanDiagnostics() throws JsonProcessingException { List<String> itemIdList = new ArrayList<>(); for(int i = 0; i< 100; i++) { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = containerDirect.createItem(internalObjectNode); if(i%20 == 0) { itemIdList.add(internalObjectNode.getId()); } } String queryDiagnostics = null; List<String> queryList = new ArrayList<>(); queryList.add("Select * from c"); StringBuilder queryBuilder = new StringBuilder("SELECT * from c where c.mypk in ("); for(int i = 0 ; i < itemIdList.size(); i++){ queryBuilder.append("'").append(itemIdList.get(i)).append("'"); if(i < (itemIdList.size()-1)) { queryBuilder.append(","); } else { queryBuilder.append(")"); } } queryList.add(queryBuilder.toString()); queryList.add("Select * from c where c.id = 'wrongId'"); for(String query : queryList) { int feedResponseCounter = 0; CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); options.setQueryMetricsEnabled(true); Iterator<FeedResponse<InternalObjectNode>> iterator = containerDirect.queryItems(query, options, InternalObjectNode.class).iterableByPage().iterator(); while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); if (feedResponseCounter == 0) { assertThat(queryDiagnostics).contains("QueryPlan Start Time (UTC)="); assertThat(queryDiagnostics).contains("QueryPlan End Time (UTC)="); assertThat(queryDiagnostics).contains("QueryPlan Duration (ms)="); String requestTimeLine = 
OBJECT_MAPPER.writeValueAsString(feedResponse.getCosmosDiagnostics().getFeedResponseDiagnostics().getQueryPlanDiagnosticsContext().getRequestTimeline()); assertThat(requestTimeLine).contains("connectionConfigured"); assertThat(requestTimeLine).contains("requestSent"); assertThat(requestTimeLine).contains("transitTime"); assertThat(requestTimeLine).contains("received"); } else { assertThat(queryDiagnostics).doesNotContain("QueryPlan Start Time (UTC)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan End Time (UTC)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan Duration (ms)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan RequestTimeline ="); } feedResponseCounter++; } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void readManyDiagnostics() { String pkValue = UUID.randomUUID().toString(); PartitionKey partitionKey = new PartitionKey(pkValue); List<CosmosItemIdentity> itemIdList = new ArrayList<>(); for(int i = 0; i< 100; i++) { InternalObjectNode internalObjectNode = getInternalObjectNode(pkValue); CosmosItemResponse<InternalObjectNode> createResponse = containerDirect.createItem(internalObjectNode); if(i%20 == 0) { itemIdList.add(new CosmosItemIdentity(partitionKey, internalObjectNode.getId())); } } FeedResponse<InternalObjectNode> response = containerDirect.readMany(itemIdList, InternalObjectNode.class); FeedResponseDiagnostics diagnostics = response.getCosmosDiagnostics().getFeedResponseDiagnostics(); assertThat(diagnostics.getClientSideRequestStatistics().size()).isEqualTo(1); assertThat(diagnostics.getQueryMetricsMap().values().iterator().next().getRetrievedDocumentCount()).isEqualTo(itemIdList.size()); String cosmosDiagnosticsString = response.getCosmosDiagnostics().toString(); assertThat(cosmosDiagnosticsString).contains("\"userAgent\":\"" + this.directClientUserAgent + "\""); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void queryMetricsWithIndexMetrics() { List<String> itemIdList = new ArrayList<>(); 
for(int i = 0; i< 100; i++) { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = containerDirect.createItem(internalObjectNode); if(i%20 == 0) { itemIdList.add(internalObjectNode.getId()); } } String queryDiagnostics = null; List<String> queryList = new ArrayList<>(); StringBuilder queryBuilder = new StringBuilder("SELECT * from c where c.mypk in ("); for(int i = 0 ; i < itemIdList.size(); i++){ queryBuilder.append("'").append(itemIdList.get(i)).append("'"); if(i < (itemIdList.size()-1)) { queryBuilder.append(","); } else { queryBuilder.append(")"); } } queryList.add(queryBuilder.toString()); for (String query : queryList) { CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); options.setQueryMetricsEnabled(true); options.setIndexMetricsEnabled(true); Iterator<FeedResponse<InternalObjectNode>> iterator = containerDirect.queryItems(query, options, InternalObjectNode.class).iterableByPage().iterator(); while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); logger.info("This is query diagnostics {}", queryDiagnostics); if (feedResponse.getResponseHeaders().containsKey(HttpConstants.HttpHeaders.INDEX_UTILIZATION)) { assertThat(feedResponse.getResponseHeaders().get(HttpConstants.HttpHeaders.INDEX_UTILIZATION)).isNotNull(); assertThat(createFromJSONString(Utils.decodeBase64String(feedResponse.getResponseHeaders().get(HttpConstants.HttpHeaders.INDEX_UTILIZATION))).getUtilizedSingleIndexes()).isNotNull(); } } } } @Test(groups = {"simple"}, dataProvider = "query", timeOut = TIMEOUT) public void queryMetrics(String query, Boolean qmEnabled) { CosmosContainer directContainer = this.directClient.getDatabase(this.cosmosAsyncContainer.getDatabase().getId()) .getContainer(this.cosmosAsyncContainer.getId()); CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); if (qmEnabled != 
null) { options.setQueryMetricsEnabled(qmEnabled); } boolean qroupByFirstResponse = true; Iterator<FeedResponse<InternalObjectNode>> iterator = directContainer.queryItems(query, options, InternalObjectNode.class).iterableByPage().iterator(); assertThat(iterator.hasNext()).isTrue(); while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); String queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); assertThat(feedResponse.getResults().size()).isEqualTo(0); if (!query.contains("group by") || qroupByFirstResponse) { validateQueryDiagnostics(queryDiagnostics, qmEnabled, true); validateDirectModeQueryDiagnostics(queryDiagnostics, this.directClientUserAgent); if (query.contains("group by")) { qroupByFirstResponse = false; } } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void queryDiagnosticsOnOrderBy() { String containerId = "testcontainer"; cosmosAsyncDatabase.createContainer(containerId, "/mypk", ThroughputProperties.createManualThroughput(40000)).block(); CosmosAsyncContainer testcontainer = cosmosAsyncDatabase.getContainer(containerId); CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); options.setConsistencyLevel(ConsistencyLevel.EVENTUAL); testcontainer.createItem(getInternalObjectNode()).block(); options.setMaxDegreeOfParallelism(-1); String query = "SELECT * from c ORDER BY c._ts DESC"; CosmosPagedFlux<InternalObjectNode> cosmosPagedFlux = testcontainer.queryItems(query, options, InternalObjectNode.class); Set<String> partitionKeyRangeIds = new HashSet<>(); Set<String> pkRids = new HashSet<>(); cosmosPagedFlux.byPage().flatMap(feedResponse -> { String cosmosDiagnosticsString = feedResponse.getCosmosDiagnostics().toString(); Pattern pattern = Pattern.compile("(\"partitionKeyRangeId\":\")(\\d)"); Matcher matcher = pattern.matcher(cosmosDiagnosticsString); while (matcher.find()) { String group = matcher.group(2); partitionKeyRangeIds.add(group); } pattern = 
Pattern.compile("(pkrId:)(\\d)"); matcher = pattern.matcher(cosmosDiagnosticsString); while (matcher.find()) { String group = matcher.group(2); pkRids.add(group); } return Flux.just(feedResponse); }).blockLast(); assertThat(pkRids).isNotEmpty(); assertThat(pkRids).isEqualTo(partitionKeyRangeIds); deleteCollection(testcontainer); } private void validateDirectModeDiagnosticsOnSuccess( CosmosDiagnostics cosmosDiagnostics, CosmosClient testDirectClient, String userAgent) throws Exception { String diagnostics = cosmosDiagnostics.toString(); assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\""); assertThat(diagnostics).contains("supplementalResponseStatisticsList"); assertThat(diagnostics).contains("\"gatewayStatistics\":null"); assertThat(diagnostics).contains("addressResolutionStatistics"); assertThat(diagnostics).contains("\"metaDataName\":\"CONTAINER_LOOK_UP\""); assertThat(diagnostics).contains("\"metaDataName\":\"PARTITION_KEY_RANGE_LOOK_UP\""); assertThat(diagnostics).contains("\"metaDataName\":\"SERVER_ADDRESS_LOOKUP\""); assertThat(diagnostics).contains("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\""); assertThat(diagnostics).contains("\"userAgent\":\"" + userAgent + "\""); assertThat(diagnostics).containsAnyOf( "\"machineId\":\"" + tempMachineId + "\"", "\"machineId\":\"" + ClientTelemetry.getMachineId(null) + "\"" ); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); assertThat(diagnostics).contains("\"backendLatencyInMs\""); assertThat(diagnostics).contains("\"retryAfterInMs\""); assertThat(diagnostics).contains("\"channelStatistics\""); assertThat(cosmosDiagnostics.getContactedRegionNames()).isNotEmpty(); assertThat(cosmosDiagnostics.getDuration()).isNotNull(); validateTransportRequestTimelineDirect(diagnostics); validateRegionContacted(cosmosDiagnostics, testDirectClient.asyncClient()); validateChannelStatistics(cosmosDiagnostics); isValidJSON(diagnostics); } private void 
validateDirectModeDiagnosticsOnException(CosmosDiagnostics cosmosDiagnostics, String userAgent) { String diagnosticsString = cosmosDiagnostics.toString(); assertThat(diagnosticsString).contains("\"backendLatencyInMs\""); assertThat(diagnosticsString).contains("\"userAgent\":\"" + userAgent + "\""); assertThat(diagnosticsString).contains("\"retryAfterInMs\""); assertThat(diagnosticsString).contains("\"exceptionMessage\":\"[\\\"Resource with specified id or name already exists.\\\"]\""); assertThat(diagnosticsString).contains("\"exceptionResponseHeaders\""); assertThat(diagnosticsString).doesNotContain("\"exceptionResponseHeaders\": \"{}\""); validateTransportRequestTimelineDirect(diagnosticsString); validateChannelStatistics(cosmosDiagnostics); } private void validateDirectModeQueryDiagnostics(String diagnostics, String userAgent) { assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\""); assertThat(diagnostics).contains("supplementalResponseStatisticsList"); assertThat(diagnostics).contains("responseStatisticsList"); assertThat(diagnostics).contains("\"gatewayStatistics\":null"); assertThat(diagnostics).contains("addressResolutionStatistics"); assertThat(diagnostics).contains("\"userAgent\":\"" + userAgent + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); } private void validateGatewayModeQueryDiagnostics(String diagnostics, String userAgent) { assertThat(diagnostics).contains("\"connectionMode\":\"GATEWAY\""); assertThat(diagnostics).doesNotContain(("\"gatewayStatistics\":null")); assertThat(diagnostics).contains("\"operationType\":\"Query\""); assertThat(diagnostics).contains("\"userAgent\":\"" + userAgent + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); assertThat(diagnostics).contains("\"regionsContacted\""); } @Test(groups = {"simple"}, dataProvider = "query", timeOut = TIMEOUT*2) public void queryDiagnosticsGatewayMode(String query, Boolean qmEnabled) { 
CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); List<String> itemIdList = new ArrayList<>(); for (int i = 0; i < 100; i++) { InternalObjectNode internalObjectNode = getInternalObjectNode(); containerGateway.createItem(internalObjectNode); if (i % 20 == 0) { itemIdList.add(internalObjectNode.getId()); } } boolean qroupByFirstResponse = true; if (qmEnabled != null) { options.setQueryMetricsEnabled(qmEnabled); } Iterator<FeedResponse<InternalObjectNode>> iterator = containerGateway .queryItems(query, options, InternalObjectNode.class) .iterableByPage() .iterator(); assertThat(iterator.hasNext()).isTrue(); while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); String queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); assertThat(feedResponse.getResults().size()).isEqualTo(0); if (!query.contains("group by") || qroupByFirstResponse) { validateQueryDiagnostics(queryDiagnostics, qmEnabled, true); validateGatewayModeQueryDiagnostics(queryDiagnostics, this.gatewayClientUserAgent); if (query.contains("group by")) { qroupByFirstResponse = false; } } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void queryMetricsWithADifferentLocale() { Locale.setDefault(Locale.GERMAN); String query = "select * from root where root.id= \"someid\""; CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); Iterator<FeedResponse<InternalObjectNode>> iterator = this.containerGateway.queryItems(query, options, InternalObjectNode.class) .iterableByPage().iterator(); double requestCharge = 0; while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); requestCharge += feedResponse.getRequestCharge(); } assertThat(requestCharge).isGreaterThan(0); Locale.setDefault(Locale.ROOT); } private static void validateQueryDiagnostics( String queryDiagnostics, Boolean qmEnabled, boolean expectQueryPlanDiagnostics) { if (qmEnabled == null || qmEnabled) { 
assertThat(queryDiagnostics).contains("Retrieved Document Count"); assertThat(queryDiagnostics).contains("Query Preparation Times"); assertThat(queryDiagnostics).contains("Runtime Execution Times"); assertThat(queryDiagnostics).contains("Partition Execution Timeline"); } else { assertThat(queryDiagnostics).doesNotContain("Retrieved Document Count"); assertThat(queryDiagnostics).doesNotContain("Query Preparation Times"); assertThat(queryDiagnostics).doesNotContain("Runtime Execution Times"); assertThat(queryDiagnostics).doesNotContain("Partition Execution Timeline"); } if (expectQueryPlanDiagnostics) { assertThat(queryDiagnostics).contains("QueryPlan Start Time (UTC)="); assertThat(queryDiagnostics).contains("QueryPlan End Time (UTC)="); assertThat(queryDiagnostics).contains("QueryPlan Duration (ms)="); } else { assertThat(queryDiagnostics).doesNotContain("QueryPlan Start Time (UTC)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan End Time (UTC)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan Duration (ms)="); } } @Test(groups = {"simple"}, dataProvider = "readAllItemsOfLogicalPartition", timeOut = TIMEOUT) public void queryMetricsForReadAllItemsOfLogicalPartition(Integer expectedItemCount, Boolean qmEnabled) { String pkValue = UUID.randomUUID().toString(); for (int i = 0; i < expectedItemCount; i++) { InternalObjectNode internalObjectNode = getInternalObjectNode(pkValue); CosmosItemResponse<InternalObjectNode> createResponse = containerGateway.createItem(internalObjectNode); } CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); if (qmEnabled != null) { options = options.setQueryMetricsEnabled(qmEnabled); } ModelBridgeInternal.setQueryRequestOptionsMaxItemCount(options, 5); Iterator<FeedResponse<InternalObjectNode>> iterator = this.containerGateway .readAllItems( new PartitionKey(pkValue), options, InternalObjectNode.class) .iterableByPage().iterator(); assertThat(iterator.hasNext()).isTrue(); int actualItemCount = 0; while 
(iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); String queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); actualItemCount += feedResponse.getResults().size(); validateQueryDiagnostics(queryDiagnostics, qmEnabled, false); } assertThat(actualItemCount).isEqualTo(expectedItemCount); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void directDiagnosticsOnException() throws Exception { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = null; try { createResponse = containerDirect.createItem(internalObjectNode); CosmosItemRequestOptions cosmosItemRequestOptions = new CosmosItemRequestOptions(); ModelBridgeInternal.setPartitionKey(cosmosItemRequestOptions, new PartitionKey("wrongPartitionKey")); CosmosItemResponse<InternalObjectNode> readResponse = containerDirect.readItem(BridgeInternal.getProperties(createResponse).getId(), new PartitionKey("wrongPartitionKey"), InternalObjectNode.class); fail("request should fail as partition key is wrong"); } catch (CosmosException exception) { isValidJSON(exception.toString()); isValidJSON(exception.getMessage()); String diagnostics = exception.getDiagnostics().toString(); assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND); assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\""); assertThat(diagnostics).contains("\"userAgent\":\"" + this.directClientUserAgent + "\""); assertThat(diagnostics).doesNotContain(("\"resourceAddress\":null")); assertThat(exception.getDiagnostics().getContactedRegionNames()).isNotEmpty(); assertThat(exception.getDiagnostics().getDuration()).isNotNull(); assertThat(diagnostics).contains("\"backendLatencyInMs\""); assertThat(diagnostics).contains("\"retryAfterInMs\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); assertThat(diagnostics).contains("\"exceptionMessage\":\"[\\\"Resource Not Found."); 
assertThat(diagnostics).contains("\"exceptionResponseHeaders\""); assertThat(diagnostics).doesNotContain("\"exceptionResponseHeaders\":null"); isValidJSON(diagnostics); validateTransportRequestTimelineDirect(diagnostics); validateRegionContacted(createResponse.getDiagnostics(), directClient.asyncClient()); ObjectNode diagnosticsNode = (ObjectNode) OBJECT_MAPPER.readTree(diagnostics); JsonNode responseStatisticsList = diagnosticsNode.get("responseStatisticsList"); assertThat(responseStatisticsList.isArray()).isTrue(); assertThat(responseStatisticsList.size()).isGreaterThan(0); JsonNode storeResult = responseStatisticsList.get(0).get("storeResult"); assertThat(storeResult).isNotNull(); JsonNode replicaStatusList = storeResult.get("replicaStatusList"); assertThat(replicaStatusList.isArray()).isTrue(); assertThat(replicaStatusList.size()).isGreaterThan(0); } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void directDiagnosticsOnMetadataException() { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosClient client = null; try { client = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); CosmosContainer container = client.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); HttpClient mockHttpClient = Mockito.mock(HttpClient.class); Mockito.when(mockHttpClient.send(Mockito.any(HttpRequest.class), Mockito.any(Duration.class))) .thenReturn(Mono.error(new CosmosException(400, "TestBadRequest"))); RxStoreModel rxGatewayStoreModel = rxGatewayStoreModel = ReflectionUtils.getGatewayProxy((RxDocumentClientImpl) client.asyncClient().getDocClientWrapper()); ReflectionUtils.setGatewayHttpClient(rxGatewayStoreModel, mockHttpClient); container.createItem(internalObjectNode); fail("request should fail as bad request"); } catch (CosmosException exception) { isValidJSON(exception.toString()); 
isValidJSON(exception.getMessage()); String diagnostics = exception.getDiagnostics().toString(); assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.BADREQUEST); assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\""); assertThat(diagnostics).contains("\"exceptionMessage\":\"TestBadRequest\""); assertThat(diagnostics).doesNotContain(("\"resourceAddress\":null")); assertThat(diagnostics).contains("\"resourceType\":\"DocumentCollection\""); assertThat(exception.getDiagnostics().getContactedRegionNames()).isNotEmpty(); assertThat(exception.getDiagnostics().getDuration()).isNotNull(); isValidJSON(diagnostics); } finally { if (client != null) { client.close(); } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void supplementalResponseStatisticsList() throws Exception { ClientSideRequestStatistics clientSideRequestStatistics = new ClientSideRequestStatistics(mockDiagnosticsClientContext()); for (int i = 0; i < 15; i++) { RxDocumentServiceRequest rxDocumentServiceRequest = RxDocumentServiceRequest.create(mockDiagnosticsClientContext(), OperationType.Head, ResourceType.Document); clientSideRequestStatistics.recordResponse(rxDocumentServiceRequest, null, null); } List<ClientSideRequestStatistics.StoreResponseStatistics> storeResponseStatistics = getStoreResponseStatistics(clientSideRequestStatistics); ObjectMapper objectMapper = new ObjectMapper(); String diagnostics = objectMapper.writeValueAsString(clientSideRequestStatistics); JsonNode jsonNode = objectMapper.readTree(diagnostics); ArrayNode supplementalResponseStatisticsListNode = (ArrayNode) jsonNode.get("supplementalResponseStatisticsList"); assertThat(storeResponseStatistics.size()).isEqualTo(15); assertThat(supplementalResponseStatisticsListNode.size()).isEqualTo(10); clearStoreResponseStatistics(clientSideRequestStatistics); storeResponseStatistics = getStoreResponseStatistics(clientSideRequestStatistics); assertThat(storeResponseStatistics.size()).isEqualTo(0); for (int i = 
0; i < 7; i++) { RxDocumentServiceRequest rxDocumentServiceRequest = RxDocumentServiceRequest.create(mockDiagnosticsClientContext(), OperationType.Head, ResourceType.Document); clientSideRequestStatistics.recordResponse(rxDocumentServiceRequest, null, null); } storeResponseStatistics = getStoreResponseStatistics(clientSideRequestStatistics); objectMapper = new ObjectMapper(); diagnostics = objectMapper.writeValueAsString(clientSideRequestStatistics); jsonNode = objectMapper.readTree(diagnostics); supplementalResponseStatisticsListNode = (ArrayNode) jsonNode.get("supplementalResponseStatisticsList"); assertThat(storeResponseStatistics.size()).isEqualTo(7); assertThat(supplementalResponseStatisticsListNode.size()).isEqualTo(7); for(JsonNode node : supplementalResponseStatisticsListNode) { assertThat(node.get("storeResult").asText()).isNotNull(); String requestResponseTimeUTC = node.get("requestResponseTimeUTC").asText(); Instant instant = Instant.from(RESPONSE_TIME_FORMATTER.parse(requestResponseTimeUTC)); assertThat(Instant.now().toEpochMilli() - instant.toEpochMilli()).isLessThan(5000); assertThat(node.get("requestResponseTimeUTC")).isNotNull(); assertThat(node.get("requestOperationType")).isNotNull(); assertThat(node.get("requestSessionToken")).isNotNull(); } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void serializationOnVariousScenarios() { CosmosDatabaseResponse cosmosDatabase = gatewayClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).read(); String diagnostics = cosmosDatabase.getDiagnostics().toString(); assertThat(diagnostics).contains("\"serializationType\":\"DATABASE_DESERIALIZATION\""); CosmosContainerResponse containerResponse = this.containerGateway.read(); diagnostics = containerResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"serializationType\":\"CONTAINER_DESERIALIZATION\""); TestItem testItem = new TestItem(); testItem.id = "TestId"; testItem.mypk = "TestPk"; CosmosItemResponse<TestItem> itemResponse 
= this.containerGateway.createItem(testItem); diagnostics = itemResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\""); testItem.id = "TestId2"; testItem.mypk = "TestPk"; itemResponse = this.containerGateway.createItem(testItem, new PartitionKey("TestPk"), null); diagnostics = itemResponse.getDiagnostics().toString(); assertThat(diagnostics).doesNotContain("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\""); assertThat(diagnostics).doesNotContain("\"serializationType\":\"ITEM_DESERIALIZATION\""); TestItem readTestItem = itemResponse.getItem(); diagnostics = itemResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"serializationType\":\"ITEM_DESERIALIZATION\""); CosmosItemResponse<InternalObjectNode> readItemResponse = this.containerGateway.readItem(testItem.id, new PartitionKey(testItem.mypk), null, InternalObjectNode.class); InternalObjectNode properties = readItemResponse.getItem(); diagnostics = readItemResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"serializationType\":\"ITEM_DESERIALIZATION\""); assertThat(diagnostics).contains("\"userAgent\":\"" + this.gatewayClientUserAgent + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void rntbdRequestResponseLengthStatistics() throws Exception { TestItem testItem = new TestItem(); testItem.id = UUID.randomUUID().toString(); testItem.mypk = UUID.randomUUID().toString(); int testItemLength = OBJECT_MAPPER.writeValueAsBytes(testItem).length; CosmosContainer container = directClient.getDatabase(this.cosmosAsyncContainer.getDatabase().getId()).getContainer(this.cosmosAsyncContainer.getId()); CosmosItemResponse<TestItem> createItemResponse = container.createItem(testItem); validate(createItemResponse.getDiagnostics(), testItemLength, ModelBridgeInternal.getPayloadLength(createItemResponse)); try { 
container.createItem(testItem); fail("expected to fail due to 409"); } catch (CosmosException e) { logger.info("Diagnostics are : {}", e.getDiagnostics()); String diagnostics = e.getDiagnostics().toString(); assertThat(diagnostics).contains("\"exceptionMessage\":\"[\\\"Resource with specified id or name already exists.\\\"]\""); assertThat(diagnostics).contains("\"exceptionResponseHeaders\""); assertThat(diagnostics).doesNotContain("\"exceptionResponseHeaders\": \"{}\""); validate(e.getDiagnostics(), testItemLength, 0); } CosmosItemResponse<TestItem> readItemResponse = container.readItem(testItem.id, new PartitionKey(testItem.mypk), TestItem.class); validate(readItemResponse.getDiagnostics(), 0, ModelBridgeInternal.getPayloadLength(readItemResponse)); CosmosItemResponse<Object> deleteItemResponse = container.deleteItem(testItem, null); validate(deleteItemResponse.getDiagnostics(), 0, 0); } @Test(groups = {"simple"}, dataProvider = "connectionStateListenerArgProvider", timeOut = TIMEOUT) public void rntbdStatistics(boolean connectionStateListenerEnabled) throws Exception { Instant beforeClientInitialization = Instant.now(); CosmosClient client1 = null; try { DirectConnectionConfig connectionConfig = DirectConnectionConfig.getDefaultConfig(); connectionConfig.setConnectionEndpointRediscoveryEnabled(connectionStateListenerEnabled); client1 = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .directMode(connectionConfig) .buildClient(); TestItem testItem = new TestItem(); testItem.id = UUID.randomUUID().toString(); testItem.mypk = UUID.randomUUID().toString(); int testItemLength = OBJECT_MAPPER.writeValueAsBytes(testItem).length; CosmosContainer container = client1.getDatabase(this.cosmosAsyncContainer.getDatabase().getId()).getContainer(this.cosmosAsyncContainer.getId()); Thread.sleep(1000); Instant beforeInitializingRntbdServiceEndpoint = Instant.now(); CosmosItemResponse<TestItem> operation1Response = 
container.upsertItem(testItem); Instant afterInitializingRntbdServiceEndpoint = Instant.now(); Thread.sleep(1000); Instant beforeOperation2 = Instant.now(); CosmosItemResponse<TestItem> operation2Response = container.upsertItem(testItem); Instant afterOperation2 = Instant.now(); Thread.sleep(1000); Instant beforeOperation3 = Instant.now(); CosmosItemResponse<TestItem> operation3Response = container.upsertItem(testItem); Instant afterOperation3 = Instant.now(); validateRntbdStatistics(operation3Response.getDiagnostics(), beforeClientInitialization, beforeInitializingRntbdServiceEndpoint, afterInitializingRntbdServiceEndpoint, beforeOperation2, afterOperation2, beforeOperation3, afterOperation3, connectionStateListenerEnabled); CosmosItemResponse<TestItem> readItemResponse = container.readItem(testItem.id, new PartitionKey(testItem.mypk), TestItem.class); validate(readItemResponse.getDiagnostics(), 0, ModelBridgeInternal.getPayloadLength(readItemResponse)); CosmosItemResponse<Object> deleteItemResponse = container.deleteItem(testItem, null); validate(deleteItemResponse.getDiagnostics(), 0, 0); } finally { LifeCycleUtils.closeQuietly(client1); } } private void validateRntbdStatistics(CosmosDiagnostics cosmosDiagnostics, Instant clientInitializationTime, Instant beforeInitializingRntbdServiceEndpoint, Instant afterInitializingRntbdServiceEndpoint, Instant beforeOperation2, Instant afterOperation2, Instant beforeOperation3, Instant afterOperation3, boolean connectionStateListenerEnabled) throws Exception { ObjectNode diagnostics = (ObjectNode) OBJECT_MAPPER.readTree(cosmosDiagnostics.toString()); JsonNode responseStatisticsList = diagnostics.get("responseStatisticsList"); assertThat(responseStatisticsList.isArray()).isTrue(); assertThat(responseStatisticsList.size()).isGreaterThan(0); JsonNode storeResult = responseStatisticsList.get(0).get("storeResult"); assertThat(storeResult).isNotNull(); JsonNode replicaStatusList = storeResult.get("replicaStatusList"); 
assertThat(replicaStatusList.isArray()).isTrue(); assertThat(replicaStatusList.size()).isGreaterThan(0); JsonNode serviceEndpointStatistics = storeResult.get("serviceEndpointStatistics"); assertThat(serviceEndpointStatistics).isNotNull(); assertThat(serviceEndpointStatistics.get("availableChannels").asInt(-1)).isGreaterThan(0); assertThat(serviceEndpointStatistics.get("acquiredChannels").asInt(-1)).isEqualTo(0); assertThat(serviceEndpointStatistics.get("inflightRequests").asInt(-1)).isEqualTo(1); assertThat(serviceEndpointStatistics.get("isClosed").asBoolean()).isEqualTo(false); JsonNode channelStatistics = storeResult.get("channelStatistics"); assertThat(channelStatistics).isNotNull(); assertThat(channelStatistics.get("channelId").asText()).isNotEmpty(); assertThat(channelStatistics.get("channelTaskQueueSize").asInt(-1)).isGreaterThanOrEqualTo(0); assertThat(channelStatistics.get("pendingRequestsCount").asInt(-1)).isGreaterThanOrEqualTo(0); assertThat(channelStatistics.get("lastReadTime").asText()).isNotEmpty(); assertThat(channelStatistics.get("waitForConnectionInit").asText()).isNotEmpty(); JsonNode connectionStateListenerMetrics = serviceEndpointStatistics.get("cerMetrics"); if (connectionStateListenerEnabled) { assertThat(connectionStateListenerMetrics).isNotNull(); assertThat(connectionStateListenerMetrics.get("lastCallTimestamp")).isNull(); assertThat(connectionStateListenerMetrics.get("lastActionableContext")).isNull(); } else { assertThat(connectionStateListenerMetrics).isNull(); } Instant beforeInitializationThreshold = beforeInitializingRntbdServiceEndpoint.minusMillis(1); assertThat(Instant.parse(serviceEndpointStatistics.get("createdTime").asText())) .isAfterOrEqualTo(beforeInitializationThreshold); Instant afterInitializationThreshold = afterInitializingRntbdServiceEndpoint.plusMillis(2); assertThat(Instant.parse(serviceEndpointStatistics.get("createdTime").asText())) .isBeforeOrEqualTo(afterInitializationThreshold); Instant afterOperation2Threshold = 
afterOperation2.plusMillis(2); Instant beforeOperation2Threshold = beforeOperation2.minusMillis(2); assertThat(Instant.parse(serviceEndpointStatistics.get("lastRequestTime").asText())) .isAfterOrEqualTo(beforeOperation2Threshold.toString()) .isBeforeOrEqualTo(afterOperation2Threshold.toString()); assertThat(Instant.parse(serviceEndpointStatistics.get("lastSuccessfulRequestTime").asText())) .isAfterOrEqualTo(beforeOperation2Threshold.toString()) .isBeforeOrEqualTo(afterOperation2Threshold.toString()); } private void validate(CosmosDiagnostics cosmosDiagnostics, int expectedRequestPayloadSize, int expectedResponsePayloadSize) throws Exception { ObjectNode diagnostics = (ObjectNode) OBJECT_MAPPER.readTree(cosmosDiagnostics.toString()); JsonNode responseStatisticsList = diagnostics.get("responseStatisticsList"); assertThat(responseStatisticsList.isArray()).isTrue(); assertThat(responseStatisticsList.size()).isGreaterThan(0); JsonNode storeResult = responseStatisticsList.get(0).get("storeResult"); boolean hasPayload = storeResult.get("exceptionMessage") == null; assertThat(storeResult).isNotNull(); assertThat(storeResult.get("rntbdRequestLengthInBytes").asInt(-1)).isGreaterThan(expectedRequestPayloadSize); assertThat(storeResult.get("rntbdRequestLengthInBytes").asInt(-1)).isGreaterThan(expectedRequestPayloadSize); assertThat(storeResult.get("requestPayloadLengthInBytes").asInt(-1)).isEqualTo(expectedRequestPayloadSize); if (hasPayload) { assertThat(storeResult.get("responsePayloadLengthInBytes").asInt(-1)).isEqualTo(expectedResponsePayloadSize); } assertThat(storeResult.get("rntbdResponseLengthInBytes").asInt(-1)).isGreaterThan(expectedResponsePayloadSize); } @Test(groups = {"emulator"}, timeOut = TIMEOUT) public void addressResolutionStatistics() { CosmosClient client1 = null; CosmosClient client2 = null; String databaseId = DatabaseForTest.generateId(); String containerId = UUID.randomUUID().toString(); CosmosDatabase cosmosDatabase = null; CosmosContainer 
cosmosContainer = null; try { client1 = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); client1.createDatabase(databaseId); cosmosDatabase = client1.getDatabase(databaseId); cosmosDatabase.createContainer(containerId, "/mypk"); InternalObjectNode internalObjectNode = getInternalObjectNode(); cosmosContainer = cosmosDatabase.getContainer(containerId); CosmosItemResponse<InternalObjectNode> writeResourceResponse = cosmosContainer.createItem(internalObjectNode); assertThat(writeResourceResponse.getDiagnostics().toString()).contains("addressResolutionStatistics"); assertThat(writeResourceResponse.getDiagnostics().toString()).contains("\"inflightRequest\":false"); assertThat(writeResourceResponse.getDiagnostics().toString()).doesNotContain("endTime=\"null\""); assertThat(writeResourceResponse.getDiagnostics().toString()).contains("\"exceptionMessage\":null"); client2 = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); cosmosDatabase = client2.getDatabase(databaseId); cosmosContainer = cosmosDatabase.getContainer(containerId); AsyncDocumentClient asyncDocumentClient = client2.asyncClient().getContextClient(); GlobalAddressResolver addressResolver = (GlobalAddressResolver) FieldUtils.readField(asyncDocumentClient, "addressResolver", true); @SuppressWarnings("rawtypes") Map addressCacheByEndpoint = (Map) FieldUtils.readField(addressResolver, "addressCacheByEndpoint", true); Object endpointCache = addressCacheByEndpoint.values().toArray()[0]; GatewayAddressCache addressCache = (GatewayAddressCache) FieldUtils.readField(endpointCache, "addressCache", true); HttpClient httpClient = httpClient(true); FieldUtils.writeField(addressCache, "httpClient", httpClient, true); new Thread(() -> { try { Thread.sleep(5000); HttpClient httpClient1 = 
httpClient(false); FieldUtils.writeField(addressCache, "httpClient", httpClient1, true); } catch (Exception e) { fail(e.getMessage()); } }).start(); PartitionKey partitionKey = new PartitionKey(internalObjectNode.get("mypk")); CosmosItemResponse<InternalObjectNode> readResourceResponse = cosmosContainer.readItem(internalObjectNode.getId(), partitionKey, new CosmosItemRequestOptions(), InternalObjectNode.class); assertThat(readResourceResponse.getDiagnostics().toString()).contains("addressResolutionStatistics"); assertThat(readResourceResponse.getDiagnostics().toString()).contains("\"inflightRequest\":false"); assertThat(readResourceResponse.getDiagnostics().toString()).doesNotContain("endTime=\"null\""); assertThat(readResourceResponse.getDiagnostics().toString()).contains("\"exceptionMessage\":\"io.netty" + ".channel.AbstractChannel$AnnotatedConnectException: Connection refused"); } catch (Exception ex) { logger.error("Error in test addressResolutionStatistics", ex); fail("This test should not throw exception " + ex); } finally { safeDeleteSyncDatabase(cosmosDatabase); if (client1 != null) { client1.close(); } if (client2 != null) { client2.close(); } } } private InternalObjectNode getInternalObjectNode() { InternalObjectNode internalObjectNode = new InternalObjectNode(); String uuid = UUID.randomUUID().toString(); internalObjectNode.setId(uuid); BridgeInternal.setProperty(internalObjectNode, "mypk", uuid); return internalObjectNode; } private InternalObjectNode getInternalObjectNode(String pkValue) { InternalObjectNode internalObjectNode = new InternalObjectNode(); String uuid = UUID.randomUUID().toString(); internalObjectNode.setId(uuid); BridgeInternal.setProperty(internalObjectNode, "mypk", pkValue); return internalObjectNode; } private List<ClientSideRequestStatistics.StoreResponseStatistics> getStoreResponseStatistics(ClientSideRequestStatistics requestStatistics) throws Exception { Field storeResponseStatisticsField = 
ClientSideRequestStatistics.class.getDeclaredField("supplementalResponseStatisticsList"); storeResponseStatisticsField.setAccessible(true); @SuppressWarnings({"unchecked"}) List<ClientSideRequestStatistics.StoreResponseStatistics> list = (List<ClientSideRequestStatistics.StoreResponseStatistics>) storeResponseStatisticsField.get(requestStatistics); return list; } private void clearStoreResponseStatistics(ClientSideRequestStatistics requestStatistics) throws Exception { Field storeResponseStatisticsField = ClientSideRequestStatistics.class.getDeclaredField("supplementalResponseStatisticsList"); storeResponseStatisticsField.setAccessible(true); storeResponseStatisticsField.set(requestStatistics, new ArrayList<ClientSideRequestStatistics.StoreResponseStatistics>()); } private void validateTransportRequestTimelineGateway(String diagnostics) { assertThat(diagnostics).contains("\"eventName\":\"connectionConfigured\""); assertThat(diagnostics).contains("\"eventName\":\"requestSent\""); assertThat(diagnostics).contains("\"eventName\":\"transitTime\""); assertThat(diagnostics).contains("\"eventName\":\"received\""); } private void validateTransportRequestTimelineDirect(String diagnostics) { assertThat(diagnostics).contains("\"eventName\":\"created\""); assertThat(diagnostics).contains("\"eventName\":\"queued\""); assertThat(diagnostics).contains("\"eventName\":\"channelAcquisitionStarted\""); assertThat(diagnostics).contains("\"eventName\":\"pipelined\""); assertThat(diagnostics).contains("\"eventName\":\"transitTime\""); assertThat(diagnostics).contains("\"eventName\":\"decodeTime"); assertThat(diagnostics).contains("\"eventName\":\"received\""); assertThat(diagnostics).contains("\"eventName\":\"completed\""); assertThat(diagnostics).contains("\"startTimeUTC\""); assertThat(diagnostics).contains("\"durationInMilliSecs\""); } public void isValidJSON(final String json) { try { final JsonParser parser = new JsonFactory().createParser(json); while (parser.nextToken() != null) 
{ } } catch (IOException ex) { fail("Diagnostic string is not in json format ", ex); } } private HttpClient httpClient(boolean fakeProxy) { HttpClientConfig httpClientConfig; if(fakeProxy) { httpClientConfig = new HttpClientConfig(new Configs()) .withProxy(new ProxyOptions(ProxyOptions.Type.HTTP, new InetSocketAddress("localhost", 8888))); } else { httpClientConfig = new HttpClientConfig(new Configs()); } return HttpClient.createFixed(httpClientConfig); } private IndexUtilizationInfo createFromJSONString(String jsonString) { ObjectMapper indexUtilizationInfoObjectMapper = new ObjectMapper(); IndexUtilizationInfo indexUtilizationInfo = null; try { indexUtilizationInfo = indexUtilizationInfoObjectMapper.readValue(jsonString, IndexUtilizationInfo.class); } catch (JsonProcessingException e) { logger.error("Json not correctly formed ", e); } return indexUtilizationInfo; } private void validateRegionContacted(CosmosDiagnostics cosmosDiagnostics, CosmosAsyncClient cosmosAsyncClient) throws Exception { RxDocumentClientImpl rxDocumentClient = (RxDocumentClientImpl) ReflectionUtils.getAsyncDocumentClient(cosmosAsyncClient); GlobalEndpointManager globalEndpointManager = ReflectionUtils.getGlobalEndpointManager(rxDocumentClient); LocationCache locationCache = ReflectionUtils.getLocationCache(globalEndpointManager); Field locationInfoField = LocationCache.class.getDeclaredField("locationInfo"); locationInfoField.setAccessible(true); Object locationInfo = locationInfoField.get(locationCache); Class<?> DatabaseAccountLocationsInfoClass = Class.forName("com.azure.cosmos.implementation.routing" + ".LocationCache$DatabaseAccountLocationsInfo"); Field availableWriteEndpointByLocation = DatabaseAccountLocationsInfoClass.getDeclaredField( "availableWriteEndpointByLocation"); availableWriteEndpointByLocation.setAccessible(true); @SuppressWarnings("unchecked") Map<String, URI> map = (Map<String, URI>) availableWriteEndpointByLocation.get(locationInfo); String regionName = 
map.keySet().iterator().next(); assertThat(cosmosDiagnostics.getContactedRegionNames().size()).isEqualTo(1); assertThat(cosmosDiagnostics.getContactedRegionNames().iterator().next()).isEqualTo(regionName.toLowerCase()); } private void validateChannelStatistics(CosmosDiagnostics cosmosDiagnostics) { for (ClientSideRequestStatistics clientSideRequestStatistics : cosmosDiagnostics.getClientSideRequestStatistics()) { for (ClientSideRequestStatistics.StoreResponseStatistics storeResponseStatistics : clientSideRequestStatistics.getResponseStatisticsList()) { assertThat(storeResponseStatistics).isNotNull(); RntbdChannelStatistics rntbdChannelStatistics = storeResponseStatistics .getStoreResult() .getStoreResponseDiagnostics() .getRntbdChannelStatistics(); assertThat(rntbdChannelStatistics).isNotNull(); try { String rntbdChannelStatisticsString = Utils.getSimpleObjectMapper().writeValueAsString(rntbdChannelStatistics); assertThat(rntbdChannelStatisticsString).contains("\"channelId\":\"" + rntbdChannelStatistics.getChannelId() + "\""); assertThat(rntbdChannelStatisticsString) .contains("\"channelTaskQueueSize\":" + rntbdChannelStatistics.getChannelTaskQueueSize()); assertThat(rntbdChannelStatisticsString) .contains("\"pendingRequestsCount\":" + rntbdChannelStatistics.getPendingRequestsCount()); assertThat(rntbdChannelStatisticsString) .contains("\"lastReadTime\":\"" + rntbdChannelStatistics.getLastReadTime() + "\""); if (rntbdChannelStatistics.getTransitTimeoutCount() > 0) { assertThat(rntbdChannelStatisticsString) .contains("\"transitTimeoutCount\":" + rntbdChannelStatistics.getTransitTimeoutCount()); assertThat(rntbdChannelStatisticsString) .contains("\"transitTimeoutStartingTime\":\"" + rntbdChannelStatistics.getTransitTimeoutStartingTime() + "\""); } else { assertThat(rntbdChannelStatisticsString) .doesNotContain("\"transitTimeoutCount\":" + rntbdChannelStatistics.getTransitTimeoutCount()); assertThat(rntbdChannelStatisticsString) 
.doesNotContain("\"transitTimeoutStartingTime\":\"" + rntbdChannelStatistics.getTransitTimeoutStartingTime() + "\""); } assertThat(rntbdChannelStatisticsString).contains("\"waitForConnectionInit\":" + rntbdChannelStatistics.isWaitForConnectionInit()); } catch (JsonProcessingException e) { fail("Failed to parse RntbdChannelStatistics"); } } } } public static class TestItem { public String id; public String mypk; public TestItem() { } } }
Re-enabled the previously commented-out assertions and removed the stale commented-out code.
public void gatewayDiagnostics() throws Exception {
    // Brief pause so the client has settled before we capture diagnostics.
    Thread.sleep(2000);

    // Create a fresh item through the gateway-mode container and grab the
    // diagnostics string produced for that single create operation.
    InternalObjectNode item = getInternalObjectNode();
    CosmosItemResponse<InternalObjectNode> response = containerGateway.createItem(item);
    String diagnosticsJson = response.getDiagnostics().toString();

    // The diagnostics payload must identify the gateway connection mode and
    // carry the user agent configured for the gateway client.
    assertThat(diagnosticsJson).contains("\"connectionMode\":\"GATEWAY\"");
    assertThat(diagnosticsJson).contains("\"userAgent\":\"" + this.gatewayClientUserAgent + "\"");
    assertThat(diagnosticsJson).doesNotContain(("\"gatewayStatistics\":null"));
    assertThat(diagnosticsJson).contains("\"operationType\":\"Create\"");
    assertThat(diagnosticsJson).contains("\"metaDataName\":\"CONTAINER_LOOK_UP\"");
    assertThat(diagnosticsJson).contains("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\"");
    assertThat(diagnosticsJson).contains("\"userAgent\":\"" + this.gatewayClientUserAgent + "\"");

    // machineId may come either from the cached temp id or from telemetry;
    // accept whichever of the two the SDK recorded.
    assertThat(diagnosticsJson).containsAnyOf(
        "\"machineId\":\"" + tempMachineId + "\"",
        "\"machineId\":\"" + ClientTelemetry.getMachineId(null) + "\""
    );

    // A non-empty activityId must be present somewhere in the JSON.
    assertThat(diagnosticsJson).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*");

    // Structural checks on the diagnostics object itself.
    assertThat(response.getDiagnostics().getDuration()).isNotNull();
    assertThat(response.getDiagnostics().getContactedRegionNames()).isNotNull();

    // Gateway transport timeline events, contacted-region bookkeeping, and
    // overall JSON validity.
    validateTransportRequestTimelineGateway(diagnosticsJson);
    validateRegionContacted(response.getDiagnostics(), gatewayClient.asyncClient());
    isValidJSON(diagnosticsJson);
}
public void gatewayDiagnostics() throws Exception {
    // Give the client a moment to settle before capturing diagnostics.
    Thread.sleep(2000);

    // Issue a single create through the gateway-mode container; every
    // assertion below runs against the diagnostics of this one operation.
    InternalObjectNode item = getInternalObjectNode();
    CosmosItemResponse<InternalObjectNode> response = containerGateway.createItem(item);
    String diagnosticsJson = response.getDiagnostics().toString();

    // Connection mode, user agent, and gateway statistics must be recorded.
    assertThat(diagnosticsJson).contains("\"connectionMode\":\"GATEWAY\"");
    assertThat(diagnosticsJson).contains("\"userAgent\":\"" + this.gatewayClientUserAgent + "\"");
    assertThat(diagnosticsJson).doesNotContain(("\"gatewayStatistics\":null"));
    assertThat(diagnosticsJson).contains("\"operationType\":\"Create\"");
    assertThat(diagnosticsJson).contains("\"metaDataName\":\"CONTAINER_LOOK_UP\"");
    assertThat(diagnosticsJson).contains("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\"");
    assertThat(diagnosticsJson).contains("\"userAgent\":\"" + this.gatewayClientUserAgent + "\"");

    // The machineId may be either the cached temp id or the telemetry id.
    assertThat(diagnosticsJson).containsAnyOf(
        "\"machineId\":\"" + tempMachineId + "\"",
        "\"machineId\":\"" + ClientTelemetry.getMachineId(null) + "\""
    );

    // A non-empty activityId must appear in the diagnostics JSON.
    assertThat(diagnosticsJson).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*");

    // Structural checks: duration and region accessors must be populated.
    assertThat(response.getDiagnostics().getDuration()).isNotNull();
    assertThat(response.getDiagnostics().getContactedRegionNames()).isNotNull();
    assertThat(response.getDiagnostics().getRegionsContacted()).isNotEmpty();

    // Gateway transport timeline, contacted-region bookkeeping, and JSON
    // well-formedness.
    validateTransportRequestTimelineGateway(diagnosticsJson);
    validateRegionContacted(response.getDiagnostics(), gatewayClient.asyncClient());
    isValidJSON(diagnosticsJson);
}
class CosmosDiagnosticsTest extends TestSuiteBase { private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); private static final String USER_AGENT_SUFFIX_GATEWAY_CLIENT = "gatewayClientSuffix"; private static final String USER_AGENT_SUFFIX_DIRECT_CLIENT = "directClientSuffix"; private static final DateTimeFormatter RESPONSE_TIME_FORMATTER = DateTimeFormatter.ISO_INSTANT; private static final String tempMachineId = getTempMachineId(); private CosmosClient gatewayClient; private CosmosClient directClient; private CosmosAsyncDatabase cosmosAsyncDatabase; private CosmosContainer containerGateway; private CosmosContainer containerDirect; private CosmosAsyncContainer cosmosAsyncContainer; private String gatewayClientUserAgent; private String directClientUserAgent; private static String getTempMachineId() { Field field = null; try { field = RxDocumentClientImpl.class.getDeclaredField("tempMachineId"); } catch (NoSuchFieldException e) { fail(e.toString()); } field.setAccessible(true); try { return (String)field.get(null); } catch (IllegalAccessException e) { fail(e.toString()); return null; } } @BeforeClass(groups = {"simple"}, timeOut = SETUP_TIMEOUT) public void beforeClass() { assertThat(this.gatewayClient).isNull(); gatewayClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .userAgentSuffix(USER_AGENT_SUFFIX_GATEWAY_CLIENT) .gatewayMode() .buildClient(); UserAgentContainer userAgentContainer = new UserAgentContainer(); userAgentContainer.setSuffix(USER_AGENT_SUFFIX_GATEWAY_CLIENT); this.gatewayClientUserAgent = userAgentContainer.getUserAgent(); directClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .userAgentSuffix(USER_AGENT_SUFFIX_DIRECT_CLIENT) .directMode() .buildClient(); userAgentContainer.setSuffix(USER_AGENT_SUFFIX_DIRECT_CLIENT); this.directClientUserAgent = 
userAgentContainer.getUserAgent(); cosmosAsyncContainer = getSharedMultiPartitionCosmosContainer(this.gatewayClient.asyncClient()); cosmosAsyncDatabase = directClient.asyncClient().getDatabase(cosmosAsyncContainer.getDatabase().getId()); containerGateway = gatewayClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); containerDirect = directClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); } @AfterClass(groups = {"simple"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() { if (this.gatewayClient != null) { this.gatewayClient.close(); } if (this.directClient != null) { this.directClient.close(); } } @DataProvider(name = "query") private Object[][] query() { return new Object[][]{ new Object[] { "Select * from c where c.id = 'wrongId'", true }, new Object[] { "Select top 1 * from c where c.id = 'wrongId'", true }, new Object[] { "Select * from c where c.id = 'wrongId' order by c.id", true }, new Object[] { "Select count(1) from c where c.id = 'wrongId' group by c.pk", true }, new Object[] { "Select distinct c.pk from c where c.id = 'wrongId'", true }, new Object[] { "Select * from c where c.id = 'wrongId'", false }, new Object[] { "Select top 1 * from c where c.id = 'wrongId'", false }, new Object[] { "Select * from c where c.id = 'wrongId' order by c.id", false }, new Object[] { "Select count(1) from c where c.id = 'wrongId' group by c.pk", false }, new Object[] { "Select distinct c.pk from c where c.id = 'wrongId'", false }, new Object[] { "Select * from c where c.id = 'wrongId'", false }, new Object[] { "Select top 1 * from c where c.id = 'wrongId'", false }, new Object[] { "Select * from c where c.id = 'wrongId' order by c.id", false }, new Object[] { "Select count(1) from c where c.id = 'wrongId' group by c.pk", false }, new Object[] { "Select distinct c.pk from c where c.id = 'wrongId'", false }, }; } @DataProvider(name = 
"readAllItemsOfLogicalPartition") private Object[][] readAllItemsOfLogicalPartition() { return new Object[][]{ new Object[] { 1, true }, new Object[] { 5, null }, new Object[] { 20, null }, new Object[] { 1, false }, new Object[] { 5, false }, new Object[] { 20, false }, }; } @DataProvider(name = "connectionStateListenerArgProvider") public Object[][] connectionStateListenerArgProvider() { return new Object[][]{ {true}, {false} }; } @Test(groups = {"simple"}, timeOut = TIMEOUT) @Test(groups = {"simple"}, timeOut = TIMEOUT) public void gatewayDiagnosticsOnException() throws Exception { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = null; try { createResponse = this.containerGateway.createItem(internalObjectNode); CosmosItemRequestOptions cosmosItemRequestOptions = new CosmosItemRequestOptions(); ModelBridgeInternal.setPartitionKey(cosmosItemRequestOptions, new PartitionKey("wrongPartitionKey")); CosmosItemResponse<InternalObjectNode> readResponse = this.containerGateway.readItem(BridgeInternal.getProperties(createResponse).getId(), new PartitionKey("wrongPartitionKey"), InternalObjectNode.class); fail("request should fail as partition key is wrong"); } catch (CosmosException exception) { isValidJSON(exception.toString()); isValidJSON(exception.getMessage()); String diagnostics = exception.getDiagnostics().toString(); assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND); assertThat(diagnostics).contains("\"connectionMode\":\"GATEWAY\""); assertThat(diagnostics).contains("\"userAgent\":\"" + this.gatewayClientUserAgent + "\""); assertThat(diagnostics).doesNotContain(("\"gatewayStatistics\":null")); assertThat(diagnostics).contains("\"statusCode\":404"); assertThat(diagnostics).contains("\"operationType\":\"Read\""); assertThat(diagnostics).contains("\"userAgent\":\"" + this.gatewayClientUserAgent + "\""); 
assertThat(diagnostics).contains("\"exceptionMessage\":\"Entity with the specified id does not exist in the system."); assertThat(diagnostics).contains("\"exceptionResponseHeaders\""); assertThat(diagnostics).doesNotContain("\"exceptionResponseHeaders\": \"{}\""); assertThat(diagnostics).containsAnyOf( "\"machineId\":\"" + tempMachineId + "\"", "\"machineId\":\"" + ClientTelemetry.getMachineId(null) + "\"" ); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); assertThat(diagnostics).doesNotContain(("\"resourceAddress\":null")); assertThat(createResponse.getDiagnostics().getContactedRegionNames()).isNotNull(); validateRegionContacted(createResponse.getDiagnostics(), this.containerGateway.asyncContainer.getDatabase().getClient()); assertThat(exception.getDiagnostics().getDuration()).isNotNull(); validateTransportRequestTimelineGateway(diagnostics); isValidJSON(diagnostics); } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void systemDiagnosticsForSystemStateInformation() { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = this.containerGateway.createItem(internalObjectNode); String diagnostics = createResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("systemInformation"); assertThat(diagnostics).contains("usedMemory"); assertThat(diagnostics).contains("availableMemory"); assertThat(diagnostics).contains("systemCpuLoad"); assertThat(diagnostics).contains("\"userAgent\":\"" + this.gatewayClientUserAgent + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); assertThat(createResponse.getDiagnostics().getDuration()).isNotNull(); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void directDiagnostics() throws Exception { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = containerDirect.createItem(internalObjectNode); 
validateDirectModeDiagnosticsOnSuccess(createResponse.getDiagnostics(), directClient, this.directClientUserAgent); try { containerDirect.createItem(internalObjectNode); fail("expected 409"); } catch (CosmosException e) { validateDirectModeDiagnosticsOnException(e.getDiagnostics(), this.directClientUserAgent); } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void requestSessionTokenDiagnostics() { CosmosClient testSessionTokenClient = null; try { testSessionTokenClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .consistencyLevel(ConsistencyLevel.SESSION) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); CosmosContainer cosmosContainer = testSessionTokenClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode); String diagnostics = createResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"requestSessionToken\":null"); String sessionToken = createResponse.getSessionToken(); CosmosItemResponse<InternalObjectNode> readResponse = cosmosContainer.readItem(BridgeInternal.getProperties(createResponse).getId(), new PartitionKey(BridgeInternal.getProperties(createResponse).getId()), InternalObjectNode.class); diagnostics = readResponse.getDiagnostics().toString(); assertThat(diagnostics).contains(String.format("\"requestSessionToken\":\"%s\"", sessionToken)); CosmosBatch batch = CosmosBatch.createCosmosBatch(new PartitionKey( BridgeInternal.getProperties(createResponse).getId())); internalObjectNode = getInternalObjectNode(); batch.createItemOperation(internalObjectNode); CosmosBatchResponse batchResponse = cosmosContainer.executeCosmosBatch(batch, new CosmosBatchRequestOptions().setSessionToken(readResponse.getSessionToken())); diagnostics = 
batchResponse.getDiagnostics().toString(); assertThat(diagnostics).contains(String.format("\"requestSessionToken\":\"%s\"", readResponse.getSessionToken())); } finally { if (testSessionTokenClient != null) { testSessionTokenClient.close(); } } } @Test(groups = {"simple"}) public void databaseAccountToClients() { CosmosClient testClient = null; try { testClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); CosmosContainer cosmosContainer = testClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode); String diagnostics = createResponse.getDiagnostics().toString(); assertThat(diagnostics).contains(String.format("\"clientEndpoints\"" + ":{\"%s\"", TestConfigurations.HOST)); int clientsIndex = diagnostics.indexOf("\"clientEndpoints\":"); String[] substrings = diagnostics.substring(clientsIndex, clientsIndex + 120) .split("}")[0].split(":"); String intString = substrings[substrings.length-1]; int intValue = Integer.parseInt(intString); CosmosClient testClient2 = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); internalObjectNode = getInternalObjectNode(); createResponse = cosmosContainer.createItem(internalObjectNode); diagnostics = createResponse.getDiagnostics().toString(); assertThat(diagnostics).contains(String.format("\"clientEndpoints\"" + ":{\"%s\"", TestConfigurations.HOST)); clientsIndex = diagnostics.indexOf("\"clientEndpoints\":"); substrings = diagnostics.substring(clientsIndex, clientsIndex + 120) .split("}")[0].split(":"); intString = substrings[substrings.length-1]; 
assertThat(Integer.parseInt(intString)).isEqualTo(intValue+1); testClient2.close(); } finally { if (testClient != null) { testClient.close(); } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void queryPlanDiagnostics() throws JsonProcessingException { List<String> itemIdList = new ArrayList<>(); for(int i = 0; i< 100; i++) { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = containerDirect.createItem(internalObjectNode); if(i%20 == 0) { itemIdList.add(internalObjectNode.getId()); } } String queryDiagnostics = null; List<String> queryList = new ArrayList<>(); queryList.add("Select * from c"); StringBuilder queryBuilder = new StringBuilder("SELECT * from c where c.mypk in ("); for(int i = 0 ; i < itemIdList.size(); i++){ queryBuilder.append("'").append(itemIdList.get(i)).append("'"); if(i < (itemIdList.size()-1)) { queryBuilder.append(","); } else { queryBuilder.append(")"); } } queryList.add(queryBuilder.toString()); queryList.add("Select * from c where c.id = 'wrongId'"); for(String query : queryList) { int feedResponseCounter = 0; CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); options.setQueryMetricsEnabled(true); Iterator<FeedResponse<InternalObjectNode>> iterator = containerDirect.queryItems(query, options, InternalObjectNode.class).iterableByPage().iterator(); while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); if (feedResponseCounter == 0) { assertThat(queryDiagnostics).contains("QueryPlan Start Time (UTC)="); assertThat(queryDiagnostics).contains("QueryPlan End Time (UTC)="); assertThat(queryDiagnostics).contains("QueryPlan Duration (ms)="); String requestTimeLine = OBJECT_MAPPER.writeValueAsString(feedResponse.getCosmosDiagnostics().getFeedResponseDiagnostics().getQueryPlanDiagnosticsContext().getRequestTimeline()); 
assertThat(requestTimeLine).contains("connectionConfigured"); assertThat(requestTimeLine).contains("requestSent"); assertThat(requestTimeLine).contains("transitTime"); assertThat(requestTimeLine).contains("received"); } else { assertThat(queryDiagnostics).doesNotContain("QueryPlan Start Time (UTC)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan End Time (UTC)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan Duration (ms)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan RequestTimeline ="); } feedResponseCounter++; } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void readManyDiagnostics() { String pkValue = UUID.randomUUID().toString(); PartitionKey partitionKey = new PartitionKey(pkValue); List<CosmosItemIdentity> itemIdList = new ArrayList<>(); for(int i = 0; i< 100; i++) { InternalObjectNode internalObjectNode = getInternalObjectNode(pkValue); CosmosItemResponse<InternalObjectNode> createResponse = containerDirect.createItem(internalObjectNode); if(i%20 == 0) { itemIdList.add(new CosmosItemIdentity(partitionKey, internalObjectNode.getId())); } } FeedResponse<InternalObjectNode> response = containerDirect.readMany(itemIdList, InternalObjectNode.class); FeedResponseDiagnostics diagnostics = response.getCosmosDiagnostics().getFeedResponseDiagnostics(); assertThat(diagnostics.getClientSideRequestStatistics().size()).isEqualTo(1); assertThat(diagnostics.getQueryMetricsMap().values().iterator().next().getRetrievedDocumentCount()).isEqualTo(itemIdList.size()); String cosmosDiagnosticsString = response.getCosmosDiagnostics().toString(); assertThat(cosmosDiagnosticsString).contains("\"userAgent\":\"" + this.directClientUserAgent + "\""); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void queryMetricsWithIndexMetrics() { List<String> itemIdList = new ArrayList<>(); for(int i = 0; i< 100; i++) { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = 
containerDirect.createItem(internalObjectNode); if(i%20 == 0) { itemIdList.add(internalObjectNode.getId()); } } String queryDiagnostics = null; List<String> queryList = new ArrayList<>(); StringBuilder queryBuilder = new StringBuilder("SELECT * from c where c.mypk in ("); for(int i = 0 ; i < itemIdList.size(); i++){ queryBuilder.append("'").append(itemIdList.get(i)).append("'"); if(i < (itemIdList.size()-1)) { queryBuilder.append(","); } else { queryBuilder.append(")"); } } queryList.add(queryBuilder.toString()); for (String query : queryList) { CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); options.setQueryMetricsEnabled(true); options.setIndexMetricsEnabled(true); Iterator<FeedResponse<InternalObjectNode>> iterator = containerDirect.queryItems(query, options, InternalObjectNode.class).iterableByPage().iterator(); while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); logger.info("This is query diagnostics {}", queryDiagnostics); if (feedResponse.getResponseHeaders().containsKey(HttpConstants.HttpHeaders.INDEX_UTILIZATION)) { assertThat(feedResponse.getResponseHeaders().get(HttpConstants.HttpHeaders.INDEX_UTILIZATION)).isNotNull(); assertThat(createFromJSONString(Utils.decodeBase64String(feedResponse.getResponseHeaders().get(HttpConstants.HttpHeaders.INDEX_UTILIZATION))).getUtilizedSingleIndexes()).isNotNull(); } } } } @Test(groups = {"simple"}, dataProvider = "query", timeOut = TIMEOUT) public void queryMetrics(String query, Boolean qmEnabled) { CosmosContainer directContainer = this.directClient.getDatabase(this.cosmosAsyncContainer.getDatabase().getId()) .getContainer(this.cosmosAsyncContainer.getId()); CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); if (qmEnabled != null) { options.setQueryMetricsEnabled(qmEnabled); } boolean qroupByFirstResponse = true; Iterator<FeedResponse<InternalObjectNode>> iterator = 
directContainer.queryItems(query, options, InternalObjectNode.class).iterableByPage().iterator(); assertThat(iterator.hasNext()).isTrue(); while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); String queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); assertThat(feedResponse.getResults().size()).isEqualTo(0); if (!query.contains("group by") || qroupByFirstResponse) { validateQueryDiagnostics(queryDiagnostics, qmEnabled, true); validateDirectModeQueryDiagnostics(queryDiagnostics, this.directClientUserAgent); if (query.contains("group by")) { qroupByFirstResponse = false; } } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void queryDiagnosticsOnOrderBy() { String containerId = "testcontainer"; cosmosAsyncDatabase.createContainer(containerId, "/mypk", ThroughputProperties.createManualThroughput(40000)).block(); CosmosAsyncContainer testcontainer = cosmosAsyncDatabase.getContainer(containerId); CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); options.setConsistencyLevel(ConsistencyLevel.EVENTUAL); testcontainer.createItem(getInternalObjectNode()).block(); options.setMaxDegreeOfParallelism(-1); String query = "SELECT * from c ORDER BY c._ts DESC"; CosmosPagedFlux<InternalObjectNode> cosmosPagedFlux = testcontainer.queryItems(query, options, InternalObjectNode.class); Set<String> partitionKeyRangeIds = new HashSet<>(); Set<String> pkRids = new HashSet<>(); cosmosPagedFlux.byPage().flatMap(feedResponse -> { String cosmosDiagnosticsString = feedResponse.getCosmosDiagnostics().toString(); Pattern pattern = Pattern.compile("(\"partitionKeyRangeId\":\")(\\d)"); Matcher matcher = pattern.matcher(cosmosDiagnosticsString); while (matcher.find()) { String group = matcher.group(2); partitionKeyRangeIds.add(group); } pattern = Pattern.compile("(pkrId:)(\\d)"); matcher = pattern.matcher(cosmosDiagnosticsString); while (matcher.find()) { String group = matcher.group(2); pkRids.add(group); } return 
Flux.just(feedResponse); }).blockLast(); assertThat(pkRids).isNotEmpty(); assertThat(pkRids).isEqualTo(partitionKeyRangeIds); deleteCollection(testcontainer); } private void validateDirectModeDiagnosticsOnSuccess( CosmosDiagnostics cosmosDiagnostics, CosmosClient testDirectClient, String userAgent) throws Exception { String diagnostics = cosmosDiagnostics.toString(); assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\""); assertThat(diagnostics).contains("supplementalResponseStatisticsList"); assertThat(diagnostics).contains("\"gatewayStatistics\":null"); assertThat(diagnostics).contains("addressResolutionStatistics"); assertThat(diagnostics).contains("\"metaDataName\":\"CONTAINER_LOOK_UP\""); assertThat(diagnostics).contains("\"metaDataName\":\"PARTITION_KEY_RANGE_LOOK_UP\""); assertThat(diagnostics).contains("\"metaDataName\":\"SERVER_ADDRESS_LOOKUP\""); assertThat(diagnostics).contains("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\""); assertThat(diagnostics).contains("\"userAgent\":\"" + userAgent + "\""); assertThat(diagnostics).containsAnyOf( "\"machineId\":\"" + tempMachineId + "\"", "\"machineId\":\"" + ClientTelemetry.getMachineId(null) + "\"" ); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); assertThat(diagnostics).contains("\"backendLatencyInMs\""); assertThat(diagnostics).contains("\"retryAfterInMs\""); assertThat(diagnostics).contains("\"channelStatistics\""); assertThat(cosmosDiagnostics.getContactedRegionNames()).isNotEmpty(); assertThat(cosmosDiagnostics.getDuration()).isNotNull(); validateTransportRequestTimelineDirect(diagnostics); validateRegionContacted(cosmosDiagnostics, testDirectClient.asyncClient()); validateChannelStatistics(cosmosDiagnostics); isValidJSON(diagnostics); } private void validateDirectModeDiagnosticsOnException(CosmosDiagnostics cosmosDiagnostics, String userAgent) { String diagnosticsString = cosmosDiagnostics.toString(); 
assertThat(diagnosticsString).contains("\"backendLatencyInMs\""); assertThat(diagnosticsString).contains("\"userAgent\":\"" + userAgent + "\""); assertThat(diagnosticsString).contains("\"retryAfterInMs\""); assertThat(diagnosticsString).contains("\"exceptionMessage\":\"[\\\"Resource with specified id or name already exists.\\\"]\""); assertThat(diagnosticsString).contains("\"exceptionResponseHeaders\""); assertThat(diagnosticsString).doesNotContain("\"exceptionResponseHeaders\": \"{}\""); validateTransportRequestTimelineDirect(diagnosticsString); validateChannelStatistics(cosmosDiagnostics); } private void validateDirectModeQueryDiagnostics(String diagnostics, String userAgent) { assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\""); assertThat(diagnostics).contains("supplementalResponseStatisticsList"); assertThat(diagnostics).contains("responseStatisticsList"); assertThat(diagnostics).contains("\"gatewayStatistics\":null"); assertThat(diagnostics).contains("addressResolutionStatistics"); assertThat(diagnostics).contains("\"userAgent\":\"" + userAgent + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); } private void validateGatewayModeQueryDiagnostics(String diagnostics, String userAgent) { assertThat(diagnostics).contains("\"connectionMode\":\"GATEWAY\""); assertThat(diagnostics).doesNotContain(("\"gatewayStatistics\":null")); assertThat(diagnostics).contains("\"operationType\":\"Query\""); assertThat(diagnostics).contains("\"userAgent\":\"" + userAgent + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); assertThat(diagnostics).contains("\"regionsContacted\""); } @Test(groups = {"simple"}, dataProvider = "query", timeOut = TIMEOUT*2) public void queryDiagnosticsGatewayMode(String query, Boolean qmEnabled) { CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); List<String> itemIdList = new ArrayList<>(); for (int i = 0; i < 100; i++) { InternalObjectNode 
internalObjectNode = getInternalObjectNode(); containerGateway.createItem(internalObjectNode); if (i % 20 == 0) { itemIdList.add(internalObjectNode.getId()); } } boolean qroupByFirstResponse = true; if (qmEnabled != null) { options.setQueryMetricsEnabled(qmEnabled); } Iterator<FeedResponse<InternalObjectNode>> iterator = containerGateway .queryItems(query, options, InternalObjectNode.class) .iterableByPage() .iterator(); assertThat(iterator.hasNext()).isTrue(); while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); String queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); assertThat(feedResponse.getResults().size()).isEqualTo(0); if (!query.contains("group by") || qroupByFirstResponse) { validateQueryDiagnostics(queryDiagnostics, qmEnabled, true); validateGatewayModeQueryDiagnostics(queryDiagnostics, this.gatewayClientUserAgent); if (query.contains("group by")) { qroupByFirstResponse = false; } } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void queryMetricsWithADifferentLocale() { Locale.setDefault(Locale.GERMAN); String query = "select * from root where root.id= \"someid\""; CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); Iterator<FeedResponse<InternalObjectNode>> iterator = this.containerGateway.queryItems(query, options, InternalObjectNode.class) .iterableByPage().iterator(); double requestCharge = 0; while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); requestCharge += feedResponse.getRequestCharge(); } assertThat(requestCharge).isGreaterThan(0); Locale.setDefault(Locale.ROOT); } private static void validateQueryDiagnostics( String queryDiagnostics, Boolean qmEnabled, boolean expectQueryPlanDiagnostics) { if (qmEnabled == null || qmEnabled) { assertThat(queryDiagnostics).contains("Retrieved Document Count"); assertThat(queryDiagnostics).contains("Query Preparation Times"); assertThat(queryDiagnostics).contains("Runtime Execution 
Times"); assertThat(queryDiagnostics).contains("Partition Execution Timeline"); } else { assertThat(queryDiagnostics).doesNotContain("Retrieved Document Count"); assertThat(queryDiagnostics).doesNotContain("Query Preparation Times"); assertThat(queryDiagnostics).doesNotContain("Runtime Execution Times"); assertThat(queryDiagnostics).doesNotContain("Partition Execution Timeline"); } if (expectQueryPlanDiagnostics) { assertThat(queryDiagnostics).contains("QueryPlan Start Time (UTC)="); assertThat(queryDiagnostics).contains("QueryPlan End Time (UTC)="); assertThat(queryDiagnostics).contains("QueryPlan Duration (ms)="); } else { assertThat(queryDiagnostics).doesNotContain("QueryPlan Start Time (UTC)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan End Time (UTC)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan Duration (ms)="); } } @Test(groups = {"simple"}, dataProvider = "readAllItemsOfLogicalPartition", timeOut = TIMEOUT) public void queryMetricsForReadAllItemsOfLogicalPartition(Integer expectedItemCount, Boolean qmEnabled) { String pkValue = UUID.randomUUID().toString(); for (int i = 0; i < expectedItemCount; i++) { InternalObjectNode internalObjectNode = getInternalObjectNode(pkValue); CosmosItemResponse<InternalObjectNode> createResponse = containerGateway.createItem(internalObjectNode); } CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); if (qmEnabled != null) { options = options.setQueryMetricsEnabled(qmEnabled); } ModelBridgeInternal.setQueryRequestOptionsMaxItemCount(options, 5); Iterator<FeedResponse<InternalObjectNode>> iterator = this.containerGateway .readAllItems( new PartitionKey(pkValue), options, InternalObjectNode.class) .iterableByPage().iterator(); assertThat(iterator.hasNext()).isTrue(); int actualItemCount = 0; while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); String queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); actualItemCount += 
feedResponse.getResults().size(); validateQueryDiagnostics(queryDiagnostics, qmEnabled, false); } assertThat(actualItemCount).isEqualTo(expectedItemCount); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void directDiagnosticsOnException() throws Exception { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = null; try { createResponse = containerDirect.createItem(internalObjectNode); CosmosItemRequestOptions cosmosItemRequestOptions = new CosmosItemRequestOptions(); ModelBridgeInternal.setPartitionKey(cosmosItemRequestOptions, new PartitionKey("wrongPartitionKey")); CosmosItemResponse<InternalObjectNode> readResponse = containerDirect.readItem(BridgeInternal.getProperties(createResponse).getId(), new PartitionKey("wrongPartitionKey"), InternalObjectNode.class); fail("request should fail as partition key is wrong"); } catch (CosmosException exception) { isValidJSON(exception.toString()); isValidJSON(exception.getMessage()); String diagnostics = exception.getDiagnostics().toString(); assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND); assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\""); assertThat(diagnostics).contains("\"userAgent\":\"" + this.directClientUserAgent + "\""); assertThat(diagnostics).doesNotContain(("\"resourceAddress\":null")); assertThat(exception.getDiagnostics().getContactedRegionNames()).isNotEmpty(); assertThat(exception.getDiagnostics().getDuration()).isNotNull(); assertThat(diagnostics).contains("\"backendLatencyInMs\""); assertThat(diagnostics).contains("\"retryAfterInMs\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); assertThat(diagnostics).contains("\"exceptionMessage\":\"[\\\"Resource Not Found."); assertThat(diagnostics).contains("\"exceptionResponseHeaders\""); assertThat(diagnostics).doesNotContain("\"exceptionResponseHeaders\":null"); isValidJSON(diagnostics); 
validateTransportRequestTimelineDirect(diagnostics); validateRegionContacted(createResponse.getDiagnostics(), directClient.asyncClient()); ObjectNode diagnosticsNode = (ObjectNode) OBJECT_MAPPER.readTree(diagnostics); JsonNode responseStatisticsList = diagnosticsNode.get("responseStatisticsList"); assertThat(responseStatisticsList.isArray()).isTrue(); assertThat(responseStatisticsList.size()).isGreaterThan(0); JsonNode storeResult = responseStatisticsList.get(0).get("storeResult"); assertThat(storeResult).isNotNull(); JsonNode replicaStatusList = storeResult.get("replicaStatusList"); assertThat(replicaStatusList.isArray()).isTrue(); assertThat(replicaStatusList.size()).isGreaterThan(0); } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void directDiagnosticsOnMetadataException() { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosClient client = null; try { client = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); CosmosContainer container = client.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); HttpClient mockHttpClient = Mockito.mock(HttpClient.class); Mockito.when(mockHttpClient.send(Mockito.any(HttpRequest.class), Mockito.any(Duration.class))) .thenReturn(Mono.error(new CosmosException(400, "TestBadRequest"))); RxStoreModel rxGatewayStoreModel = rxGatewayStoreModel = ReflectionUtils.getGatewayProxy((RxDocumentClientImpl) client.asyncClient().getDocClientWrapper()); ReflectionUtils.setGatewayHttpClient(rxGatewayStoreModel, mockHttpClient); container.createItem(internalObjectNode); fail("request should fail as bad request"); } catch (CosmosException exception) { isValidJSON(exception.toString()); isValidJSON(exception.getMessage()); String diagnostics = exception.getDiagnostics().toString(); 
assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.BADREQUEST); assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\""); assertThat(diagnostics).contains("\"exceptionMessage\":\"TestBadRequest\""); assertThat(diagnostics).doesNotContain(("\"resourceAddress\":null")); assertThat(diagnostics).contains("\"resourceType\":\"DocumentCollection\""); assertThat(exception.getDiagnostics().getContactedRegionNames()).isNotEmpty(); assertThat(exception.getDiagnostics().getDuration()).isNotNull(); isValidJSON(diagnostics); } finally { if (client != null) { client.close(); } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void supplementalResponseStatisticsList() throws Exception { ClientSideRequestStatistics clientSideRequestStatistics = new ClientSideRequestStatistics(mockDiagnosticsClientContext()); for (int i = 0; i < 15; i++) { RxDocumentServiceRequest rxDocumentServiceRequest = RxDocumentServiceRequest.create(mockDiagnosticsClientContext(), OperationType.Head, ResourceType.Document); clientSideRequestStatistics.recordResponse(rxDocumentServiceRequest, null, null); } List<ClientSideRequestStatistics.StoreResponseStatistics> storeResponseStatistics = getStoreResponseStatistics(clientSideRequestStatistics); ObjectMapper objectMapper = new ObjectMapper(); String diagnostics = objectMapper.writeValueAsString(clientSideRequestStatistics); JsonNode jsonNode = objectMapper.readTree(diagnostics); ArrayNode supplementalResponseStatisticsListNode = (ArrayNode) jsonNode.get("supplementalResponseStatisticsList"); assertThat(storeResponseStatistics.size()).isEqualTo(15); assertThat(supplementalResponseStatisticsListNode.size()).isEqualTo(10); clearStoreResponseStatistics(clientSideRequestStatistics); storeResponseStatistics = getStoreResponseStatistics(clientSideRequestStatistics); assertThat(storeResponseStatistics.size()).isEqualTo(0); for (int i = 0; i < 7; i++) { RxDocumentServiceRequest rxDocumentServiceRequest = 
RxDocumentServiceRequest.create(mockDiagnosticsClientContext(), OperationType.Head, ResourceType.Document);
        clientSideRequestStatistics.recordResponse(rxDocumentServiceRequest, null, null);
    }
    storeResponseStatistics = getStoreResponseStatistics(clientSideRequestStatistics);
    objectMapper = new ObjectMapper();
    diagnostics = objectMapper.writeValueAsString(clientSideRequestStatistics);
    jsonNode = objectMapper.readTree(diagnostics);
    supplementalResponseStatisticsListNode = (ArrayNode) jsonNode.get("supplementalResponseStatisticsList");
    // Below the serialization cap both views agree.
    assertThat(storeResponseStatistics.size()).isEqualTo(7);
    assertThat(supplementalResponseStatisticsListNode.size()).isEqualTo(7);
    for(JsonNode node : supplementalResponseStatisticsListNode) {
        assertThat(node.get("storeResult").asText()).isNotNull();
        // The recorded response time must be a recent ISO-8601 instant (< 5s old).
        String requestResponseTimeUTC = node.get("requestResponseTimeUTC").asText();
        Instant instant = Instant.from(RESPONSE_TIME_FORMATTER.parse(requestResponseTimeUTC));
        assertThat(Instant.now().toEpochMilli() - instant.toEpochMilli()).isLessThan(5000);
        assertThat(node.get("requestResponseTimeUTC")).isNotNull();
        assertThat(node.get("requestOperationType")).isNotNull();
        assertThat(node.get("requestSessionToken")).isNotNull();
    }
}

// Verifies that each API surface tags its diagnostics with the expected
// serializationType marker (database/container/item deserialization, and
// partition-key fetch when the PK is not supplied by the caller).
@Test(groups = {"simple"}, timeOut = TIMEOUT)
public void serializationOnVariousScenarios() {
    CosmosDatabaseResponse cosmosDatabase = gatewayClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).read();
    String diagnostics = cosmosDatabase.getDiagnostics().toString();
    assertThat(diagnostics).contains("\"serializationType\":\"DATABASE_DESERIALIZATION\"");
    CosmosContainerResponse containerResponse = this.containerGateway.read();
    diagnostics = containerResponse.getDiagnostics().toString();
    assertThat(diagnostics).contains("\"serializationType\":\"CONTAINER_DESERIALIZATION\"");
    TestItem testItem = new TestItem();
    testItem.id = "TestId";
    testItem.mypk = "TestPk";
    // No PartitionKey supplied -> SDK must fetch it from the payload.
    CosmosItemResponse<TestItem> itemResponse = this.containerGateway.createItem(testItem);
    diagnostics = itemResponse.getDiagnostics().toString();
    assertThat(diagnostics).contains("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\"");
    testItem.id = "TestId2";
    testItem.mypk = "TestPk";
    // Explicit PartitionKey supplied -> no PK fetch, and the item is not yet deserialized.
    itemResponse = this.containerGateway.createItem(testItem, new PartitionKey("TestPk"), null);
    diagnostics = itemResponse.getDiagnostics().toString();
    assertThat(diagnostics).doesNotContain("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\"");
    assertThat(diagnostics).doesNotContain("\"serializationType\":\"ITEM_DESERIALIZATION\"");
    // Accessing the typed item lazily triggers item deserialization.
    TestItem readTestItem = itemResponse.getItem();
    diagnostics = itemResponse.getDiagnostics().toString();
    assertThat(diagnostics).contains("\"serializationType\":\"ITEM_DESERIALIZATION\"");
    CosmosItemResponse<InternalObjectNode> readItemResponse = this.containerGateway.readItem(testItem.id, new PartitionKey(testItem.mypk), null, InternalObjectNode.class);
    InternalObjectNode properties = readItemResponse.getItem();
    diagnostics = readItemResponse.getDiagnostics().toString();
    assertThat(diagnostics).contains("\"serializationType\":\"ITEM_DESERIALIZATION\"");
    assertThat(diagnostics).contains("\"userAgent\":\"" + this.gatewayClientUserAgent + "\"");
    assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*");
}

// Verifies that RNTBD request/response wire lengths recorded in the diagnostics
// bracket the actual payload sizes for create / conflict / read / delete.
@Test(groups = {"simple"}, timeOut = TIMEOUT)
public void rntbdRequestResponseLengthStatistics() throws Exception {
    TestItem testItem = new TestItem();
    testItem.id = UUID.randomUUID().toString();
    testItem.mypk = UUID.randomUUID().toString();
    int testItemLength = OBJECT_MAPPER.writeValueAsBytes(testItem).length;
    CosmosContainer container = directClient.getDatabase(this.cosmosAsyncContainer.getDatabase().getId()).getContainer(this.cosmosAsyncContainer.getId());
    CosmosItemResponse<TestItem> createItemResponse = container.createItem(testItem);
    validate(createItemResponse.getDiagnostics(), testItemLength, ModelBridgeInternal.getPayloadLength(createItemResponse));
    try {
        // Creating the same item again must conflict (409); the failure diagnostics
        // still carry the request length but no response payload.
        container.createItem(testItem);
        fail("expected to fail due to 409");
    } catch (CosmosException e) {
        logger.info("Diagnostics are : {}", e.getDiagnostics());
        String diagnostics = e.getDiagnostics().toString();
        assertThat(diagnostics).contains("\"exceptionMessage\":\"[\\\"Resource with specified id or name already exists.\\\"]\"");
        assertThat(diagnostics).contains("\"exceptionResponseHeaders\"");
        assertThat(diagnostics).doesNotContain("\"exceptionResponseHeaders\": \"{}\"");
        validate(e.getDiagnostics(), testItemLength, 0);
    }
    CosmosItemResponse<TestItem> readItemResponse = container.readItem(testItem.id, new PartitionKey(testItem.mypk), TestItem.class);
    validate(readItemResponse.getDiagnostics(), 0, ModelBridgeInternal.getPayloadLength(readItemResponse));
    CosmosItemResponse<Object> deleteItemResponse = container.deleteItem(testItem, null);
    validate(deleteItemResponse.getDiagnostics(), 0, 0);
}

// Verifies RNTBD service-endpoint/channel statistics timestamps against a timeline of
// three upserts separated by sleeps; parameterized on the connection-state listener.
@Test(groups = {"simple"}, dataProvider = "connectionStateListenerArgProvider", timeOut = TIMEOUT)
public void rntbdStatistics(boolean connectionStateListenerEnabled) throws Exception {
    Instant beforeClientInitialization = Instant.now();
    CosmosClient client1 = null;
    try {
        DirectConnectionConfig connectionConfig = DirectConnectionConfig.getDefaultConfig();
        connectionConfig.setConnectionEndpointRediscoveryEnabled(connectionStateListenerEnabled);
        client1 = new CosmosClientBuilder()
            .endpoint(TestConfigurations.HOST)
            .key(TestConfigurations.MASTER_KEY)
            .directMode(connectionConfig)
            .buildClient();
        TestItem testItem = new TestItem();
        testItem.id = UUID.randomUUID().toString();
        testItem.mypk = UUID.randomUUID().toString();
        int testItemLength = OBJECT_MAPPER.writeValueAsBytes(testItem).length;
        CosmosContainer container = client1.getDatabase(this.cosmosAsyncContainer.getDatabase().getId()).getContainer(this.cosmosAsyncContainer.getId());
        // Sleeps separate the instants so timestamp assertions have clear boundaries.
        Thread.sleep(1000);
        Instant beforeInitializingRntbdServiceEndpoint = Instant.now();
        // First operation initializes the RNTBD service endpoint.
        CosmosItemResponse<TestItem> operation1Response = container.upsertItem(testItem);
        Instant afterInitializingRntbdServiceEndpoint
= Instant.now();
        Thread.sleep(1000);
        Instant beforeOperation2 = Instant.now();
        CosmosItemResponse<TestItem> operation2Response = container.upsertItem(testItem);
        Instant afterOperation2 = Instant.now();
        Thread.sleep(1000);
        Instant beforeOperation3 = Instant.now();
        CosmosItemResponse<TestItem> operation3Response = container.upsertItem(testItem);
        Instant afterOperation3 = Instant.now();
        // Statistics of the third operation are validated against the whole timeline.
        validateRntbdStatistics(operation3Response.getDiagnostics(),
            beforeClientInitialization,
            beforeInitializingRntbdServiceEndpoint,
            afterInitializingRntbdServiceEndpoint,
            beforeOperation2,
            afterOperation2,
            beforeOperation3,
            afterOperation3,
            connectionStateListenerEnabled);
        CosmosItemResponse<TestItem> readItemResponse = container.readItem(testItem.id, new PartitionKey(testItem.mypk), TestItem.class);
        validate(readItemResponse.getDiagnostics(), 0, ModelBridgeInternal.getPayloadLength(readItemResponse));
        CosmosItemResponse<Object> deleteItemResponse = container.deleteItem(testItem, null);
        validate(deleteItemResponse.getDiagnostics(), 0, 0);
    } finally {
        LifeCycleUtils.closeQuietly(client1);
    }
}

/**
 * Asserts the RNTBD serviceEndpointStatistics / channelStatistics embedded in the diagnostics
 * JSON: channel counts, channel details, connection-state-listener (CER) metrics presence, and
 * that the endpoint's createdTime / lastRequestTime timestamps fall inside the instants captured
 * around the corresponding operations (small +/- millisecond slack for clock granularity).
 */
private void validateRntbdStatistics(CosmosDiagnostics cosmosDiagnostics,
                                     Instant clientInitializationTime,
                                     Instant beforeInitializingRntbdServiceEndpoint,
                                     Instant afterInitializingRntbdServiceEndpoint,
                                     Instant beforeOperation2,
                                     Instant afterOperation2,
                                     Instant beforeOperation3,
                                     Instant afterOperation3,
                                     boolean connectionStateListenerEnabled) throws Exception {
    ObjectNode diagnostics = (ObjectNode) OBJECT_MAPPER.readTree(cosmosDiagnostics.toString());
    JsonNode responseStatisticsList = diagnostics.get("responseStatisticsList");
    assertThat(responseStatisticsList.isArray()).isTrue();
    assertThat(responseStatisticsList.size()).isGreaterThan(0);
    JsonNode storeResult = responseStatisticsList.get(0).get("storeResult");
    assertThat(storeResult).isNotNull();
    JsonNode replicaStatusList = storeResult.get("replicaStatusList");
    assertThat(replicaStatusList.isArray()).isTrue();
    assertThat(replicaStatusList.size()).isGreaterThan(0);
    JsonNode serviceEndpointStatistics = storeResult.get("serviceEndpointStatistics");
    assertThat(serviceEndpointStatistics).isNotNull();
    assertThat(serviceEndpointStatistics.get("availableChannels").asInt(-1)).isGreaterThan(0);
    // The request has already completed, so no channel is still acquired...
    assertThat(serviceEndpointStatistics.get("acquiredChannels").asInt(-1)).isEqualTo(0);
    // ...but this request itself was in flight when the snapshot was taken.
    assertThat(serviceEndpointStatistics.get("inflightRequests").asInt(-1)).isEqualTo(1);
    assertThat(serviceEndpointStatistics.get("isClosed").asBoolean()).isEqualTo(false);
    JsonNode channelStatistics = storeResult.get("channelStatistics");
    assertThat(channelStatistics).isNotNull();
    assertThat(channelStatistics.get("channelId").asText()).isNotEmpty();
    assertThat(channelStatistics.get("channelTaskQueueSize").asInt(-1)).isGreaterThanOrEqualTo(0);
    assertThat(channelStatistics.get("pendingRequestsCount").asInt(-1)).isGreaterThanOrEqualTo(0);
    assertThat(channelStatistics.get("lastReadTime").asText()).isNotEmpty();
    assertThat(channelStatistics.get("waitForConnectionInit").asText()).isNotEmpty();
    // CER (connection endpoint rediscovery) metrics only exist when the listener is enabled.
    JsonNode connectionStateListenerMetrics = serviceEndpointStatistics.get("cerMetrics");
    if (connectionStateListenerEnabled) {
        assertThat(connectionStateListenerMetrics).isNotNull();
        // No endpoint failures occurred in this test, so the listener never fired.
        assertThat(connectionStateListenerMetrics.get("lastCallTimestamp")).isNull();
        assertThat(connectionStateListenerMetrics.get("lastActionableContext")).isNull();
    } else {
        assertThat(connectionStateListenerMetrics).isNull();
    }
    // Endpoint creation must have happened between the instants around operation 1.
    Instant beforeInitializationThreshold = beforeInitializingRntbdServiceEndpoint.minusMillis(1);
    assertThat(Instant.parse(serviceEndpointStatistics.get("createdTime").asText()))
        .isAfterOrEqualTo(beforeInitializationThreshold);
    Instant afterInitializationThreshold = afterInitializingRntbdServiceEndpoint.plusMillis(2);
    assertThat(Instant.parse(serviceEndpointStatistics.get("createdTime").asText()))
        .isBeforeOrEqualTo(afterInitializationThreshold);
    Instant afterOperation2Threshold = afterOperation2.plusMillis(2);
    Instant
beforeOperation2Threshold = beforeOperation2.minusMillis(2); assertThat(Instant.parse(serviceEndpointStatistics.get("lastRequestTime").asText())) .isAfterOrEqualTo(beforeOperation2Threshold.toString()) .isBeforeOrEqualTo(afterOperation2Threshold.toString()); assertThat(Instant.parse(serviceEndpointStatistics.get("lastSuccessfulRequestTime").asText())) .isAfterOrEqualTo(beforeOperation2Threshold.toString()) .isBeforeOrEqualTo(afterOperation2Threshold.toString()); } private void validate(CosmosDiagnostics cosmosDiagnostics, int expectedRequestPayloadSize, int expectedResponsePayloadSize) throws Exception { ObjectNode diagnostics = (ObjectNode) OBJECT_MAPPER.readTree(cosmosDiagnostics.toString()); JsonNode responseStatisticsList = diagnostics.get("responseStatisticsList"); assertThat(responseStatisticsList.isArray()).isTrue(); assertThat(responseStatisticsList.size()).isGreaterThan(0); JsonNode storeResult = responseStatisticsList.get(0).get("storeResult"); boolean hasPayload = storeResult.get("exceptionMessage") == null; assertThat(storeResult).isNotNull(); assertThat(storeResult.get("rntbdRequestLengthInBytes").asInt(-1)).isGreaterThan(expectedRequestPayloadSize); assertThat(storeResult.get("rntbdRequestLengthInBytes").asInt(-1)).isGreaterThan(expectedRequestPayloadSize); assertThat(storeResult.get("requestPayloadLengthInBytes").asInt(-1)).isEqualTo(expectedRequestPayloadSize); if (hasPayload) { assertThat(storeResult.get("responsePayloadLengthInBytes").asInt(-1)).isEqualTo(expectedResponsePayloadSize); } assertThat(storeResult.get("rntbdResponseLengthInBytes").asInt(-1)).isGreaterThan(expectedResponsePayloadSize); } @Test(groups = {"emulator"}, timeOut = TIMEOUT) public void addressResolutionStatistics() { CosmosClient client1 = null; CosmosClient client2 = null; String databaseId = DatabaseForTest.generateId(); String containerId = UUID.randomUUID().toString(); CosmosDatabase cosmosDatabase = null; CosmosContainer cosmosContainer = null; try { client1 = new 
CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); client1.createDatabase(databaseId); cosmosDatabase = client1.getDatabase(databaseId); cosmosDatabase.createContainer(containerId, "/mypk"); InternalObjectNode internalObjectNode = getInternalObjectNode(); cosmosContainer = cosmosDatabase.getContainer(containerId); CosmosItemResponse<InternalObjectNode> writeResourceResponse = cosmosContainer.createItem(internalObjectNode); assertThat(writeResourceResponse.getDiagnostics().toString()).contains("addressResolutionStatistics"); assertThat(writeResourceResponse.getDiagnostics().toString()).contains("\"inflightRequest\":false"); assertThat(writeResourceResponse.getDiagnostics().toString()).doesNotContain("endTime=\"null\""); assertThat(writeResourceResponse.getDiagnostics().toString()).contains("\"exceptionMessage\":null"); client2 = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); cosmosDatabase = client2.getDatabase(databaseId); cosmosContainer = cosmosDatabase.getContainer(containerId); AsyncDocumentClient asyncDocumentClient = client2.asyncClient().getContextClient(); GlobalAddressResolver addressResolver = (GlobalAddressResolver) FieldUtils.readField(asyncDocumentClient, "addressResolver", true); @SuppressWarnings("rawtypes") Map addressCacheByEndpoint = (Map) FieldUtils.readField(addressResolver, "addressCacheByEndpoint", true); Object endpointCache = addressCacheByEndpoint.values().toArray()[0]; GatewayAddressCache addressCache = (GatewayAddressCache) FieldUtils.readField(endpointCache, "addressCache", true); HttpClient httpClient = httpClient(true); FieldUtils.writeField(addressCache, "httpClient", httpClient, true); new Thread(() -> { try { Thread.sleep(5000); HttpClient httpClient1 = httpClient(false); FieldUtils.writeField(addressCache, 
"httpClient", httpClient1, true);
            } catch (Exception e) {
                fail(e.getMessage());
            }
        }).start();
        PartitionKey partitionKey = new PartitionKey(internalObjectNode.get("mypk"));
        CosmosItemResponse<InternalObjectNode> readResourceResponse =
            cosmosContainer.readItem(internalObjectNode.getId(), partitionKey, new CosmosItemRequestOptions(), InternalObjectNode.class);
        // The read eventually succeeds, but its diagnostics must record the earlier
        // failed address-resolution attempt (connection refused by the fake proxy).
        assertThat(readResourceResponse.getDiagnostics().toString()).contains("addressResolutionStatistics");
        assertThat(readResourceResponse.getDiagnostics().toString()).contains("\"inflightRequest\":false");
        assertThat(readResourceResponse.getDiagnostics().toString()).doesNotContain("endTime=\"null\"");
        assertThat(readResourceResponse.getDiagnostics().toString()).contains("\"exceptionMessage\":\"io.netty" +
            ".channel.AbstractChannel$AnnotatedConnectException: Connection refused");
    } catch (Exception ex) {
        logger.error("Error in test addressResolutionStatistics", ex);
        fail("This test should not throw exception " + ex);
    } finally {
        safeDeleteSyncDatabase(cosmosDatabase);
        if (client1 != null) {
            client1.close();
        }
        if (client2 != null) {
            client2.close();
        }
    }
}

// Builds a test document whose partition key ("mypk") equals its id.
private InternalObjectNode getInternalObjectNode() {
    InternalObjectNode internalObjectNode = new InternalObjectNode();
    String uuid = UUID.randomUUID().toString();
    internalObjectNode.setId(uuid);
    BridgeInternal.setProperty(internalObjectNode, "mypk", uuid);
    return internalObjectNode;
}

// Builds a test document with a random id and the supplied partition-key value.
private InternalObjectNode getInternalObjectNode(String pkValue) {
    InternalObjectNode internalObjectNode = new InternalObjectNode();
    String uuid = UUID.randomUUID().toString();
    internalObjectNode.setId(uuid);
    BridgeInternal.setProperty(internalObjectNode, "mypk", pkValue);
    return internalObjectNode;
}

// Reads the private supplementalResponseStatisticsList field via reflection;
// the field has no public accessor on ClientSideRequestStatistics.
private List<ClientSideRequestStatistics.StoreResponseStatistics> getStoreResponseStatistics(ClientSideRequestStatistics requestStatistics) throws Exception {
    Field storeResponseStatisticsField = ClientSideRequestStatistics.class.getDeclaredField("supplementalResponseStatisticsList");
    storeResponseStatisticsField.setAccessible(true);
    @SuppressWarnings({"unchecked"})
    List<ClientSideRequestStatistics.StoreResponseStatistics> list =
        (List<ClientSideRequestStatistics.StoreResponseStatistics>) storeResponseStatisticsField.get(requestStatistics);
    return list;
}

// Resets the private supplementalResponseStatisticsList to an empty list via reflection.
private void clearStoreResponseStatistics(ClientSideRequestStatistics requestStatistics) throws Exception {
    Field storeResponseStatisticsField = ClientSideRequestStatistics.class.getDeclaredField("supplementalResponseStatisticsList");
    storeResponseStatisticsField.setAccessible(true);
    storeResponseStatisticsField.set(requestStatistics, new ArrayList<ClientSideRequestStatistics.StoreResponseStatistics>());
}

// Asserts the transport events expected on the gateway (HTTP) request timeline.
private void validateTransportRequestTimelineGateway(String diagnostics) {
    assertThat(diagnostics).contains("\"eventName\":\"connectionConfigured\"");
    assertThat(diagnostics).contains("\"eventName\":\"requestSent\"");
    assertThat(diagnostics).contains("\"eventName\":\"transitTime\"");
    assertThat(diagnostics).contains("\"eventName\":\"received\"");
}

// Asserts the transport events expected on the direct (RNTBD) request timeline.
private void validateTransportRequestTimelineDirect(String diagnostics) {
    assertThat(diagnostics).contains("\"eventName\":\"created\"");
    assertThat(diagnostics).contains("\"eventName\":\"queued\"");
    assertThat(diagnostics).contains("\"eventName\":\"channelAcquisitionStarted\"");
    assertThat(diagnostics).contains("\"eventName\":\"pipelined\"");
    assertThat(diagnostics).contains("\"eventName\":\"transitTime\"");
    assertThat(diagnostics).contains("\"eventName\":\"decodeTime");
    assertThat(diagnostics).contains("\"eventName\":\"received\"");
    assertThat(diagnostics).contains("\"eventName\":\"completed\"");
    assertThat(diagnostics).contains("\"startTimeUTC\"");
    assertThat(diagnostics).contains("\"durationInMilliSecs\"");
}

// Fails the test when the given string is not well-formed JSON (streaming parse, no DOM).
public void isValidJSON(final String json) {
    try {
        final JsonParser parser = new JsonFactory().createParser(json);
        while (parser.nextToken() != null) {
        }
    } catch (IOException ex) {
        fail("Diagnostic string is not in json format ", ex);
    }
}
// Builds an HTTP client; when fakeProxy is true it is routed through localhost:8888
// (assumed unreachable in the test environment) so connections are refused.
private HttpClient httpClient(boolean fakeProxy) {
    HttpClientConfig httpClientConfig;
    if(fakeProxy) {
        httpClientConfig = new HttpClientConfig(new Configs())
            .withProxy(new ProxyOptions(ProxyOptions.Type.HTTP, new InetSocketAddress("localhost", 8888)));
    } else {
        httpClientConfig = new HttpClientConfig(new Configs());
    }
    return HttpClient.createFixed(httpClientConfig);
}

// Deserializes an IndexUtilizationInfo from JSON; returns null (after logging) on malformed input.
private IndexUtilizationInfo createFromJSONString(String jsonString) {
    ObjectMapper indexUtilizationInfoObjectMapper = new ObjectMapper();
    IndexUtilizationInfo indexUtilizationInfo = null;
    try {
        indexUtilizationInfo = indexUtilizationInfoObjectMapper.readValue(jsonString, IndexUtilizationInfo.class);
    } catch (JsonProcessingException e) {
        logger.error("Json not correctly formed ", e);
    }
    return indexUtilizationInfo;
}

// Asserts that the diagnostics' contacted region is exactly the client's first available
// write region (read via reflection from the private LocationCache internals).
private void validateRegionContacted(CosmosDiagnostics cosmosDiagnostics, CosmosAsyncClient cosmosAsyncClient) throws Exception {
    RxDocumentClientImpl rxDocumentClient =
        (RxDocumentClientImpl) ReflectionUtils.getAsyncDocumentClient(cosmosAsyncClient);
    GlobalEndpointManager globalEndpointManager = ReflectionUtils.getGlobalEndpointManager(rxDocumentClient);
    LocationCache locationCache = ReflectionUtils.getLocationCache(globalEndpointManager);
    Field locationInfoField = LocationCache.class.getDeclaredField("locationInfo");
    locationInfoField.setAccessible(true);
    Object locationInfo = locationInfoField.get(locationCache);
    // DatabaseAccountLocationsInfo is a private nested class; load it by name.
    Class<?> DatabaseAccountLocationsInfoClass = Class.forName("com.azure.cosmos.implementation.routing" +
        ".LocationCache$DatabaseAccountLocationsInfo");
    Field availableWriteEndpointByLocation = DatabaseAccountLocationsInfoClass.getDeclaredField(
        "availableWriteEndpointByLocation");
    availableWriteEndpointByLocation.setAccessible(true);
    @SuppressWarnings("unchecked")
    Map<String, URI> map = (Map<String, URI>) availableWriteEndpointByLocation.get(locationInfo);
    String regionName = map.keySet().iterator().next();
    // Region names are normalized to lower case inside the diagnostics.
    assertThat(cosmosDiagnostics.getContactedRegionNames().size()).isEqualTo(1);
    assertThat(cosmosDiagnostics.getContactedRegionNames().iterator().next()).isEqualTo(regionName.toLowerCase());
}

// Asserts that every store response's RntbdChannelStatistics serializes all its fields, and
// that the transit-timeout fields appear only when at least one transit timeout occurred.
private void validateChannelStatistics(CosmosDiagnostics cosmosDiagnostics) {
    for (ClientSideRequestStatistics clientSideRequestStatistics : cosmosDiagnostics.getClientSideRequestStatistics()) {
        for (ClientSideRequestStatistics.StoreResponseStatistics storeResponseStatistics : clientSideRequestStatistics.getResponseStatisticsList()) {
            assertThat(storeResponseStatistics).isNotNull();
            RntbdChannelStatistics rntbdChannelStatistics =
                storeResponseStatistics
                    .getStoreResult()
                    .getStoreResponseDiagnostics()
                    .getRntbdChannelStatistics();
            assertThat(rntbdChannelStatistics).isNotNull();
            try {
                String rntbdChannelStatisticsString =
                    Utils.getSimpleObjectMapper().writeValueAsString(rntbdChannelStatistics);
                assertThat(rntbdChannelStatisticsString).contains("\"channelId\":\"" + rntbdChannelStatistics.getChannelId() + "\"");
                assertThat(rntbdChannelStatisticsString)
                    .contains("\"channelTaskQueueSize\":" + rntbdChannelStatistics.getChannelTaskQueueSize());
                assertThat(rntbdChannelStatisticsString)
                    .contains("\"pendingRequestsCount\":" + rntbdChannelStatistics.getPendingRequestsCount());
                assertThat(rntbdChannelStatisticsString)
                    .contains("\"lastReadTime\":\"" + rntbdChannelStatistics.getLastReadTime() + "\"");
                if (rntbdChannelStatistics.getTransitTimeoutCount() > 0) {
                    assertThat(rntbdChannelStatisticsString)
                        .contains("\"transitTimeoutCount\":" + rntbdChannelStatistics.getTransitTimeoutCount());
                    assertThat(rntbdChannelStatisticsString)
                        .contains("\"transitTimeoutStartingTime\":\"" + rntbdChannelStatistics.getTransitTimeoutStartingTime() + "\"");
                } else {
                    // Zero-count timeouts must be omitted from the serialized form entirely.
                    assertThat(rntbdChannelStatisticsString)
                        .doesNotContain("\"transitTimeoutCount\":" + rntbdChannelStatistics.getTransitTimeoutCount());
                    assertThat(rntbdChannelStatisticsString)
                        .doesNotContain("\"transitTimeoutStartingTime\":\"" + rntbdChannelStatistics.getTransitTimeoutStartingTime() + "\"");
                }
                assertThat(rntbdChannelStatisticsString).contains("\"waitForConnectionInit\":" + rntbdChannelStatistics.isWaitForConnectionInit());
            } catch (JsonProcessingException e) {
                fail("Failed to parse RntbdChannelStatistics");
            }
        }
    }
}

// Minimal POJO used as the test document; "mypk" is the container's partition-key property.
public static class TestItem {
    public String id;
    public String mypk;

    public TestItem() {
    }
}
}
// Integration tests for CosmosDiagnostics content across gateway and direct connection modes.
class CosmosDiagnosticsTest extends TestSuiteBase {
    private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
    private static final String USER_AGENT_SUFFIX_GATEWAY_CLIENT = "gatewayClientSuffix";
    private static final String USER_AGENT_SUFFIX_DIRECT_CLIENT = "directClientSuffix";
    private static final DateTimeFormatter RESPONSE_TIME_FORMATTER = DateTimeFormatter.ISO_INSTANT;
    // Machine id as computed by the SDK itself (read reflectively below).
    private static final String tempMachineId = getTempMachineId();
    private CosmosClient gatewayClient;
    private CosmosClient directClient;
    private CosmosAsyncDatabase cosmosAsyncDatabase;
    private CosmosContainer containerGateway;
    private CosmosContainer containerDirect;
    private CosmosAsyncContainer cosmosAsyncContainer;
    private String gatewayClientUserAgent;
    private String directClientUserAgent;

    // Reads RxDocumentClientImpl's private static "tempMachineId" via reflection so tests can
    // assert the machineId emitted in diagnostics; fails the suite if the field is missing.
    private static String getTempMachineId() {
        Field field = null;
        try {
            field = RxDocumentClientImpl.class.getDeclaredField("tempMachineId");
        } catch (NoSuchFieldException e) {
            fail(e.toString());
        }
        field.setAccessible(true);
        try {
            return (String)field.get(null);
        } catch (IllegalAccessException e) {
            fail(e.toString());
            return null;
        }
    }

    // Creates one gateway-mode and one direct-mode client (with distinct user-agent suffixes,
    // recorded so tests can assert the userAgent field in diagnostics) plus shared containers.
    @BeforeClass(groups = {"simple"}, timeOut = SETUP_TIMEOUT)
    public void beforeClass() {
        assertThat(this.gatewayClient).isNull();
        gatewayClient = new CosmosClientBuilder()
            .endpoint(TestConfigurations.HOST)
            .key(TestConfigurations.MASTER_KEY)
            .contentResponseOnWriteEnabled(true)
            .userAgentSuffix(USER_AGENT_SUFFIX_GATEWAY_CLIENT)
            .gatewayMode()
            .buildClient();
        // UserAgentContainer reproduces the exact user-agent string the SDK will send.
        UserAgentContainer userAgentContainer = new UserAgentContainer();
        userAgentContainer.setSuffix(USER_AGENT_SUFFIX_GATEWAY_CLIENT);
        this.gatewayClientUserAgent = userAgentContainer.getUserAgent();
        directClient = new CosmosClientBuilder()
            .endpoint(TestConfigurations.HOST)
            .key(TestConfigurations.MASTER_KEY)
            .contentResponseOnWriteEnabled(true)
            .userAgentSuffix(USER_AGENT_SUFFIX_DIRECT_CLIENT)
            .directMode()
            .buildClient();
        userAgentContainer.setSuffix(USER_AGENT_SUFFIX_DIRECT_CLIENT);
        this.directClientUserAgent = userAgentContainer.getUserAgent();
        cosmosAsyncContainer = getSharedMultiPartitionCosmosContainer(this.gatewayClient.asyncClient());
        cosmosAsyncDatabase = directClient.asyncClient().getDatabase(cosmosAsyncContainer.getDatabase().getId());
        containerGateway = gatewayClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId());
        containerDirect = directClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId());
    }

    @AfterClass(groups = {"simple"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
    public void afterClass() {
        if (this.gatewayClient != null) {
            this.gatewayClient.close();
        }
        if (this.directClient != null) {
            this.directClient.close();
        }
    }

    // Query text plus a flag (presumably qroupBy/feature toggle consumed by the test method —
    // NOTE(review): the meaning of the boolean isn't visible here; confirm against the consumer).
    @DataProvider(name = "query")
    private Object[][] query() {
        return new Object[][]{
            new Object[] { "Select * from c where c.id = 'wrongId'", true },
            new Object[] { "Select top 1 * from c where c.id = 'wrongId'", true },
            new Object[] { "Select * from c where c.id = 'wrongId' order by c.id", true },
            new Object[] { "Select count(1) from c where c.id = 'wrongId' group by c.pk", true },
            new Object[] { "Select distinct c.pk from c where c.id = 'wrongId'", true },
            new Object[] { "Select * from c where c.id = 'wrongId'", false },
            new Object[] { "Select top 1 * from c where c.id = 'wrongId'", false },
            new Object[] { "Select * from c where c.id = 'wrongId' order by c.id", false },
            new Object[] { "Select count(1) from c where c.id = 'wrongId' group by c.pk", false },
            new Object[] { "Select distinct c.pk from c where c.id = 'wrongId'", false },
            new Object[] { "Select * from c where c.id = 'wrongId'", false },
            new Object[] { "Select top 1 * from c where c.id = 'wrongId'", false },
            new Object[] { "Select * from c where c.id = 'wrongId' order by c.id", false },
            new Object[] { "Select count(1) from c where c.id = 'wrongId' group by c.pk", false },
            new Object[] { "Select distinct c.pk from c where c.id = 'wrongId'", false },
        };
    }

    @DataProvider(name =
"readAllItemsOfLogicalPartition") private Object[][] readAllItemsOfLogicalPartition() { return new Object[][]{ new Object[] { 1, true }, new Object[] { 5, null }, new Object[] { 20, null }, new Object[] { 1, false }, new Object[] { 5, false }, new Object[] { 20, false }, }; } @DataProvider(name = "connectionStateListenerArgProvider") public Object[][] connectionStateListenerArgProvider() { return new Object[][]{ {true}, {false} }; } @Test(groups = {"simple"}, timeOut = TIMEOUT) @Test(groups = {"simple"}, timeOut = TIMEOUT) public void gatewayDiagnosticsOnException() throws Exception { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = null; try { createResponse = this.containerGateway.createItem(internalObjectNode); CosmosItemRequestOptions cosmosItemRequestOptions = new CosmosItemRequestOptions(); ModelBridgeInternal.setPartitionKey(cosmosItemRequestOptions, new PartitionKey("wrongPartitionKey")); CosmosItemResponse<InternalObjectNode> readResponse = this.containerGateway.readItem(BridgeInternal.getProperties(createResponse).getId(), new PartitionKey("wrongPartitionKey"), InternalObjectNode.class); fail("request should fail as partition key is wrong"); } catch (CosmosException exception) { isValidJSON(exception.toString()); isValidJSON(exception.getMessage()); String diagnostics = exception.getDiagnostics().toString(); assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND); assertThat(diagnostics).contains("\"connectionMode\":\"GATEWAY\""); assertThat(diagnostics).contains("\"userAgent\":\"" + this.gatewayClientUserAgent + "\""); assertThat(diagnostics).doesNotContain(("\"gatewayStatistics\":null")); assertThat(diagnostics).contains("\"statusCode\":404"); assertThat(diagnostics).contains("\"operationType\":\"Read\""); assertThat(diagnostics).contains("\"userAgent\":\"" + this.gatewayClientUserAgent + "\""); 
assertThat(diagnostics).contains("\"exceptionMessage\":\"Entity with the specified id does not exist in the system."); assertThat(diagnostics).contains("\"exceptionResponseHeaders\""); assertThat(diagnostics).doesNotContain("\"exceptionResponseHeaders\": \"{}\""); assertThat(diagnostics).containsAnyOf( "\"machineId\":\"" + tempMachineId + "\"", "\"machineId\":\"" + ClientTelemetry.getMachineId(null) + "\"" ); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); assertThat(diagnostics).doesNotContain(("\"resourceAddress\":null")); assertThat(createResponse.getDiagnostics().getContactedRegionNames()).isNotNull(); validateRegionContacted(createResponse.getDiagnostics(), this.containerGateway.asyncContainer.getDatabase().getClient()); assertThat(createResponse.getDiagnostics().getRegionsContacted()).isNotEmpty(); assertThat(exception.getDiagnostics().getDuration()).isNotNull(); validateTransportRequestTimelineGateway(diagnostics); isValidJSON(diagnostics); } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void systemDiagnosticsForSystemStateInformation() { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = this.containerGateway.createItem(internalObjectNode); String diagnostics = createResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("systemInformation"); assertThat(diagnostics).contains("usedMemory"); assertThat(diagnostics).contains("availableMemory"); assertThat(diagnostics).contains("systemCpuLoad"); assertThat(diagnostics).contains("\"userAgent\":\"" + this.gatewayClientUserAgent + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); assertThat(createResponse.getDiagnostics().getDuration()).isNotNull(); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void directDiagnostics() throws Exception { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> 
createResponse = containerDirect.createItem(internalObjectNode);
    validateDirectModeDiagnosticsOnSuccess(createResponse.getDiagnostics(), directClient, this.directClientUserAgent);
    try {
        // Creating the same item again must conflict; validate the failure diagnostics too.
        containerDirect.createItem(internalObjectNode);
        fail("expected 409");
    } catch (CosmosException e) {
        validateDirectModeDiagnosticsOnException(e.getDiagnostics(), this.directClientUserAgent);
    }
}

// Verifies that the session token sent with each request is surfaced in diagnostics:
// null on the first write, then echoing the caller-provided token for reads and batches.
@Test(groups = {"simple"}, timeOut = TIMEOUT)
public void requestSessionTokenDiagnostics() {
    CosmosClient testSessionTokenClient = null;
    try {
        testSessionTokenClient = new CosmosClientBuilder()
            .endpoint(TestConfigurations.HOST)
            .key(TestConfigurations.MASTER_KEY)
            .consistencyLevel(ConsistencyLevel.SESSION)
            .contentResponseOnWriteEnabled(true)
            .directMode()
            .buildClient();
        CosmosContainer cosmosContainer =
            testSessionTokenClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId());
        InternalObjectNode internalObjectNode = getInternalObjectNode();
        CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode);
        String diagnostics = createResponse.getDiagnostics().toString();
        // First request on a fresh client carries no session token yet.
        assertThat(diagnostics).contains("\"requestSessionToken\":null");
        String sessionToken = createResponse.getSessionToken();
        CosmosItemResponse<InternalObjectNode> readResponse =
            cosmosContainer.readItem(BridgeInternal.getProperties(createResponse).getId(),
                new PartitionKey(BridgeInternal.getProperties(createResponse).getId()),
                InternalObjectNode.class);
        diagnostics = readResponse.getDiagnostics().toString();
        assertThat(diagnostics).contains(String.format("\"requestSessionToken\":\"%s\"", sessionToken));
        CosmosBatch batch = CosmosBatch.createCosmosBatch(new PartitionKey(
            BridgeInternal.getProperties(createResponse).getId()));
        internalObjectNode = getInternalObjectNode();
        batch.createItemOperation(internalObjectNode);
        CosmosBatchResponse batchResponse = cosmosContainer.executeCosmosBatch(batch,
            new CosmosBatchRequestOptions().setSessionToken(readResponse.getSessionToken()));
        diagnostics = batchResponse.getDiagnostics().toString();
        assertThat(diagnostics).contains(String.format("\"requestSessionToken\":\"%s\"", readResponse.getSessionToken()));
    } finally {
        if (testSessionTokenClient != null) {
            testSessionTokenClient.close();
        }
    }
}

// Verifies that the "clientEndpoints" diagnostics counter increases when a second client
// to the same endpoint is created in the process.
@Test(groups = {"simple"})
public void databaseAccountToClients() {
    CosmosClient testClient = null;
    try {
        testClient = new CosmosClientBuilder()
            .endpoint(TestConfigurations.HOST)
            .key(TestConfigurations.MASTER_KEY)
            .contentResponseOnWriteEnabled(true)
            .directMode()
            .buildClient();
        CosmosContainer cosmosContainer =
            testClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId());
        InternalObjectNode internalObjectNode = getInternalObjectNode();
        CosmosItemResponse<InternalObjectNode> createResponse = cosmosContainer.createItem(internalObjectNode);
        String diagnostics = createResponse.getDiagnostics().toString();
        assertThat(diagnostics).contains(String.format("\"clientEndpoints\"" + ":{\"%s\"", TestConfigurations.HOST));
        // Extract the integer client count for this endpoint out of the raw JSON text.
        int clientsIndex = diagnostics.indexOf("\"clientEndpoints\":");
        String[] substrings = diagnostics.substring(clientsIndex, clientsIndex + 120)
            .split("}")[0].split(":");
        String intString = substrings[substrings.length-1];
        int intValue = Integer.parseInt(intString);
        // NOTE(review): testClient2 is only closed on the happy path below; an assertion
        // failure in between would leak it — consider a try/finally.
        CosmosClient testClient2 = new CosmosClientBuilder()
            .endpoint(TestConfigurations.HOST)
            .key(TestConfigurations.MASTER_KEY)
            .contentResponseOnWriteEnabled(true)
            .directMode()
            .buildClient();
        internalObjectNode = getInternalObjectNode();
        createResponse = cosmosContainer.createItem(internalObjectNode);
        diagnostics = createResponse.getDiagnostics().toString();
        assertThat(diagnostics).contains(String.format("\"clientEndpoints\"" + ":{\"%s\"", TestConfigurations.HOST));
        clientsIndex = diagnostics.indexOf("\"clientEndpoints\":");
        substrings = diagnostics.substring(clientsIndex, clientsIndex + 120)
            .split("}")[0].split(":");
        intString = substrings[substrings.length-1];
        assertThat(Integer.parseInt(intString)).isEqualTo(intValue+1);
        testClient2.close();
    } finally {
        if (testClient != null) {
            testClient.close();
        }
    }
}

// Verifies that the first page of a query carries query-plan diagnostics (plan start/end
// time and duration) while subsequent pages do not repeat them.
@Test(groups = {"simple"}, timeOut = TIMEOUT)
public void queryPlanDiagnostics() throws JsonProcessingException {
    List<String> itemIdList = new ArrayList<>();
    for(int i = 0; i< 100; i++) {
        InternalObjectNode internalObjectNode = getInternalObjectNode();
        CosmosItemResponse<InternalObjectNode> createResponse = containerDirect.createItem(internalObjectNode);
        if(i%20 == 0) {
            itemIdList.add(internalObjectNode.getId());
        }
    }
    String queryDiagnostics = null;
    List<String> queryList = new ArrayList<>();
    queryList.add("Select * from c");
    // Build an IN-list query over every fifth created item's partition key.
    StringBuilder queryBuilder = new StringBuilder("SELECT * from c where c.mypk in (");
    for(int i = 0 ; i < itemIdList.size(); i++){
        queryBuilder.append("'").append(itemIdList.get(i)).append("'");
        if(i < (itemIdList.size()-1)) {
            queryBuilder.append(",");
        } else {
            queryBuilder.append(")");
        }
    }
    queryList.add(queryBuilder.toString());
    queryList.add("Select * from c where c.id = 'wrongId'");
    for(String query : queryList) {
        int feedResponseCounter = 0;
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        options.setQueryMetricsEnabled(true);
        Iterator<FeedResponse<InternalObjectNode>> iterator = containerDirect.queryItems(query, options, InternalObjectNode.class).iterableByPage().iterator();
        while (iterator.hasNext()) {
            FeedResponse<InternalObjectNode> feedResponse = iterator.next();
            queryDiagnostics = feedResponse.getCosmosDiagnostics().toString();
            if (feedResponseCounter == 0) {
                assertThat(queryDiagnostics).contains("QueryPlan Start Time (UTC)=");
                assertThat(queryDiagnostics).contains("QueryPlan End Time (UTC)=");
                assertThat(queryDiagnostics).contains("QueryPlan Duration (ms)=");
                String requestTimeLine =
OBJECT_MAPPER.writeValueAsString(feedResponse.getCosmosDiagnostics().getFeedResponseDiagnostics().getQueryPlanDiagnosticsContext().getRequestTimeline()); assertThat(requestTimeLine).contains("connectionConfigured"); assertThat(requestTimeLine).contains("requestSent"); assertThat(requestTimeLine).contains("transitTime"); assertThat(requestTimeLine).contains("received"); } else { assertThat(queryDiagnostics).doesNotContain("QueryPlan Start Time (UTC)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan End Time (UTC)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan Duration (ms)="); assertThat(queryDiagnostics).doesNotContain("QueryPlan RequestTimeline ="); } feedResponseCounter++; } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void readManyDiagnostics() { String pkValue = UUID.randomUUID().toString(); PartitionKey partitionKey = new PartitionKey(pkValue); List<CosmosItemIdentity> itemIdList = new ArrayList<>(); for(int i = 0; i< 100; i++) { InternalObjectNode internalObjectNode = getInternalObjectNode(pkValue); CosmosItemResponse<InternalObjectNode> createResponse = containerDirect.createItem(internalObjectNode); if(i%20 == 0) { itemIdList.add(new CosmosItemIdentity(partitionKey, internalObjectNode.getId())); } } FeedResponse<InternalObjectNode> response = containerDirect.readMany(itemIdList, InternalObjectNode.class); FeedResponseDiagnostics diagnostics = response.getCosmosDiagnostics().getFeedResponseDiagnostics(); assertThat(diagnostics.getClientSideRequestStatistics().size()).isEqualTo(1); assertThat(diagnostics.getQueryMetricsMap().values().iterator().next().getRetrievedDocumentCount()).isEqualTo(itemIdList.size()); String cosmosDiagnosticsString = response.getCosmosDiagnostics().toString(); assertThat(cosmosDiagnosticsString).contains("\"userAgent\":\"" + this.directClientUserAgent + "\""); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void queryMetricsWithIndexMetrics() { List<String> itemIdList = new ArrayList<>(); 
for(int i = 0; i< 100; i++) { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = containerDirect.createItem(internalObjectNode); if(i%20 == 0) { itemIdList.add(internalObjectNode.getId()); } } String queryDiagnostics = null; List<String> queryList = new ArrayList<>(); StringBuilder queryBuilder = new StringBuilder("SELECT * from c where c.mypk in ("); for(int i = 0 ; i < itemIdList.size(); i++){ queryBuilder.append("'").append(itemIdList.get(i)).append("'"); if(i < (itemIdList.size()-1)) { queryBuilder.append(","); } else { queryBuilder.append(")"); } } queryList.add(queryBuilder.toString()); for (String query : queryList) { CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); options.setQueryMetricsEnabled(true); options.setIndexMetricsEnabled(true); Iterator<FeedResponse<InternalObjectNode>> iterator = containerDirect.queryItems(query, options, InternalObjectNode.class).iterableByPage().iterator(); while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); logger.info("This is query diagnostics {}", queryDiagnostics); if (feedResponse.getResponseHeaders().containsKey(HttpConstants.HttpHeaders.INDEX_UTILIZATION)) { assertThat(feedResponse.getResponseHeaders().get(HttpConstants.HttpHeaders.INDEX_UTILIZATION)).isNotNull(); assertThat(createFromJSONString(Utils.decodeBase64String(feedResponse.getResponseHeaders().get(HttpConstants.HttpHeaders.INDEX_UTILIZATION))).getUtilizedSingleIndexes()).isNotNull(); } } } } @Test(groups = {"simple"}, dataProvider = "query", timeOut = TIMEOUT) public void queryMetrics(String query, Boolean qmEnabled) { CosmosContainer directContainer = this.directClient.getDatabase(this.cosmosAsyncContainer.getDatabase().getId()) .getContainer(this.cosmosAsyncContainer.getId()); CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); if (qmEnabled != 
null) { options.setQueryMetricsEnabled(qmEnabled); } boolean qroupByFirstResponse = true; Iterator<FeedResponse<InternalObjectNode>> iterator = directContainer.queryItems(query, options, InternalObjectNode.class).iterableByPage().iterator(); assertThat(iterator.hasNext()).isTrue(); while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); String queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); assertThat(feedResponse.getResults().size()).isEqualTo(0); if (!query.contains("group by") || qroupByFirstResponse) { validateQueryDiagnostics(queryDiagnostics, qmEnabled, true); validateDirectModeQueryDiagnostics(queryDiagnostics, this.directClientUserAgent); if (query.contains("group by")) { qroupByFirstResponse = false; } } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void queryDiagnosticsOnOrderBy() { String containerId = "testcontainer"; cosmosAsyncDatabase.createContainer(containerId, "/mypk", ThroughputProperties.createManualThroughput(40000)).block(); CosmosAsyncContainer testcontainer = cosmosAsyncDatabase.getContainer(containerId); CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); options.setConsistencyLevel(ConsistencyLevel.EVENTUAL); testcontainer.createItem(getInternalObjectNode()).block(); options.setMaxDegreeOfParallelism(-1); String query = "SELECT * from c ORDER BY c._ts DESC"; CosmosPagedFlux<InternalObjectNode> cosmosPagedFlux = testcontainer.queryItems(query, options, InternalObjectNode.class); Set<String> partitionKeyRangeIds = new HashSet<>(); Set<String> pkRids = new HashSet<>(); cosmosPagedFlux.byPage().flatMap(feedResponse -> { String cosmosDiagnosticsString = feedResponse.getCosmosDiagnostics().toString(); Pattern pattern = Pattern.compile("(\"partitionKeyRangeId\":\")(\\d)"); Matcher matcher = pattern.matcher(cosmosDiagnosticsString); while (matcher.find()) { String group = matcher.group(2); partitionKeyRangeIds.add(group); } pattern = 
Pattern.compile("(pkrId:)(\\d)"); matcher = pattern.matcher(cosmosDiagnosticsString); while (matcher.find()) { String group = matcher.group(2); pkRids.add(group); } return Flux.just(feedResponse); }).blockLast(); assertThat(pkRids).isNotEmpty(); assertThat(pkRids).isEqualTo(partitionKeyRangeIds); deleteCollection(testcontainer); } private void validateDirectModeDiagnosticsOnSuccess( CosmosDiagnostics cosmosDiagnostics, CosmosClient testDirectClient, String userAgent) throws Exception { String diagnostics = cosmosDiagnostics.toString(); assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\""); assertThat(diagnostics).contains("supplementalResponseStatisticsList"); assertThat(diagnostics).contains("\"gatewayStatistics\":null"); assertThat(diagnostics).contains("addressResolutionStatistics"); assertThat(diagnostics).contains("\"metaDataName\":\"CONTAINER_LOOK_UP\""); assertThat(diagnostics).contains("\"metaDataName\":\"PARTITION_KEY_RANGE_LOOK_UP\""); assertThat(diagnostics).contains("\"metaDataName\":\"SERVER_ADDRESS_LOOKUP\""); assertThat(diagnostics).contains("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\""); assertThat(diagnostics).contains("\"userAgent\":\"" + userAgent + "\""); assertThat(diagnostics).containsAnyOf( "\"machineId\":\"" + tempMachineId + "\"", "\"machineId\":\"" + ClientTelemetry.getMachineId(null) + "\"" ); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); assertThat(diagnostics).contains("\"backendLatencyInMs\""); assertThat(diagnostics).contains("\"retryAfterInMs\""); assertThat(diagnostics).contains("\"channelStatistics\""); assertThat(cosmosDiagnostics.getContactedRegionNames()).isNotEmpty(); assertThat(cosmosDiagnostics.getDuration()).isNotNull(); validateTransportRequestTimelineDirect(diagnostics); validateRegionContacted(cosmosDiagnostics, testDirectClient.asyncClient()); validateChannelStatistics(cosmosDiagnostics); isValidJSON(diagnostics); } private void 
// (Continues the `private void` split onto the previous line.)
// Verifies the diagnostics produced by a FAILED direct-mode request: backend
// latency / retry-after fields, the expected user agent, the 409 conflict
// exception message, and that exception response headers are present.
validateDirectModeDiagnosticsOnException(CosmosDiagnostics cosmosDiagnostics, String userAgent) {
    String diagnosticsString = cosmosDiagnostics.toString();
    assertThat(diagnosticsString).contains("\"backendLatencyInMs\"");
    assertThat(diagnosticsString).contains("\"userAgent\":\"" + userAgent + "\"");
    assertThat(diagnosticsString).contains("\"retryAfterInMs\"");
    // Message recorded for the createItem conflict (escaped JSON-in-JSON).
    assertThat(diagnosticsString).contains("\"exceptionMessage\":\"[\\\"Resource with specified id or name already exists.\\\"]\"");
    assertThat(diagnosticsString).contains("\"exceptionResponseHeaders\"");
    assertThat(diagnosticsString).doesNotContain("\"exceptionResponseHeaders\": \"{}\"");
    validateTransportRequestTimelineDirect(diagnosticsString);
    validateChannelStatistics(cosmosDiagnostics);
}

// Asserts the diagnostics shape common to direct-mode QUERY responses
// (direct transport statistics present, no gateway statistics).
private void validateDirectModeQueryDiagnostics(String diagnostics, String userAgent) {
    assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\"");
    assertThat(diagnostics).contains("supplementalResponseStatisticsList");
    assertThat(diagnostics).contains("responseStatisticsList");
    assertThat(diagnostics).contains("\"gatewayStatistics\":null");
    assertThat(diagnostics).contains("addressResolutionStatistics");
    assertThat(diagnostics).contains("\"userAgent\":\"" + userAgent + "\"");
    assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*");
}

// Asserts the diagnostics shape common to gateway-mode QUERY responses
// (gateway statistics populated, regions contacted recorded).
private void validateGatewayModeQueryDiagnostics(String diagnostics, String userAgent) {
    assertThat(diagnostics).contains("\"connectionMode\":\"GATEWAY\"");
    assertThat(diagnostics).doesNotContain(("\"gatewayStatistics\":null"));
    assertThat(diagnostics).contains("\"operationType\":\"Query\"");
    assertThat(diagnostics).contains("\"userAgent\":\"" + userAgent + "\"");
    assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*");
    assertThat(diagnostics).contains("\"regionsContacted\"");
}

// Gateway-mode variant of the queryMetrics test; signature opens here and the
// body continues on the next source line.
@Test(groups = {"simple"}, dataProvider = "query", timeOut = TIMEOUT*2)
public void queryDiagnosticsGatewayMode(String query, Boolean qmEnabled) {
CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); List<String> itemIdList = new ArrayList<>(); for (int i = 0; i < 100; i++) { InternalObjectNode internalObjectNode = getInternalObjectNode(); containerGateway.createItem(internalObjectNode); if (i % 20 == 0) { itemIdList.add(internalObjectNode.getId()); } } boolean qroupByFirstResponse = true; if (qmEnabled != null) { options.setQueryMetricsEnabled(qmEnabled); } Iterator<FeedResponse<InternalObjectNode>> iterator = containerGateway .queryItems(query, options, InternalObjectNode.class) .iterableByPage() .iterator(); assertThat(iterator.hasNext()).isTrue(); while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); String queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); assertThat(feedResponse.getResults().size()).isEqualTo(0); if (!query.contains("group by") || qroupByFirstResponse) { validateQueryDiagnostics(queryDiagnostics, qmEnabled, true); validateGatewayModeQueryDiagnostics(queryDiagnostics, this.gatewayClientUserAgent); if (query.contains("group by")) { qroupByFirstResponse = false; } } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void queryMetricsWithADifferentLocale() { Locale.setDefault(Locale.GERMAN); String query = "select * from root where root.id= \"someid\""; CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); Iterator<FeedResponse<InternalObjectNode>> iterator = this.containerGateway.queryItems(query, options, InternalObjectNode.class) .iterableByPage().iterator(); double requestCharge = 0; while (iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); requestCharge += feedResponse.getRequestCharge(); } assertThat(requestCharge).isGreaterThan(0); Locale.setDefault(Locale.ROOT); } private static void validateQueryDiagnostics( String queryDiagnostics, Boolean qmEnabled, boolean expectQueryPlanDiagnostics) { if (qmEnabled == null || qmEnabled) { 
// (Interior of validateQueryDiagnostics; the `if (qmEnabled == null || qmEnabled) {`
// guard opens on the previous source line.)
// Query metrics default to ON, so null is treated the same as true.
        assertThat(queryDiagnostics).contains("Retrieved Document Count");
        assertThat(queryDiagnostics).contains("Query Preparation Times");
        assertThat(queryDiagnostics).contains("Runtime Execution Times");
        assertThat(queryDiagnostics).contains("Partition Execution Timeline");
    } else {
        // Metrics explicitly disabled: none of the metric sections may appear.
        assertThat(queryDiagnostics).doesNotContain("Retrieved Document Count");
        assertThat(queryDiagnostics).doesNotContain("Query Preparation Times");
        assertThat(queryDiagnostics).doesNotContain("Runtime Execution Times");
        assertThat(queryDiagnostics).doesNotContain("Partition Execution Timeline");
    }
    // Query-plan diagnostics are only emitted for queries that required a plan
    // (not for e.g. single-partition readAll), independent of qmEnabled.
    if (expectQueryPlanDiagnostics) {
        assertThat(queryDiagnostics).contains("QueryPlan Start Time (UTC)=");
        assertThat(queryDiagnostics).contains("QueryPlan End Time (UTC)=");
        assertThat(queryDiagnostics).contains("QueryPlan Duration (ms)=");
    } else {
        assertThat(queryDiagnostics).doesNotContain("QueryPlan Start Time (UTC)=");
        assertThat(queryDiagnostics).doesNotContain("QueryPlan End Time (UTC)=");
        assertThat(queryDiagnostics).doesNotContain("QueryPlan Duration (ms)=");
    }
}

// Reads all items of one logical partition (page size 5) and validates the
// per-page query diagnostics; no query plan is expected for readAllItems.
@Test(groups = {"simple"}, dataProvider = "readAllItemsOfLogicalPartition", timeOut = TIMEOUT)
public void queryMetricsForReadAllItemsOfLogicalPartition(Integer expectedItemCount, Boolean qmEnabled) {
    String pkValue = UUID.randomUUID().toString();
    // Seed the logical partition; the response itself is not used.
    for (int i = 0; i < expectedItemCount; i++) {
        InternalObjectNode internalObjectNode = getInternalObjectNode(pkValue);
        CosmosItemResponse<InternalObjectNode> createResponse = containerGateway.createItem(internalObjectNode);
    }
    CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
    if (qmEnabled != null) {
        options = options.setQueryMetricsEnabled(qmEnabled);
    }
    // Force multiple pages so several FeedResponses are validated.
    ModelBridgeInternal.setQueryRequestOptionsMaxItemCount(options, 5);
    Iterator<FeedResponse<InternalObjectNode>> iterator =
        this.containerGateway
            .readAllItems(
                new PartitionKey(pkValue),
                options,
                InternalObjectNode.class)
            .iterableByPage().iterator();
    assertThat(iterator.hasNext()).isTrue();
    int actualItemCount = 0;
    while
(iterator.hasNext()) { FeedResponse<InternalObjectNode> feedResponse = iterator.next(); String queryDiagnostics = feedResponse.getCosmosDiagnostics().toString(); actualItemCount += feedResponse.getResults().size(); validateQueryDiagnostics(queryDiagnostics, qmEnabled, false); } assertThat(actualItemCount).isEqualTo(expectedItemCount); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void directDiagnosticsOnException() throws Exception { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosItemResponse<InternalObjectNode> createResponse = null; try { createResponse = containerDirect.createItem(internalObjectNode); CosmosItemRequestOptions cosmosItemRequestOptions = new CosmosItemRequestOptions(); ModelBridgeInternal.setPartitionKey(cosmosItemRequestOptions, new PartitionKey("wrongPartitionKey")); CosmosItemResponse<InternalObjectNode> readResponse = containerDirect.readItem(BridgeInternal.getProperties(createResponse).getId(), new PartitionKey("wrongPartitionKey"), InternalObjectNode.class); fail("request should fail as partition key is wrong"); } catch (CosmosException exception) { isValidJSON(exception.toString()); isValidJSON(exception.getMessage()); String diagnostics = exception.getDiagnostics().toString(); assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND); assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\""); assertThat(diagnostics).contains("\"userAgent\":\"" + this.directClientUserAgent + "\""); assertThat(diagnostics).doesNotContain(("\"resourceAddress\":null")); assertThat(exception.getDiagnostics().getContactedRegionNames()).isNotEmpty(); assertThat(exception.getDiagnostics().getDuration()).isNotNull(); assertThat(diagnostics).contains("\"backendLatencyInMs\""); assertThat(diagnostics).contains("\"retryAfterInMs\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); assertThat(diagnostics).contains("\"exceptionMessage\":\"[\\\"Resource Not Found."); 
assertThat(diagnostics).contains("\"exceptionResponseHeaders\""); assertThat(diagnostics).doesNotContain("\"exceptionResponseHeaders\":null"); isValidJSON(diagnostics); validateTransportRequestTimelineDirect(diagnostics); validateRegionContacted(createResponse.getDiagnostics(), directClient.asyncClient()); ObjectNode diagnosticsNode = (ObjectNode) OBJECT_MAPPER.readTree(diagnostics); JsonNode responseStatisticsList = diagnosticsNode.get("responseStatisticsList"); assertThat(responseStatisticsList.isArray()).isTrue(); assertThat(responseStatisticsList.size()).isGreaterThan(0); JsonNode storeResult = responseStatisticsList.get(0).get("storeResult"); assertThat(storeResult).isNotNull(); JsonNode replicaStatusList = storeResult.get("replicaStatusList"); assertThat(replicaStatusList.isArray()).isTrue(); assertThat(replicaStatusList.size()).isGreaterThan(0); } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void directDiagnosticsOnMetadataException() { InternalObjectNode internalObjectNode = getInternalObjectNode(); CosmosClient client = null; try { client = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); CosmosContainer container = client.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); HttpClient mockHttpClient = Mockito.mock(HttpClient.class); Mockito.when(mockHttpClient.send(Mockito.any(HttpRequest.class), Mockito.any(Duration.class))) .thenReturn(Mono.error(new CosmosException(400, "TestBadRequest"))); RxStoreModel rxGatewayStoreModel = rxGatewayStoreModel = ReflectionUtils.getGatewayProxy((RxDocumentClientImpl) client.asyncClient().getDocClientWrapper()); ReflectionUtils.setGatewayHttpClient(rxGatewayStoreModel, mockHttpClient); container.createItem(internalObjectNode); fail("request should fail as bad request"); } catch (CosmosException exception) { isValidJSON(exception.toString()); 
isValidJSON(exception.getMessage()); String diagnostics = exception.getDiagnostics().toString(); assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.BADREQUEST); assertThat(diagnostics).contains("\"connectionMode\":\"DIRECT\""); assertThat(diagnostics).contains("\"exceptionMessage\":\"TestBadRequest\""); assertThat(diagnostics).doesNotContain(("\"resourceAddress\":null")); assertThat(diagnostics).contains("\"resourceType\":\"DocumentCollection\""); assertThat(exception.getDiagnostics().getContactedRegionNames()).isNotEmpty(); assertThat(exception.getDiagnostics().getDuration()).isNotNull(); isValidJSON(diagnostics); } finally { if (client != null) { client.close(); } } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void supplementalResponseStatisticsList() throws Exception { ClientSideRequestStatistics clientSideRequestStatistics = new ClientSideRequestStatistics(mockDiagnosticsClientContext()); for (int i = 0; i < 15; i++) { RxDocumentServiceRequest rxDocumentServiceRequest = RxDocumentServiceRequest.create(mockDiagnosticsClientContext(), OperationType.Head, ResourceType.Document); clientSideRequestStatistics.recordResponse(rxDocumentServiceRequest, null, null); } List<ClientSideRequestStatistics.StoreResponseStatistics> storeResponseStatistics = getStoreResponseStatistics(clientSideRequestStatistics); ObjectMapper objectMapper = new ObjectMapper(); String diagnostics = objectMapper.writeValueAsString(clientSideRequestStatistics); JsonNode jsonNode = objectMapper.readTree(diagnostics); ArrayNode supplementalResponseStatisticsListNode = (ArrayNode) jsonNode.get("supplementalResponseStatisticsList"); assertThat(storeResponseStatistics.size()).isEqualTo(15); assertThat(supplementalResponseStatisticsListNode.size()).isEqualTo(10); clearStoreResponseStatistics(clientSideRequestStatistics); storeResponseStatistics = getStoreResponseStatistics(clientSideRequestStatistics); assertThat(storeResponseStatistics.size()).isEqualTo(0); for (int i = 
0; i < 7; i++) { RxDocumentServiceRequest rxDocumentServiceRequest = RxDocumentServiceRequest.create(mockDiagnosticsClientContext(), OperationType.Head, ResourceType.Document); clientSideRequestStatistics.recordResponse(rxDocumentServiceRequest, null, null); } storeResponseStatistics = getStoreResponseStatistics(clientSideRequestStatistics); objectMapper = new ObjectMapper(); diagnostics = objectMapper.writeValueAsString(clientSideRequestStatistics); jsonNode = objectMapper.readTree(diagnostics); supplementalResponseStatisticsListNode = (ArrayNode) jsonNode.get("supplementalResponseStatisticsList"); assertThat(storeResponseStatistics.size()).isEqualTo(7); assertThat(supplementalResponseStatisticsListNode.size()).isEqualTo(7); for(JsonNode node : supplementalResponseStatisticsListNode) { assertThat(node.get("storeResult").asText()).isNotNull(); String requestResponseTimeUTC = node.get("requestResponseTimeUTC").asText(); Instant instant = Instant.from(RESPONSE_TIME_FORMATTER.parse(requestResponseTimeUTC)); assertThat(Instant.now().toEpochMilli() - instant.toEpochMilli()).isLessThan(5000); assertThat(node.get("requestResponseTimeUTC")).isNotNull(); assertThat(node.get("requestOperationType")).isNotNull(); assertThat(node.get("requestSessionToken")).isNotNull(); } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void serializationOnVariousScenarios() { CosmosDatabaseResponse cosmosDatabase = gatewayClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).read(); String diagnostics = cosmosDatabase.getDiagnostics().toString(); assertThat(diagnostics).contains("\"serializationType\":\"DATABASE_DESERIALIZATION\""); CosmosContainerResponse containerResponse = this.containerGateway.read(); diagnostics = containerResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"serializationType\":\"CONTAINER_DESERIALIZATION\""); TestItem testItem = new TestItem(); testItem.id = "TestId"; testItem.mypk = "TestPk"; CosmosItemResponse<TestItem> itemResponse 
= this.containerGateway.createItem(testItem); diagnostics = itemResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\""); testItem.id = "TestId2"; testItem.mypk = "TestPk"; itemResponse = this.containerGateway.createItem(testItem, new PartitionKey("TestPk"), null); diagnostics = itemResponse.getDiagnostics().toString(); assertThat(diagnostics).doesNotContain("\"serializationType\":\"PARTITION_KEY_FETCH_SERIALIZATION\""); assertThat(diagnostics).doesNotContain("\"serializationType\":\"ITEM_DESERIALIZATION\""); TestItem readTestItem = itemResponse.getItem(); diagnostics = itemResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"serializationType\":\"ITEM_DESERIALIZATION\""); CosmosItemResponse<InternalObjectNode> readItemResponse = this.containerGateway.readItem(testItem.id, new PartitionKey(testItem.mypk), null, InternalObjectNode.class); InternalObjectNode properties = readItemResponse.getItem(); diagnostics = readItemResponse.getDiagnostics().toString(); assertThat(diagnostics).contains("\"serializationType\":\"ITEM_DESERIALIZATION\""); assertThat(diagnostics).contains("\"userAgent\":\"" + this.gatewayClientUserAgent + "\""); assertThat(diagnostics).containsPattern("(?s).*?\"activityId\":\"[^\\s\"]+\".*"); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void rntbdRequestResponseLengthStatistics() throws Exception { TestItem testItem = new TestItem(); testItem.id = UUID.randomUUID().toString(); testItem.mypk = UUID.randomUUID().toString(); int testItemLength = OBJECT_MAPPER.writeValueAsBytes(testItem).length; CosmosContainer container = directClient.getDatabase(this.cosmosAsyncContainer.getDatabase().getId()).getContainer(this.cosmosAsyncContainer.getId()); CosmosItemResponse<TestItem> createItemResponse = container.createItem(testItem); validate(createItemResponse.getDiagnostics(), testItemLength, ModelBridgeInternal.getPayloadLength(createItemResponse)); try { 
container.createItem(testItem); fail("expected to fail due to 409"); } catch (CosmosException e) { logger.info("Diagnostics are : {}", e.getDiagnostics()); String diagnostics = e.getDiagnostics().toString(); assertThat(diagnostics).contains("\"exceptionMessage\":\"[\\\"Resource with specified id or name already exists.\\\"]\""); assertThat(diagnostics).contains("\"exceptionResponseHeaders\""); assertThat(diagnostics).doesNotContain("\"exceptionResponseHeaders\": \"{}\""); validate(e.getDiagnostics(), testItemLength, 0); } CosmosItemResponse<TestItem> readItemResponse = container.readItem(testItem.id, new PartitionKey(testItem.mypk), TestItem.class); validate(readItemResponse.getDiagnostics(), 0, ModelBridgeInternal.getPayloadLength(readItemResponse)); CosmosItemResponse<Object> deleteItemResponse = container.deleteItem(testItem, null); validate(deleteItemResponse.getDiagnostics(), 0, 0); } @Test(groups = {"simple"}, dataProvider = "connectionStateListenerArgProvider", timeOut = TIMEOUT) public void rntbdStatistics(boolean connectionStateListenerEnabled) throws Exception { Instant beforeClientInitialization = Instant.now(); CosmosClient client1 = null; try { DirectConnectionConfig connectionConfig = DirectConnectionConfig.getDefaultConfig(); connectionConfig.setConnectionEndpointRediscoveryEnabled(connectionStateListenerEnabled); client1 = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .directMode(connectionConfig) .buildClient(); TestItem testItem = new TestItem(); testItem.id = UUID.randomUUID().toString(); testItem.mypk = UUID.randomUUID().toString(); int testItemLength = OBJECT_MAPPER.writeValueAsBytes(testItem).length; CosmosContainer container = client1.getDatabase(this.cosmosAsyncContainer.getDatabase().getId()).getContainer(this.cosmosAsyncContainer.getId()); Thread.sleep(1000); Instant beforeInitializingRntbdServiceEndpoint = Instant.now(); CosmosItemResponse<TestItem> operation1Response = 
container.upsertItem(testItem); Instant afterInitializingRntbdServiceEndpoint = Instant.now(); Thread.sleep(1000); Instant beforeOperation2 = Instant.now(); CosmosItemResponse<TestItem> operation2Response = container.upsertItem(testItem); Instant afterOperation2 = Instant.now(); Thread.sleep(1000); Instant beforeOperation3 = Instant.now(); CosmosItemResponse<TestItem> operation3Response = container.upsertItem(testItem); Instant afterOperation3 = Instant.now(); validateRntbdStatistics(operation3Response.getDiagnostics(), beforeClientInitialization, beforeInitializingRntbdServiceEndpoint, afterInitializingRntbdServiceEndpoint, beforeOperation2, afterOperation2, beforeOperation3, afterOperation3, connectionStateListenerEnabled); CosmosItemResponse<TestItem> readItemResponse = container.readItem(testItem.id, new PartitionKey(testItem.mypk), TestItem.class); validate(readItemResponse.getDiagnostics(), 0, ModelBridgeInternal.getPayloadLength(readItemResponse)); CosmosItemResponse<Object> deleteItemResponse = container.deleteItem(testItem, null); validate(deleteItemResponse.getDiagnostics(), 0, 0); } finally { LifeCycleUtils.closeQuietly(client1); } } private void validateRntbdStatistics(CosmosDiagnostics cosmosDiagnostics, Instant clientInitializationTime, Instant beforeInitializingRntbdServiceEndpoint, Instant afterInitializingRntbdServiceEndpoint, Instant beforeOperation2, Instant afterOperation2, Instant beforeOperation3, Instant afterOperation3, boolean connectionStateListenerEnabled) throws Exception { ObjectNode diagnostics = (ObjectNode) OBJECT_MAPPER.readTree(cosmosDiagnostics.toString()); JsonNode responseStatisticsList = diagnostics.get("responseStatisticsList"); assertThat(responseStatisticsList.isArray()).isTrue(); assertThat(responseStatisticsList.size()).isGreaterThan(0); JsonNode storeResult = responseStatisticsList.get(0).get("storeResult"); assertThat(storeResult).isNotNull(); JsonNode replicaStatusList = storeResult.get("replicaStatusList"); 
assertThat(replicaStatusList.isArray()).isTrue(); assertThat(replicaStatusList.size()).isGreaterThan(0); JsonNode serviceEndpointStatistics = storeResult.get("serviceEndpointStatistics"); assertThat(serviceEndpointStatistics).isNotNull(); assertThat(serviceEndpointStatistics.get("availableChannels").asInt(-1)).isGreaterThan(0); assertThat(serviceEndpointStatistics.get("acquiredChannels").asInt(-1)).isEqualTo(0); assertThat(serviceEndpointStatistics.get("inflightRequests").asInt(-1)).isEqualTo(1); assertThat(serviceEndpointStatistics.get("isClosed").asBoolean()).isEqualTo(false); JsonNode channelStatistics = storeResult.get("channelStatistics"); assertThat(channelStatistics).isNotNull(); assertThat(channelStatistics.get("channelId").asText()).isNotEmpty(); assertThat(channelStatistics.get("channelTaskQueueSize").asInt(-1)).isGreaterThanOrEqualTo(0); assertThat(channelStatistics.get("pendingRequestsCount").asInt(-1)).isGreaterThanOrEqualTo(0); assertThat(channelStatistics.get("lastReadTime").asText()).isNotEmpty(); assertThat(channelStatistics.get("waitForConnectionInit").asText()).isNotEmpty(); JsonNode connectionStateListenerMetrics = serviceEndpointStatistics.get("cerMetrics"); if (connectionStateListenerEnabled) { assertThat(connectionStateListenerMetrics).isNotNull(); assertThat(connectionStateListenerMetrics.get("lastCallTimestamp")).isNull(); assertThat(connectionStateListenerMetrics.get("lastActionableContext")).isNull(); } else { assertThat(connectionStateListenerMetrics).isNull(); } Instant beforeInitializationThreshold = beforeInitializingRntbdServiceEndpoint.minusMillis(1); assertThat(Instant.parse(serviceEndpointStatistics.get("createdTime").asText())) .isAfterOrEqualTo(beforeInitializationThreshold); Instant afterInitializationThreshold = afterInitializingRntbdServiceEndpoint.plusMillis(2); assertThat(Instant.parse(serviceEndpointStatistics.get("createdTime").asText())) .isBeforeOrEqualTo(afterInitializationThreshold); Instant afterOperation2Threshold = 
afterOperation2.plusMillis(2); Instant beforeOperation2Threshold = beforeOperation2.minusMillis(2); assertThat(Instant.parse(serviceEndpointStatistics.get("lastRequestTime").asText())) .isAfterOrEqualTo(beforeOperation2Threshold.toString()) .isBeforeOrEqualTo(afterOperation2Threshold.toString()); assertThat(Instant.parse(serviceEndpointStatistics.get("lastSuccessfulRequestTime").asText())) .isAfterOrEqualTo(beforeOperation2Threshold.toString()) .isBeforeOrEqualTo(afterOperation2Threshold.toString()); } private void validate(CosmosDiagnostics cosmosDiagnostics, int expectedRequestPayloadSize, int expectedResponsePayloadSize) throws Exception { ObjectNode diagnostics = (ObjectNode) OBJECT_MAPPER.readTree(cosmosDiagnostics.toString()); JsonNode responseStatisticsList = diagnostics.get("responseStatisticsList"); assertThat(responseStatisticsList.isArray()).isTrue(); assertThat(responseStatisticsList.size()).isGreaterThan(0); JsonNode storeResult = responseStatisticsList.get(0).get("storeResult"); boolean hasPayload = storeResult.get("exceptionMessage") == null; assertThat(storeResult).isNotNull(); assertThat(storeResult.get("rntbdRequestLengthInBytes").asInt(-1)).isGreaterThan(expectedRequestPayloadSize); assertThat(storeResult.get("rntbdRequestLengthInBytes").asInt(-1)).isGreaterThan(expectedRequestPayloadSize); assertThat(storeResult.get("requestPayloadLengthInBytes").asInt(-1)).isEqualTo(expectedRequestPayloadSize); if (hasPayload) { assertThat(storeResult.get("responsePayloadLengthInBytes").asInt(-1)).isEqualTo(expectedResponsePayloadSize); } assertThat(storeResult.get("rntbdResponseLengthInBytes").asInt(-1)).isGreaterThan(expectedResponsePayloadSize); } @Test(groups = {"emulator"}, timeOut = TIMEOUT) public void addressResolutionStatistics() { CosmosClient client1 = null; CosmosClient client2 = null; String databaseId = DatabaseForTest.generateId(); String containerId = UUID.randomUUID().toString(); CosmosDatabase cosmosDatabase = null; CosmosContainer 
cosmosContainer = null; try { client1 = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); client1.createDatabase(databaseId); cosmosDatabase = client1.getDatabase(databaseId); cosmosDatabase.createContainer(containerId, "/mypk"); InternalObjectNode internalObjectNode = getInternalObjectNode(); cosmosContainer = cosmosDatabase.getContainer(containerId); CosmosItemResponse<InternalObjectNode> writeResourceResponse = cosmosContainer.createItem(internalObjectNode); assertThat(writeResourceResponse.getDiagnostics().toString()).contains("addressResolutionStatistics"); assertThat(writeResourceResponse.getDiagnostics().toString()).contains("\"inflightRequest\":false"); assertThat(writeResourceResponse.getDiagnostics().toString()).doesNotContain("endTime=\"null\""); assertThat(writeResourceResponse.getDiagnostics().toString()).contains("\"exceptionMessage\":null"); client2 = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildClient(); cosmosDatabase = client2.getDatabase(databaseId); cosmosContainer = cosmosDatabase.getContainer(containerId); AsyncDocumentClient asyncDocumentClient = client2.asyncClient().getContextClient(); GlobalAddressResolver addressResolver = (GlobalAddressResolver) FieldUtils.readField(asyncDocumentClient, "addressResolver", true); @SuppressWarnings("rawtypes") Map addressCacheByEndpoint = (Map) FieldUtils.readField(addressResolver, "addressCacheByEndpoint", true); Object endpointCache = addressCacheByEndpoint.values().toArray()[0]; GatewayAddressCache addressCache = (GatewayAddressCache) FieldUtils.readField(endpointCache, "addressCache", true); HttpClient httpClient = httpClient(true); FieldUtils.writeField(addressCache, "httpClient", httpClient, true); new Thread(() -> { try { Thread.sleep(5000); HttpClient httpClient1 = 
httpClient(false); FieldUtils.writeField(addressCache, "httpClient", httpClient1, true); } catch (Exception e) { fail(e.getMessage()); } }).start(); PartitionKey partitionKey = new PartitionKey(internalObjectNode.get("mypk")); CosmosItemResponse<InternalObjectNode> readResourceResponse = cosmosContainer.readItem(internalObjectNode.getId(), partitionKey, new CosmosItemRequestOptions(), InternalObjectNode.class); assertThat(readResourceResponse.getDiagnostics().toString()).contains("addressResolutionStatistics"); assertThat(readResourceResponse.getDiagnostics().toString()).contains("\"inflightRequest\":false"); assertThat(readResourceResponse.getDiagnostics().toString()).doesNotContain("endTime=\"null\""); assertThat(readResourceResponse.getDiagnostics().toString()).contains("\"exceptionMessage\":\"io.netty" + ".channel.AbstractChannel$AnnotatedConnectException: Connection refused"); } catch (Exception ex) { logger.error("Error in test addressResolutionStatistics", ex); fail("This test should not throw exception " + ex); } finally { safeDeleteSyncDatabase(cosmosDatabase); if (client1 != null) { client1.close(); } if (client2 != null) { client2.close(); } } } private InternalObjectNode getInternalObjectNode() { InternalObjectNode internalObjectNode = new InternalObjectNode(); String uuid = UUID.randomUUID().toString(); internalObjectNode.setId(uuid); BridgeInternal.setProperty(internalObjectNode, "mypk", uuid); return internalObjectNode; } private InternalObjectNode getInternalObjectNode(String pkValue) { InternalObjectNode internalObjectNode = new InternalObjectNode(); String uuid = UUID.randomUUID().toString(); internalObjectNode.setId(uuid); BridgeInternal.setProperty(internalObjectNode, "mypk", pkValue); return internalObjectNode; } private List<ClientSideRequestStatistics.StoreResponseStatistics> getStoreResponseStatistics(ClientSideRequestStatistics requestStatistics) throws Exception { Field storeResponseStatisticsField = 
ClientSideRequestStatistics.class.getDeclaredField("supplementalResponseStatisticsList"); storeResponseStatisticsField.setAccessible(true); @SuppressWarnings({"unchecked"}) List<ClientSideRequestStatistics.StoreResponseStatistics> list = (List<ClientSideRequestStatistics.StoreResponseStatistics>) storeResponseStatisticsField.get(requestStatistics); return list; } private void clearStoreResponseStatistics(ClientSideRequestStatistics requestStatistics) throws Exception { Field storeResponseStatisticsField = ClientSideRequestStatistics.class.getDeclaredField("supplementalResponseStatisticsList"); storeResponseStatisticsField.setAccessible(true); storeResponseStatisticsField.set(requestStatistics, new ArrayList<ClientSideRequestStatistics.StoreResponseStatistics>()); } private void validateTransportRequestTimelineGateway(String diagnostics) { assertThat(diagnostics).contains("\"eventName\":\"connectionConfigured\""); assertThat(diagnostics).contains("\"eventName\":\"requestSent\""); assertThat(diagnostics).contains("\"eventName\":\"transitTime\""); assertThat(diagnostics).contains("\"eventName\":\"received\""); } private void validateTransportRequestTimelineDirect(String diagnostics) { assertThat(diagnostics).contains("\"eventName\":\"created\""); assertThat(diagnostics).contains("\"eventName\":\"queued\""); assertThat(diagnostics).contains("\"eventName\":\"channelAcquisitionStarted\""); assertThat(diagnostics).contains("\"eventName\":\"pipelined\""); assertThat(diagnostics).contains("\"eventName\":\"transitTime\""); assertThat(diagnostics).contains("\"eventName\":\"decodeTime"); assertThat(diagnostics).contains("\"eventName\":\"received\""); assertThat(diagnostics).contains("\"eventName\":\"completed\""); assertThat(diagnostics).contains("\"startTimeUTC\""); assertThat(diagnostics).contains("\"durationInMilliSecs\""); } public void isValidJSON(final String json) { try { final JsonParser parser = new JsonFactory().createParser(json); while (parser.nextToken() != null) 
{ } } catch (IOException ex) { fail("Diagnostic string is not in json format ", ex); } } private HttpClient httpClient(boolean fakeProxy) { HttpClientConfig httpClientConfig; if(fakeProxy) { httpClientConfig = new HttpClientConfig(new Configs()) .withProxy(new ProxyOptions(ProxyOptions.Type.HTTP, new InetSocketAddress("localhost", 8888))); } else { httpClientConfig = new HttpClientConfig(new Configs()); } return HttpClient.createFixed(httpClientConfig); } private IndexUtilizationInfo createFromJSONString(String jsonString) { ObjectMapper indexUtilizationInfoObjectMapper = new ObjectMapper(); IndexUtilizationInfo indexUtilizationInfo = null; try { indexUtilizationInfo = indexUtilizationInfoObjectMapper.readValue(jsonString, IndexUtilizationInfo.class); } catch (JsonProcessingException e) { logger.error("Json not correctly formed ", e); } return indexUtilizationInfo; } private void validateRegionContacted(CosmosDiagnostics cosmosDiagnostics, CosmosAsyncClient cosmosAsyncClient) throws Exception { RxDocumentClientImpl rxDocumentClient = (RxDocumentClientImpl) ReflectionUtils.getAsyncDocumentClient(cosmosAsyncClient); GlobalEndpointManager globalEndpointManager = ReflectionUtils.getGlobalEndpointManager(rxDocumentClient); LocationCache locationCache = ReflectionUtils.getLocationCache(globalEndpointManager); Field locationInfoField = LocationCache.class.getDeclaredField("locationInfo"); locationInfoField.setAccessible(true); Object locationInfo = locationInfoField.get(locationCache); Class<?> DatabaseAccountLocationsInfoClass = Class.forName("com.azure.cosmos.implementation.routing" + ".LocationCache$DatabaseAccountLocationsInfo"); Field availableWriteEndpointByLocation = DatabaseAccountLocationsInfoClass.getDeclaredField( "availableWriteEndpointByLocation"); availableWriteEndpointByLocation.setAccessible(true); @SuppressWarnings("unchecked") Map<String, URI> map = (Map<String, URI>) availableWriteEndpointByLocation.get(locationInfo); String regionName = 
map.keySet().iterator().next(); assertThat(cosmosDiagnostics.getContactedRegionNames().size()).isEqualTo(1); assertThat(cosmosDiagnostics.getContactedRegionNames().iterator().next()).isEqualTo(regionName.toLowerCase()); } private void validateChannelStatistics(CosmosDiagnostics cosmosDiagnostics) { for (ClientSideRequestStatistics clientSideRequestStatistics : cosmosDiagnostics.getClientSideRequestStatistics()) { for (ClientSideRequestStatistics.StoreResponseStatistics storeResponseStatistics : clientSideRequestStatistics.getResponseStatisticsList()) { assertThat(storeResponseStatistics).isNotNull(); RntbdChannelStatistics rntbdChannelStatistics = storeResponseStatistics .getStoreResult() .getStoreResponseDiagnostics() .getRntbdChannelStatistics(); assertThat(rntbdChannelStatistics).isNotNull(); try { String rntbdChannelStatisticsString = Utils.getSimpleObjectMapper().writeValueAsString(rntbdChannelStatistics); assertThat(rntbdChannelStatisticsString).contains("\"channelId\":\"" + rntbdChannelStatistics.getChannelId() + "\""); assertThat(rntbdChannelStatisticsString) .contains("\"channelTaskQueueSize\":" + rntbdChannelStatistics.getChannelTaskQueueSize()); assertThat(rntbdChannelStatisticsString) .contains("\"pendingRequestsCount\":" + rntbdChannelStatistics.getPendingRequestsCount()); assertThat(rntbdChannelStatisticsString) .contains("\"lastReadTime\":\"" + rntbdChannelStatistics.getLastReadTime() + "\""); if (rntbdChannelStatistics.getTransitTimeoutCount() > 0) { assertThat(rntbdChannelStatisticsString) .contains("\"transitTimeoutCount\":" + rntbdChannelStatistics.getTransitTimeoutCount()); assertThat(rntbdChannelStatisticsString) .contains("\"transitTimeoutStartingTime\":\"" + rntbdChannelStatistics.getTransitTimeoutStartingTime() + "\""); } else { assertThat(rntbdChannelStatisticsString) .doesNotContain("\"transitTimeoutCount\":" + rntbdChannelStatistics.getTransitTimeoutCount()); assertThat(rntbdChannelStatisticsString) 
.doesNotContain("\"transitTimeoutStartingTime\":\"" + rntbdChannelStatistics.getTransitTimeoutStartingTime() + "\""); } assertThat(rntbdChannelStatisticsString).contains("\"waitForConnectionInit\":" + rntbdChannelStatistics.isWaitForConnectionInit()); } catch (JsonProcessingException e) { fail("Failed to parse RntbdChannelStatistics"); } } } } public static class TestItem { public String id; public String mypk; public TestItem() { } } }
I think we should continue to send the `Date` header if possible. From the shared key auth docs it sounds like this *should* be possible, but I know there were some questions around whether that's accurate.
private HttpPipeline createHttpPipeline() { Configuration buildConfiguration = (configuration == null) ? Configuration.getGlobalConfiguration() : configuration; HttpLogOptions localHttpLogOptions = this.httpLogOptions == null ? new HttpLogOptions() : this.httpLogOptions; ClientOptions localClientOptions = this.clientOptions == null ? new ClientOptions() : this.clientOptions; List<HttpPipelinePolicy> policies = new ArrayList<>(); String clientName = PROPERTIES.getOrDefault(SDK_NAME, "UnknownName"); String clientVersion = PROPERTIES.getOrDefault(SDK_VERSION, "UnknownVersion"); String applicationId = CoreUtils.getApplicationId(localClientOptions, localHttpLogOptions); policies.add(new UserAgentPolicy(applicationId, clientName, clientVersion, buildConfiguration)); policies.add(new RequestIdPolicy()); policies.add(new AddHeadersFromContextPolicy()); HttpHeaders headers = new HttpHeaders(); localClientOptions.getHeaders().forEach(header -> headers.set(header.getName(), header.getValue())); if (headers.getSize() > 0) { policies.add(new AddHeadersPolicy(headers)); } this.pipelinePolicies.stream() .filter(p -> p.getPipelinePosition() == HttpPipelinePosition.PER_CALL) .forEach(p -> policies.add(p)); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(ClientBuilderUtil.validateAndGetRetryPolicy(retryPolicy, retryOptions, new RetryPolicy())); policies.add(new CookiePolicy()); if (tokenCredential != null) { policies.add(new BearerTokenAuthenticationPolicy(tokenCredential, DEFAULT_SCOPES)); } else if (batchSharedKeyCred != null) { policies.add(new BatchSharedKeyCredentialsPolicy(batchSharedKeyCred)); } this.pipelinePolicies.stream() .filter(p -> p.getPipelinePosition() == HttpPipelinePosition.PER_RETRY) .forEach(p -> policies.add(p)); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new HttpLoggingPolicy(httpLogOptions)); HttpPipeline httpPipeline = new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) 
.httpClient(httpClient) .clientOptions(localClientOptions) .build(); return httpPipeline; }
if (headers.getSize() > 0) {
private HttpPipeline createHttpPipeline() { Configuration buildConfiguration = (configuration == null) ? Configuration.getGlobalConfiguration() : configuration; HttpLogOptions localHttpLogOptions = this.httpLogOptions == null ? new HttpLogOptions() : this.httpLogOptions; ClientOptions localClientOptions = this.clientOptions == null ? new ClientOptions() : this.clientOptions; List<HttpPipelinePolicy> policies = new ArrayList<>(); String clientName = PROPERTIES.getOrDefault(SDK_NAME, "UnknownName"); String clientVersion = PROPERTIES.getOrDefault(SDK_VERSION, "UnknownVersion"); String applicationId = CoreUtils.getApplicationId(localClientOptions, localHttpLogOptions); policies.add(new UserAgentPolicy(applicationId, clientName, clientVersion, buildConfiguration)); policies.add(new RequestIdPolicy()); policies.add(new AddHeadersFromContextPolicy()); HttpHeaders headers = new HttpHeaders(); localClientOptions.getHeaders().forEach(header -> headers.set(header.getName(), header.getValue())); if (headers.getSize() > 0) { policies.add(new AddHeadersPolicy(headers)); } this.pipelinePolicies.stream() .filter(p -> p.getPipelinePosition() == HttpPipelinePosition.PER_CALL) .forEach(p -> policies.add(p)); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(ClientBuilderUtil.validateAndGetRetryPolicy(retryPolicy, retryOptions, new RetryPolicy())); policies.add(new AddDatePolicy()); policies.add(new CookiePolicy()); if (tokenCredential != null) { policies.add(new BearerTokenAuthenticationPolicy(tokenCredential, DEFAULT_SCOPES)); } else if (batchSharedKeyCred != null) { policies.add(new BatchSharedKeyCredentialsPolicy(batchSharedKeyCred)); } this.pipelinePolicies.stream() .filter(p -> p.getPipelinePosition() == HttpPipelinePosition.PER_RETRY) .forEach(p -> policies.add(p)); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new HttpLoggingPolicy(httpLogOptions)); HttpPipeline httpPipeline = new HttpPipelineBuilder() .policies(policies.toArray(new 
HttpPipelinePolicy[0])) .httpClient(httpClient) .clientOptions(localClientOptions) .build(); return httpPipeline; }
class BatchServiceClientBuilder implements HttpTrait<BatchServiceClientBuilder>, ConfigurationTrait<BatchServiceClientBuilder>, TokenCredentialTrait<BatchServiceClientBuilder>, EndpointTrait<BatchServiceClientBuilder> { @Generated private static final String SDK_NAME = "name"; @Generated private static final String SDK_VERSION = "version"; @Generated private static final String[] DEFAULT_SCOPES = new String[] {"https: @Generated private static final Map<String, String> PROPERTIES = CoreUtils.getProperties("azure-compute-batch.properties"); @Generated private final List<HttpPipelinePolicy> pipelinePolicies; /** Create an instance of the BatchServiceClientBuilder. */ @Generated public BatchServiceClientBuilder() { this.pipelinePolicies = new ArrayList<>(); } /* * The HTTP pipeline to send requests through. */ @Generated private HttpPipeline pipeline; /** {@inheritDoc}. */ @Generated @Override public BatchServiceClientBuilder pipeline(HttpPipeline pipeline) { this.pipeline = pipeline; return this; } /* * The HTTP client used to send the request. */ @Generated private HttpClient httpClient; /** {@inheritDoc}. */ @Generated @Override public BatchServiceClientBuilder httpClient(HttpClient httpClient) { this.httpClient = httpClient; return this; } /* * The logging configuration for HTTP requests and responses. */ @Generated private HttpLogOptions httpLogOptions; /** {@inheritDoc}. */ @Generated @Override public BatchServiceClientBuilder httpLogOptions(HttpLogOptions httpLogOptions) { this.httpLogOptions = httpLogOptions; return this; } /* * The client options such as application ID and custom headers to set on a request. */ @Generated private ClientOptions clientOptions; /** {@inheritDoc}. */ @Generated @Override public BatchServiceClientBuilder clientOptions(ClientOptions clientOptions) { this.clientOptions = clientOptions; return this; } /* * The retry options to configure retry policy for failed requests. 
*/ @Generated private RetryOptions retryOptions; /** {@inheritDoc}. */ @Generated @Override public BatchServiceClientBuilder retryOptions(RetryOptions retryOptions) { this.retryOptions = retryOptions; return this; } /** {@inheritDoc}. */ @Generated @Override public BatchServiceClientBuilder addPolicy(HttpPipelinePolicy customPolicy) { Objects.requireNonNull(customPolicy, "'customPolicy' cannot be null."); pipelinePolicies.add(customPolicy); return this; } /* * The configuration store that is used during construction of the service client. */ @Generated private Configuration configuration; /** {@inheritDoc}. */ @Generated @Override public BatchServiceClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /* * The TokenCredential used for authentication. */ @Generated private TokenCredential tokenCredential; /** {@inheritDoc}. */ @Override public BatchServiceClientBuilder credential(TokenCredential tokenCredential) { this.tokenCredential = tokenCredential; return this; } private BatchSharedKeyCredentials batchSharedKeyCred; public BatchServiceClientBuilder credential(BatchSharedKeyCredentials batchSharedKeyCred) { this.batchSharedKeyCred = Objects.requireNonNull(batchSharedKeyCred, "'batchSharedKeyCred' cannot be null."); this.tokenCredential = null; return this; } /* * The service endpoint */ @Generated private String endpoint; /** {@inheritDoc}. */ @Generated @Override public BatchServiceClientBuilder endpoint(String endpoint) { this.endpoint = endpoint; return this; } /* * Service version */ @Generated private BatchServiceVersion serviceVersion; /** * Sets Service version. * * @param serviceVersion the serviceVersion value. * @return the BatchServiceClientBuilder. */ @Generated public BatchServiceClientBuilder serviceVersion(BatchServiceVersion serviceVersion) { this.serviceVersion = serviceVersion; return this; } /* * The retry policy that will attempt to retry failed requests, if applicable. 
*/ @Generated private RetryPolicy retryPolicy; /** * Sets The retry policy that will attempt to retry failed requests, if applicable. * * @param retryPolicy the retryPolicy value. * @return the BatchServiceClientBuilder. */ @Generated public BatchServiceClientBuilder retryPolicy(RetryPolicy retryPolicy) { this.retryPolicy = retryPolicy; return this; } /** * Builds an instance of BatchServiceClientImpl with the provided parameters. * * @return an instance of BatchServiceClientImpl. */ @Generated private BatchServiceClientImpl buildInnerClient() { HttpPipeline localPipeline = (pipeline != null) ? pipeline : createHttpPipeline(); BatchServiceVersion localServiceVersion = (serviceVersion != null) ? serviceVersion : BatchServiceVersion.getLatest(); BatchServiceClientImpl client = new BatchServiceClientImpl( localPipeline, JacksonAdapter.createDefaultSerializerAdapter(), endpoint, localServiceVersion); return client; } /** * Builds an instance of ApplicationsAsyncClient class. * * @return an instance of ApplicationsAsyncClient. */ @Generated public ApplicationsAsyncClient buildApplicationsAsyncClient() { return new ApplicationsAsyncClient(buildInnerClient().getApplications()); } /** * Builds an instance of PoolAsyncClient class. * * @return an instance of PoolAsyncClient. */ @Generated public PoolAsyncClient buildPoolAsyncClient() { return new PoolAsyncClient(buildInnerClient().getPools()); } /** * Builds an instance of AccountAsyncClient class. * * @return an instance of AccountAsyncClient. */ @Generated public AccountAsyncClient buildAccountAsyncClient() { return new AccountAsyncClient(buildInnerClient().getAccounts()); } /** * Builds an instance of JobAsyncClient class. * * @return an instance of JobAsyncClient. */ @Generated public JobAsyncClient buildJobAsyncClient() { return new JobAsyncClient(buildInnerClient().getJobs()); } /** * Builds an instance of CertificatesAsyncClient class. * * @return an instance of CertificatesAsyncClient. 
*/ @Generated public CertificatesAsyncClient buildCertificatesAsyncClient() { return new CertificatesAsyncClient(buildInnerClient().getCertificates()); } /** * Builds an instance of FileAsyncClient class. * * @return an instance of FileAsyncClient. */ @Generated public FileAsyncClient buildFileAsyncClient() { return new FileAsyncClient(buildInnerClient().getFiles()); } /** * Builds an instance of JobScheduleAsyncClient class. * * @return an instance of JobScheduleAsyncClient. */ @Generated public JobScheduleAsyncClient buildJobScheduleAsyncClient() { return new JobScheduleAsyncClient(buildInnerClient().getJobSchedules()); } /** * Builds an instance of TaskAsyncClient class. * * @return an instance of TaskAsyncClient. */ @Generated public TaskAsyncClient buildTaskAsyncClient() { return new TaskAsyncClient(buildInnerClient().getTasks()); } /** * Builds an instance of ComputeNodesAsyncClient class. * * @return an instance of ComputeNodesAsyncClient. */ @Generated public ComputeNodesAsyncClient buildComputeNodesAsyncClient() { return new ComputeNodesAsyncClient(buildInnerClient().getComputeNodes()); } /** * Builds an instance of ComputeNodeExtensionsAsyncClient class. * * @return an instance of ComputeNodeExtensionsAsyncClient. */ @Generated public ComputeNodeExtensionsAsyncClient buildComputeNodeExtensionsAsyncClient() { return new ComputeNodeExtensionsAsyncClient(buildInnerClient().getComputeNodeExtensions()); } /** * Builds an instance of ApplicationsClient class. * * @return an instance of ApplicationsClient. */ @Generated public ApplicationsClient buildApplicationsClient() { return new ApplicationsClient(new ApplicationsAsyncClient(buildInnerClient().getApplications())); } /** * Builds an instance of PoolClient class. * * @return an instance of PoolClient. */ @Generated public PoolClient buildPoolClient() { return new PoolClient(new PoolAsyncClient(buildInnerClient().getPools())); } /** * Builds an instance of AccountClient class. 
* * @return an instance of AccountClient. */ @Generated public AccountClient buildAccountClient() { return new AccountClient(new AccountAsyncClient(buildInnerClient().getAccounts())); } /** * Builds an instance of JobClient class. * * @return an instance of JobClient. */ @Generated public JobClient buildJobClient() { return new JobClient(new JobAsyncClient(buildInnerClient().getJobs())); } /** * Builds an instance of CertificatesClient class. * * @return an instance of CertificatesClient. */ @Generated public CertificatesClient buildCertificatesClient() { return new CertificatesClient(new CertificatesAsyncClient(buildInnerClient().getCertificates())); } /** * Builds an instance of FileClient class. * * @return an instance of FileClient. */ @Generated public FileClient buildFileClient() { return new FileClient(new FileAsyncClient(buildInnerClient().getFiles())); } /** * Builds an instance of JobScheduleClient class. * * @return an instance of JobScheduleClient. */ @Generated public JobScheduleClient buildJobScheduleClient() { return new JobScheduleClient(new JobScheduleAsyncClient(buildInnerClient().getJobSchedules())); } /** * Builds an instance of TaskClient class. * * @return an instance of TaskClient. */ @Generated public TaskClient buildTaskClient() { return new TaskClient(new TaskAsyncClient(buildInnerClient().getTasks())); } /** * Builds an instance of ComputeNodesClient class. * * @return an instance of ComputeNodesClient. */ @Generated public ComputeNodesClient buildComputeNodesClient() { return new ComputeNodesClient(new ComputeNodesAsyncClient(buildInnerClient().getComputeNodes())); } /** * Builds an instance of ComputeNodeExtensionsClient class. * * @return an instance of ComputeNodeExtensionsClient. */ @Generated public ComputeNodeExtensionsClient buildComputeNodeExtensionsClient() { return new ComputeNodeExtensionsClient( new ComputeNodeExtensionsAsyncClient(buildInnerClient().getComputeNodeExtensions())); } }
class BatchServiceClientBuilder implements HttpTrait<BatchServiceClientBuilder>, ConfigurationTrait<BatchServiceClientBuilder>, TokenCredentialTrait<BatchServiceClientBuilder>, EndpointTrait<BatchServiceClientBuilder> { @Generated private static final String SDK_NAME = "name"; @Generated private static final String SDK_VERSION = "version"; @Generated private static final String[] DEFAULT_SCOPES = new String[] {"https: @Generated private static final Map<String, String> PROPERTIES = CoreUtils.getProperties("azure-compute-batch.properties"); @Generated private final List<HttpPipelinePolicy> pipelinePolicies; /** Create an instance of the BatchServiceClientBuilder. */ @Generated public BatchServiceClientBuilder() { this.pipelinePolicies = new ArrayList<>(); } /* * The HTTP pipeline to send requests through. */ @Generated private HttpPipeline pipeline; /** {@inheritDoc}. */ @Generated @Override public BatchServiceClientBuilder pipeline(HttpPipeline pipeline) { this.pipeline = pipeline; return this; } /* * The HTTP client used to send the request. */ @Generated private HttpClient httpClient; /** {@inheritDoc}. */ @Generated @Override public BatchServiceClientBuilder httpClient(HttpClient httpClient) { this.httpClient = httpClient; return this; } /* * The logging configuration for HTTP requests and responses. */ @Generated private HttpLogOptions httpLogOptions; /** {@inheritDoc}. */ @Generated @Override public BatchServiceClientBuilder httpLogOptions(HttpLogOptions httpLogOptions) { this.httpLogOptions = httpLogOptions; return this; } /* * The client options such as application ID and custom headers to set on a request. */ @Generated private ClientOptions clientOptions; /** {@inheritDoc}. */ @Generated @Override public BatchServiceClientBuilder clientOptions(ClientOptions clientOptions) { this.clientOptions = clientOptions; return this; } /* * The retry options to configure retry policy for failed requests. 
*/ @Generated private RetryOptions retryOptions; /** {@inheritDoc}. */ @Generated @Override public BatchServiceClientBuilder retryOptions(RetryOptions retryOptions) { this.retryOptions = retryOptions; return this; } /** {@inheritDoc}. */ @Generated @Override public BatchServiceClientBuilder addPolicy(HttpPipelinePolicy customPolicy) { Objects.requireNonNull(customPolicy, "'customPolicy' cannot be null."); pipelinePolicies.add(customPolicy); return this; } /* * The configuration store that is used during construction of the service client. */ @Generated private Configuration configuration; /** {@inheritDoc}. */ @Generated @Override public BatchServiceClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /* * The TokenCredential used for authentication. */ @Generated private TokenCredential tokenCredential; /** {@inheritDoc}. */ @Generated @Override public BatchServiceClientBuilder credential(TokenCredential tokenCredential) { this.tokenCredential = tokenCredential; return this; } private BatchSharedKeyCredentials batchSharedKeyCred; public BatchServiceClientBuilder credential(BatchSharedKeyCredentials batchSharedKeyCred) { this.batchSharedKeyCred = Objects.requireNonNull(batchSharedKeyCred, "'batchSharedKeyCred' cannot be null."); this.tokenCredential = null; return this; } /* * The service endpoint */ @Generated private String endpoint; /** {@inheritDoc}. */ @Generated @Override public BatchServiceClientBuilder endpoint(String endpoint) { this.endpoint = endpoint; return this; } /* * Service version */ @Generated private BatchServiceVersion serviceVersion; /** * Sets Service version. * * @param serviceVersion the serviceVersion value. * @return the BatchServiceClientBuilder. */ @Generated public BatchServiceClientBuilder serviceVersion(BatchServiceVersion serviceVersion) { this.serviceVersion = serviceVersion; return this; } /* * The retry policy that will attempt to retry failed requests, if applicable. 
*/ @Generated private RetryPolicy retryPolicy; /** * Sets The retry policy that will attempt to retry failed requests, if applicable. * * @param retryPolicy the retryPolicy value. * @return the BatchServiceClientBuilder. */ @Generated public BatchServiceClientBuilder retryPolicy(RetryPolicy retryPolicy) { this.retryPolicy = retryPolicy; return this; } /** * Builds an instance of BatchServiceClientImpl with the provided parameters. * * @return an instance of BatchServiceClientImpl. */ @Generated private BatchServiceClientImpl buildInnerClient() { HttpPipeline localPipeline = (pipeline != null) ? pipeline : createHttpPipeline(); BatchServiceVersion localServiceVersion = (serviceVersion != null) ? serviceVersion : BatchServiceVersion.getLatest(); BatchServiceClientImpl client = new BatchServiceClientImpl( localPipeline, JacksonAdapter.createDefaultSerializerAdapter(), endpoint, localServiceVersion); return client; } /** * Builds an instance of ApplicationsAsyncClient class. * * @return an instance of ApplicationsAsyncClient. */ @Generated public ApplicationsAsyncClient buildApplicationsAsyncClient() { return new ApplicationsAsyncClient(buildInnerClient().getApplications()); } /** * Builds an instance of PoolAsyncClient class. * * @return an instance of PoolAsyncClient. */ @Generated public PoolAsyncClient buildPoolAsyncClient() { return new PoolAsyncClient(buildInnerClient().getPools()); } /** * Builds an instance of AccountAsyncClient class. * * @return an instance of AccountAsyncClient. */ @Generated public AccountAsyncClient buildAccountAsyncClient() { return new AccountAsyncClient(buildInnerClient().getAccounts()); } /** * Builds an instance of JobAsyncClient class. * * @return an instance of JobAsyncClient. */ @Generated public JobAsyncClient buildJobAsyncClient() { return new JobAsyncClient(buildInnerClient().getJobs()); } /** * Builds an instance of CertificatesAsyncClient class. * * @return an instance of CertificatesAsyncClient. 
*/ @Generated public CertificatesAsyncClient buildCertificatesAsyncClient() { return new CertificatesAsyncClient(buildInnerClient().getCertificates()); } /** * Builds an instance of FileAsyncClient class. * * @return an instance of FileAsyncClient. */ @Generated public FileAsyncClient buildFileAsyncClient() { return new FileAsyncClient(buildInnerClient().getFiles()); } /** * Builds an instance of JobScheduleAsyncClient class. * * @return an instance of JobScheduleAsyncClient. */ @Generated public JobScheduleAsyncClient buildJobScheduleAsyncClient() { return new JobScheduleAsyncClient(buildInnerClient().getJobSchedules()); } /** * Builds an instance of TaskAsyncClient class. * * @return an instance of TaskAsyncClient. */ @Generated public TaskAsyncClient buildTaskAsyncClient() { return new TaskAsyncClient(buildInnerClient().getTasks()); } /** * Builds an instance of ComputeNodesAsyncClient class. * * @return an instance of ComputeNodesAsyncClient. */ @Generated public ComputeNodesAsyncClient buildComputeNodesAsyncClient() { return new ComputeNodesAsyncClient(buildInnerClient().getComputeNodes()); } /** * Builds an instance of ComputeNodeExtensionsAsyncClient class. * * @return an instance of ComputeNodeExtensionsAsyncClient. */ @Generated public ComputeNodeExtensionsAsyncClient buildComputeNodeExtensionsAsyncClient() { return new ComputeNodeExtensionsAsyncClient(buildInnerClient().getComputeNodeExtensions()); } /** * Builds an instance of ApplicationsClient class. * * @return an instance of ApplicationsClient. */ @Generated public ApplicationsClient buildApplicationsClient() { return new ApplicationsClient(new ApplicationsAsyncClient(buildInnerClient().getApplications())); } /** * Builds an instance of PoolClient class. * * @return an instance of PoolClient. */ @Generated public PoolClient buildPoolClient() { return new PoolClient(new PoolAsyncClient(buildInnerClient().getPools())); } /** * Builds an instance of AccountClient class. 
* * @return an instance of AccountClient. */ @Generated public AccountClient buildAccountClient() { return new AccountClient(new AccountAsyncClient(buildInnerClient().getAccounts())); } /** * Builds an instance of JobClient class. * * @return an instance of JobClient. */ @Generated public JobClient buildJobClient() { return new JobClient(new JobAsyncClient(buildInnerClient().getJobs())); } /** * Builds an instance of CertificatesClient class. * * @return an instance of CertificatesClient. */ @Generated public CertificatesClient buildCertificatesClient() { return new CertificatesClient(new CertificatesAsyncClient(buildInnerClient().getCertificates())); } /** * Builds an instance of FileClient class. * * @return an instance of FileClient. */ @Generated public FileClient buildFileClient() { return new FileClient(new FileAsyncClient(buildInnerClient().getFiles())); } /** * Builds an instance of JobScheduleClient class. * * @return an instance of JobScheduleClient. */ @Generated public JobScheduleClient buildJobScheduleClient() { return new JobScheduleClient(new JobScheduleAsyncClient(buildInnerClient().getJobSchedules())); } /** * Builds an instance of TaskClient class. * * @return an instance of TaskClient. */ @Generated public TaskClient buildTaskClient() { return new TaskClient(new TaskAsyncClient(buildInnerClient().getTasks())); } /** * Builds an instance of ComputeNodesClient class. * * @return an instance of ComputeNodesClient. */ @Generated public ComputeNodesClient buildComputeNodesClient() { return new ComputeNodesClient(new ComputeNodesAsyncClient(buildInnerClient().getComputeNodes())); } /** * Builds an instance of ComputeNodeExtensionsClient class. * * @return an instance of ComputeNodeExtensionsClient. */ @Generated public ComputeNodeExtensionsClient buildComputeNodeExtensionsClient() { return new ComputeNodeExtensionsClient( new ComputeNodeExtensionsAsyncClient(buildInnerClient().getComputeNodeExtensions())); } }
Even if we can't send the `Date` header when using shared key auth, we should still send it when using other kinds of auth (e.g. token-credential auth) — so the `AddDatePolicy` should be added to the pipeline unconditionally. Is it still possible after this change?
/**
 * Builds the {@link HttpPipeline} used by every client produced by this builder.
 *
 * <p>Policy order: user agent / request id / context headers / custom per-call policies,
 * then retry, date, cookie and authentication policies, then custom per-retry policies,
 * and finally HTTP logging.</p>
 *
 * @return the assembled {@link HttpPipeline}.
 */
private HttpPipeline createHttpPipeline() {
    Configuration buildConfiguration =
        (configuration == null) ? Configuration.getGlobalConfiguration() : configuration;
    HttpLogOptions localHttpLogOptions = this.httpLogOptions == null ? new HttpLogOptions() : this.httpLogOptions;
    ClientOptions localClientOptions = this.clientOptions == null ? new ClientOptions() : this.clientOptions;
    List<HttpPipelinePolicy> policies = new ArrayList<>();
    String clientName = PROPERTIES.getOrDefault(SDK_NAME, "UnknownName");
    String clientVersion = PROPERTIES.getOrDefault(SDK_VERSION, "UnknownVersion");
    String applicationId = CoreUtils.getApplicationId(localClientOptions, localHttpLogOptions);
    policies.add(new UserAgentPolicy(applicationId, clientName, clientVersion, buildConfiguration));
    policies.add(new RequestIdPolicy());
    policies.add(new AddHeadersFromContextPolicy());
    HttpHeaders headers = new HttpHeaders();
    localClientOptions.getHeaders().forEach(header -> headers.set(header.getName(), header.getValue()));
    if (headers.getSize() > 0) {
        policies.add(new AddHeadersPolicy(headers));
    }
    this.pipelinePolicies.stream()
        .filter(p -> p.getPipelinePosition() == HttpPipelinePosition.PER_CALL)
        .forEach(p -> policies.add(p));
    HttpPolicyProviders.addBeforeRetryPolicies(policies);
    policies.add(ClientBuilderUtil.validateAndGetRetryPolicy(retryPolicy, retryOptions, new RetryPolicy()));
    // Always stamp the RFC-1123 'Date' header. Shared-key auth signs it (and mirrors it into
    // 'ocp-date'), and requests made with other credential types should carry it as well.
    policies.add(new AddDatePolicy());
    policies.add(new CookiePolicy());
    // Exactly one credential type is honored: token credential wins if both were set.
    if (tokenCredential != null) {
        policies.add(new BearerTokenAuthenticationPolicy(tokenCredential, DEFAULT_SCOPES));
    } else if (batchSharedKeyCred != null) {
        policies.add(new BatchSharedKeyCredentialsPolicy(batchSharedKeyCred));
    }
    this.pipelinePolicies.stream()
        .filter(p -> p.getPipelinePosition() == HttpPipelinePosition.PER_RETRY)
        .forEach(p -> policies.add(p));
    HttpPolicyProviders.addAfterRetryPolicies(policies);
    // NOTE(review): generated code passes the raw (possibly null) field here rather than
    // localHttpLogOptions; azure-core tolerates null, so behavior is preserved.
    policies.add(new HttpLoggingPolicy(httpLogOptions));
    HttpPipeline httpPipeline = new HttpPipelineBuilder()
        .policies(policies.toArray(new HttpPipelinePolicy[0]))
        .httpClient(httpClient)
        .clientOptions(localClientOptions)
        .build();
    return httpPipeline;
}
if (headers.getSize() > 0) {
/**
 * Assembles the {@link HttpPipeline} shared by all clients built from this builder:
 * identification policies first, then custom per-call policies, retry/date/cookie,
 * the configured authentication policy, custom per-retry policies, and logging last.
 *
 * @return the configured {@link HttpPipeline}.
 */
private HttpPipeline createHttpPipeline() {
    Configuration effectiveConfiguration =
        (configuration == null) ? Configuration.getGlobalConfiguration() : configuration;
    HttpLogOptions effectiveLogOptions =
        (this.httpLogOptions == null) ? new HttpLogOptions() : this.httpLogOptions;
    ClientOptions effectiveClientOptions =
        (this.clientOptions == null) ? new ClientOptions() : this.clientOptions;

    String sdkName = PROPERTIES.getOrDefault(SDK_NAME, "UnknownName");
    String sdkVersion = PROPERTIES.getOrDefault(SDK_VERSION, "UnknownVersion");
    String applicationId = CoreUtils.getApplicationId(effectiveClientOptions, effectiveLogOptions);

    List<HttpPipelinePolicy> policies = new ArrayList<>();
    policies.add(new UserAgentPolicy(applicationId, sdkName, sdkVersion, effectiveConfiguration));
    policies.add(new RequestIdPolicy());
    policies.add(new AddHeadersFromContextPolicy());

    // Headers supplied through ClientOptions are applied to every request.
    HttpHeaders optionHeaders = new HttpHeaders();
    effectiveClientOptions.getHeaders().forEach(h -> optionHeaders.set(h.getName(), h.getValue()));
    if (optionHeaders.getSize() > 0) {
        policies.add(new AddHeadersPolicy(optionHeaders));
    }

    for (HttpPipelinePolicy customPolicy : this.pipelinePolicies) {
        if (customPolicy.getPipelinePosition() == HttpPipelinePosition.PER_CALL) {
            policies.add(customPolicy);
        }
    }
    HttpPolicyProviders.addBeforeRetryPolicies(policies);

    policies.add(ClientBuilderUtil.validateAndGetRetryPolicy(retryPolicy, retryOptions, new RetryPolicy()));
    policies.add(new AddDatePolicy());
    policies.add(new CookiePolicy());

    // Token credential takes precedence; shared-key is used only when no token credential is set.
    if (tokenCredential != null) {
        policies.add(new BearerTokenAuthenticationPolicy(tokenCredential, DEFAULT_SCOPES));
    } else if (batchSharedKeyCred != null) {
        policies.add(new BatchSharedKeyCredentialsPolicy(batchSharedKeyCred));
    }

    for (HttpPipelinePolicy customPolicy : this.pipelinePolicies) {
        if (customPolicy.getPipelinePosition() == HttpPipelinePosition.PER_RETRY) {
            policies.add(customPolicy);
        }
    }
    HttpPolicyProviders.addAfterRetryPolicies(policies);
    policies.add(new HttpLoggingPolicy(httpLogOptions));

    return new HttpPipelineBuilder()
        .policies(policies.toArray(new HttpPipelinePolicy[0]))
        .httpClient(httpClient)
        .clientOptions(effectiveClientOptions)
        .build();
}
class BatchServiceClientBuilder implements HttpTrait<BatchServiceClientBuilder>, ConfigurationTrait<BatchServiceClientBuilder>, TokenCredentialTrait<BatchServiceClientBuilder>, EndpointTrait<BatchServiceClientBuilder> { @Generated private static final String SDK_NAME = "name"; @Generated private static final String SDK_VERSION = "version"; @Generated private static final String[] DEFAULT_SCOPES = new String[] {"https: @Generated private static final Map<String, String> PROPERTIES = CoreUtils.getProperties("azure-compute-batch.properties"); @Generated private final List<HttpPipelinePolicy> pipelinePolicies; /** Create an instance of the BatchServiceClientBuilder. */ @Generated public BatchServiceClientBuilder() { this.pipelinePolicies = new ArrayList<>(); } /* * The HTTP pipeline to send requests through. */ @Generated private HttpPipeline pipeline; /** {@inheritDoc}. */ @Generated @Override public BatchServiceClientBuilder pipeline(HttpPipeline pipeline) { this.pipeline = pipeline; return this; } /* * The HTTP client used to send the request. */ @Generated private HttpClient httpClient; /** {@inheritDoc}. */ @Generated @Override public BatchServiceClientBuilder httpClient(HttpClient httpClient) { this.httpClient = httpClient; return this; } /* * The logging configuration for HTTP requests and responses. */ @Generated private HttpLogOptions httpLogOptions; /** {@inheritDoc}. */ @Generated @Override public BatchServiceClientBuilder httpLogOptions(HttpLogOptions httpLogOptions) { this.httpLogOptions = httpLogOptions; return this; } /* * The client options such as application ID and custom headers to set on a request. */ @Generated private ClientOptions clientOptions; /** {@inheritDoc}. */ @Generated @Override public BatchServiceClientBuilder clientOptions(ClientOptions clientOptions) { this.clientOptions = clientOptions; return this; } /* * The retry options to configure retry policy for failed requests. 
*/ @Generated private RetryOptions retryOptions; /** {@inheritDoc}. */ @Generated @Override public BatchServiceClientBuilder retryOptions(RetryOptions retryOptions) { this.retryOptions = retryOptions; return this; } /** {@inheritDoc}. */ @Generated @Override public BatchServiceClientBuilder addPolicy(HttpPipelinePolicy customPolicy) { Objects.requireNonNull(customPolicy, "'customPolicy' cannot be null."); pipelinePolicies.add(customPolicy); return this; } /* * The configuration store that is used during construction of the service client. */ @Generated private Configuration configuration; /** {@inheritDoc}. */ @Generated @Override public BatchServiceClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /* * The TokenCredential used for authentication. */ @Generated private TokenCredential tokenCredential; /** {@inheritDoc}. */ @Override public BatchServiceClientBuilder credential(TokenCredential tokenCredential) { this.tokenCredential = tokenCredential; return this; } private BatchSharedKeyCredentials batchSharedKeyCred; public BatchServiceClientBuilder credential(BatchSharedKeyCredentials batchSharedKeyCred) { this.batchSharedKeyCred = Objects.requireNonNull(batchSharedKeyCred, "'batchSharedKeyCred' cannot be null."); this.tokenCredential = null; return this; } /* * The service endpoint */ @Generated private String endpoint; /** {@inheritDoc}. */ @Generated @Override public BatchServiceClientBuilder endpoint(String endpoint) { this.endpoint = endpoint; return this; } /* * Service version */ @Generated private BatchServiceVersion serviceVersion; /** * Sets Service version. * * @param serviceVersion the serviceVersion value. * @return the BatchServiceClientBuilder. */ @Generated public BatchServiceClientBuilder serviceVersion(BatchServiceVersion serviceVersion) { this.serviceVersion = serviceVersion; return this; } /* * The retry policy that will attempt to retry failed requests, if applicable. 
*/ @Generated private RetryPolicy retryPolicy; /** * Sets The retry policy that will attempt to retry failed requests, if applicable. * * @param retryPolicy the retryPolicy value. * @return the BatchServiceClientBuilder. */ @Generated public BatchServiceClientBuilder retryPolicy(RetryPolicy retryPolicy) { this.retryPolicy = retryPolicy; return this; } /** * Builds an instance of BatchServiceClientImpl with the provided parameters. * * @return an instance of BatchServiceClientImpl. */ @Generated private BatchServiceClientImpl buildInnerClient() { HttpPipeline localPipeline = (pipeline != null) ? pipeline : createHttpPipeline(); BatchServiceVersion localServiceVersion = (serviceVersion != null) ? serviceVersion : BatchServiceVersion.getLatest(); BatchServiceClientImpl client = new BatchServiceClientImpl( localPipeline, JacksonAdapter.createDefaultSerializerAdapter(), endpoint, localServiceVersion); return client; } /** * Builds an instance of ApplicationsAsyncClient class. * * @return an instance of ApplicationsAsyncClient. */ @Generated public ApplicationsAsyncClient buildApplicationsAsyncClient() { return new ApplicationsAsyncClient(buildInnerClient().getApplications()); } /** * Builds an instance of PoolAsyncClient class. * * @return an instance of PoolAsyncClient. */ @Generated public PoolAsyncClient buildPoolAsyncClient() { return new PoolAsyncClient(buildInnerClient().getPools()); } /** * Builds an instance of AccountAsyncClient class. * * @return an instance of AccountAsyncClient. */ @Generated public AccountAsyncClient buildAccountAsyncClient() { return new AccountAsyncClient(buildInnerClient().getAccounts()); } /** * Builds an instance of JobAsyncClient class. * * @return an instance of JobAsyncClient. */ @Generated public JobAsyncClient buildJobAsyncClient() { return new JobAsyncClient(buildInnerClient().getJobs()); } /** * Builds an instance of CertificatesAsyncClient class. * * @return an instance of CertificatesAsyncClient. 
*/ @Generated public CertificatesAsyncClient buildCertificatesAsyncClient() { return new CertificatesAsyncClient(buildInnerClient().getCertificates()); } /** * Builds an instance of FileAsyncClient class. * * @return an instance of FileAsyncClient. */ @Generated public FileAsyncClient buildFileAsyncClient() { return new FileAsyncClient(buildInnerClient().getFiles()); } /** * Builds an instance of JobScheduleAsyncClient class. * * @return an instance of JobScheduleAsyncClient. */ @Generated public JobScheduleAsyncClient buildJobScheduleAsyncClient() { return new JobScheduleAsyncClient(buildInnerClient().getJobSchedules()); } /** * Builds an instance of TaskAsyncClient class. * * @return an instance of TaskAsyncClient. */ @Generated public TaskAsyncClient buildTaskAsyncClient() { return new TaskAsyncClient(buildInnerClient().getTasks()); } /** * Builds an instance of ComputeNodesAsyncClient class. * * @return an instance of ComputeNodesAsyncClient. */ @Generated public ComputeNodesAsyncClient buildComputeNodesAsyncClient() { return new ComputeNodesAsyncClient(buildInnerClient().getComputeNodes()); } /** * Builds an instance of ComputeNodeExtensionsAsyncClient class. * * @return an instance of ComputeNodeExtensionsAsyncClient. */ @Generated public ComputeNodeExtensionsAsyncClient buildComputeNodeExtensionsAsyncClient() { return new ComputeNodeExtensionsAsyncClient(buildInnerClient().getComputeNodeExtensions()); } /** * Builds an instance of ApplicationsClient class. * * @return an instance of ApplicationsClient. */ @Generated public ApplicationsClient buildApplicationsClient() { return new ApplicationsClient(new ApplicationsAsyncClient(buildInnerClient().getApplications())); } /** * Builds an instance of PoolClient class. * * @return an instance of PoolClient. */ @Generated public PoolClient buildPoolClient() { return new PoolClient(new PoolAsyncClient(buildInnerClient().getPools())); } /** * Builds an instance of AccountClient class. 
* * @return an instance of AccountClient. */ @Generated public AccountClient buildAccountClient() { return new AccountClient(new AccountAsyncClient(buildInnerClient().getAccounts())); } /** * Builds an instance of JobClient class. * * @return an instance of JobClient. */ @Generated public JobClient buildJobClient() { return new JobClient(new JobAsyncClient(buildInnerClient().getJobs())); } /** * Builds an instance of CertificatesClient class. * * @return an instance of CertificatesClient. */ @Generated public CertificatesClient buildCertificatesClient() { return new CertificatesClient(new CertificatesAsyncClient(buildInnerClient().getCertificates())); } /** * Builds an instance of FileClient class. * * @return an instance of FileClient. */ @Generated public FileClient buildFileClient() { return new FileClient(new FileAsyncClient(buildInnerClient().getFiles())); } /** * Builds an instance of JobScheduleClient class. * * @return an instance of JobScheduleClient. */ @Generated public JobScheduleClient buildJobScheduleClient() { return new JobScheduleClient(new JobScheduleAsyncClient(buildInnerClient().getJobSchedules())); } /** * Builds an instance of TaskClient class. * * @return an instance of TaskClient. */ @Generated public TaskClient buildTaskClient() { return new TaskClient(new TaskAsyncClient(buildInnerClient().getTasks())); } /** * Builds an instance of ComputeNodesClient class. * * @return an instance of ComputeNodesClient. */ @Generated public ComputeNodesClient buildComputeNodesClient() { return new ComputeNodesClient(new ComputeNodesAsyncClient(buildInnerClient().getComputeNodes())); } /** * Builds an instance of ComputeNodeExtensionsClient class. * * @return an instance of ComputeNodeExtensionsClient. */ @Generated public ComputeNodeExtensionsClient buildComputeNodeExtensionsClient() { return new ComputeNodeExtensionsClient( new ComputeNodeExtensionsAsyncClient(buildInnerClient().getComputeNodeExtensions())); } }
class BatchServiceClientBuilder implements HttpTrait<BatchServiceClientBuilder>, ConfigurationTrait<BatchServiceClientBuilder>, TokenCredentialTrait<BatchServiceClientBuilder>, EndpointTrait<BatchServiceClientBuilder> { @Generated private static final String SDK_NAME = "name"; @Generated private static final String SDK_VERSION = "version"; @Generated private static final String[] DEFAULT_SCOPES = new String[] {"https: @Generated private static final Map<String, String> PROPERTIES = CoreUtils.getProperties("azure-compute-batch.properties"); @Generated private final List<HttpPipelinePolicy> pipelinePolicies; /** Create an instance of the BatchServiceClientBuilder. */ @Generated public BatchServiceClientBuilder() { this.pipelinePolicies = new ArrayList<>(); } /* * The HTTP pipeline to send requests through. */ @Generated private HttpPipeline pipeline; /** {@inheritDoc}. */ @Generated @Override public BatchServiceClientBuilder pipeline(HttpPipeline pipeline) { this.pipeline = pipeline; return this; } /* * The HTTP client used to send the request. */ @Generated private HttpClient httpClient; /** {@inheritDoc}. */ @Generated @Override public BatchServiceClientBuilder httpClient(HttpClient httpClient) { this.httpClient = httpClient; return this; } /* * The logging configuration for HTTP requests and responses. */ @Generated private HttpLogOptions httpLogOptions; /** {@inheritDoc}. */ @Generated @Override public BatchServiceClientBuilder httpLogOptions(HttpLogOptions httpLogOptions) { this.httpLogOptions = httpLogOptions; return this; } /* * The client options such as application ID and custom headers to set on a request. */ @Generated private ClientOptions clientOptions; /** {@inheritDoc}. */ @Generated @Override public BatchServiceClientBuilder clientOptions(ClientOptions clientOptions) { this.clientOptions = clientOptions; return this; } /* * The retry options to configure retry policy for failed requests. 
*/ @Generated private RetryOptions retryOptions; /** {@inheritDoc}. */ @Generated @Override public BatchServiceClientBuilder retryOptions(RetryOptions retryOptions) { this.retryOptions = retryOptions; return this; } /** {@inheritDoc}. */ @Generated @Override public BatchServiceClientBuilder addPolicy(HttpPipelinePolicy customPolicy) { Objects.requireNonNull(customPolicy, "'customPolicy' cannot be null."); pipelinePolicies.add(customPolicy); return this; } /* * The configuration store that is used during construction of the service client. */ @Generated private Configuration configuration; /** {@inheritDoc}. */ @Generated @Override public BatchServiceClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /* * The TokenCredential used for authentication. */ @Generated private TokenCredential tokenCredential; /** {@inheritDoc}. */ @Generated @Override public BatchServiceClientBuilder credential(TokenCredential tokenCredential) { this.tokenCredential = tokenCredential; return this; } private BatchSharedKeyCredentials batchSharedKeyCred; public BatchServiceClientBuilder credential(BatchSharedKeyCredentials batchSharedKeyCred) { this.batchSharedKeyCred = Objects.requireNonNull(batchSharedKeyCred, "'batchSharedKeyCred' cannot be null."); this.tokenCredential = null; return this; } /* * The service endpoint */ @Generated private String endpoint; /** {@inheritDoc}. */ @Generated @Override public BatchServiceClientBuilder endpoint(String endpoint) { this.endpoint = endpoint; return this; } /* * Service version */ @Generated private BatchServiceVersion serviceVersion; /** * Sets Service version. * * @param serviceVersion the serviceVersion value. * @return the BatchServiceClientBuilder. */ @Generated public BatchServiceClientBuilder serviceVersion(BatchServiceVersion serviceVersion) { this.serviceVersion = serviceVersion; return this; } /* * The retry policy that will attempt to retry failed requests, if applicable. 
*/ @Generated private RetryPolicy retryPolicy; /** * Sets The retry policy that will attempt to retry failed requests, if applicable. * * @param retryPolicy the retryPolicy value. * @return the BatchServiceClientBuilder. */ @Generated public BatchServiceClientBuilder retryPolicy(RetryPolicy retryPolicy) { this.retryPolicy = retryPolicy; return this; } /** * Builds an instance of BatchServiceClientImpl with the provided parameters. * * @return an instance of BatchServiceClientImpl. */ @Generated private BatchServiceClientImpl buildInnerClient() { HttpPipeline localPipeline = (pipeline != null) ? pipeline : createHttpPipeline(); BatchServiceVersion localServiceVersion = (serviceVersion != null) ? serviceVersion : BatchServiceVersion.getLatest(); BatchServiceClientImpl client = new BatchServiceClientImpl( localPipeline, JacksonAdapter.createDefaultSerializerAdapter(), endpoint, localServiceVersion); return client; } /** * Builds an instance of ApplicationsAsyncClient class. * * @return an instance of ApplicationsAsyncClient. */ @Generated public ApplicationsAsyncClient buildApplicationsAsyncClient() { return new ApplicationsAsyncClient(buildInnerClient().getApplications()); } /** * Builds an instance of PoolAsyncClient class. * * @return an instance of PoolAsyncClient. */ @Generated public PoolAsyncClient buildPoolAsyncClient() { return new PoolAsyncClient(buildInnerClient().getPools()); } /** * Builds an instance of AccountAsyncClient class. * * @return an instance of AccountAsyncClient. */ @Generated public AccountAsyncClient buildAccountAsyncClient() { return new AccountAsyncClient(buildInnerClient().getAccounts()); } /** * Builds an instance of JobAsyncClient class. * * @return an instance of JobAsyncClient. */ @Generated public JobAsyncClient buildJobAsyncClient() { return new JobAsyncClient(buildInnerClient().getJobs()); } /** * Builds an instance of CertificatesAsyncClient class. * * @return an instance of CertificatesAsyncClient. 
*/ @Generated public CertificatesAsyncClient buildCertificatesAsyncClient() { return new CertificatesAsyncClient(buildInnerClient().getCertificates()); } /** * Builds an instance of FileAsyncClient class. * * @return an instance of FileAsyncClient. */ @Generated public FileAsyncClient buildFileAsyncClient() { return new FileAsyncClient(buildInnerClient().getFiles()); } /** * Builds an instance of JobScheduleAsyncClient class. * * @return an instance of JobScheduleAsyncClient. */ @Generated public JobScheduleAsyncClient buildJobScheduleAsyncClient() { return new JobScheduleAsyncClient(buildInnerClient().getJobSchedules()); } /** * Builds an instance of TaskAsyncClient class. * * @return an instance of TaskAsyncClient. */ @Generated public TaskAsyncClient buildTaskAsyncClient() { return new TaskAsyncClient(buildInnerClient().getTasks()); } /** * Builds an instance of ComputeNodesAsyncClient class. * * @return an instance of ComputeNodesAsyncClient. */ @Generated public ComputeNodesAsyncClient buildComputeNodesAsyncClient() { return new ComputeNodesAsyncClient(buildInnerClient().getComputeNodes()); } /** * Builds an instance of ComputeNodeExtensionsAsyncClient class. * * @return an instance of ComputeNodeExtensionsAsyncClient. */ @Generated public ComputeNodeExtensionsAsyncClient buildComputeNodeExtensionsAsyncClient() { return new ComputeNodeExtensionsAsyncClient(buildInnerClient().getComputeNodeExtensions()); } /** * Builds an instance of ApplicationsClient class. * * @return an instance of ApplicationsClient. */ @Generated public ApplicationsClient buildApplicationsClient() { return new ApplicationsClient(new ApplicationsAsyncClient(buildInnerClient().getApplications())); } /** * Builds an instance of PoolClient class. * * @return an instance of PoolClient. */ @Generated public PoolClient buildPoolClient() { return new PoolClient(new PoolAsyncClient(buildInnerClient().getPools())); } /** * Builds an instance of AccountClient class. 
* * @return an instance of AccountClient. */ @Generated public AccountClient buildAccountClient() { return new AccountClient(new AccountAsyncClient(buildInnerClient().getAccounts())); } /** * Builds an instance of JobClient class. * * @return an instance of JobClient. */ @Generated public JobClient buildJobClient() { return new JobClient(new JobAsyncClient(buildInnerClient().getJobs())); } /** * Builds an instance of CertificatesClient class. * * @return an instance of CertificatesClient. */ @Generated public CertificatesClient buildCertificatesClient() { return new CertificatesClient(new CertificatesAsyncClient(buildInnerClient().getCertificates())); } /** * Builds an instance of FileClient class. * * @return an instance of FileClient. */ @Generated public FileClient buildFileClient() { return new FileClient(new FileAsyncClient(buildInnerClient().getFiles())); } /** * Builds an instance of JobScheduleClient class. * * @return an instance of JobScheduleClient. */ @Generated public JobScheduleClient buildJobScheduleClient() { return new JobScheduleClient(new JobScheduleAsyncClient(buildInnerClient().getJobSchedules())); } /** * Builds an instance of TaskClient class. * * @return an instance of TaskClient. */ @Generated public TaskClient buildTaskClient() { return new TaskClient(new TaskAsyncClient(buildInnerClient().getTasks())); } /** * Builds an instance of ComputeNodesClient class. * * @return an instance of ComputeNodesClient. */ @Generated public ComputeNodesClient buildComputeNodesClient() { return new ComputeNodesClient(new ComputeNodesAsyncClient(buildInnerClient().getComputeNodes())); } /** * Builds an instance of ComputeNodeExtensionsClient class. * * @return an instance of ComputeNodeExtensionsClient. */ @Generated public ComputeNodeExtensionsClient buildComputeNodeExtensionsClient() { return new ComputeNodeExtensionsClient( new ComputeNodeExtensionsAsyncClient(buildInnerClient().getComputeNodeExtensions())); } }
Assuming the `Date` header is always set by the pipeline (it now includes `AddDatePolicy`), let's just copy its value into `ocp-date` instead of calling `now()` again — otherwise the two headers could end up carrying different timestamps.
public String signHeader(HttpRequest request) throws IOException { if (request.getHeaders().get("ocp-date") == null) { DateTimeRfc1123 rfcDate = new DateTimeRfc1123(now()); request.setHeader("ocp-date", rfcDate.toString()); } StringBuffer signature = new StringBuffer(request.getHttpMethod().toString()); signature.append("\n"); signature.append(headerValue(request, HttpHeaderName.CONTENT_ENCODING)).append("\n"); signature.append(headerValue(request, HttpHeaderName.CONTENT_LANGUAGE)).append("\n"); String contentLength = headerValue(request, HttpHeaderName.CONTENT_LENGTH); signature.append((contentLength == null || Long.parseLong(contentLength) < 0 ? "" : contentLength)).append("\n"); signature.append(headerValue(request, HttpHeaderName.CONTENT_MD5)).append("\n"); String contentType = headerValue(request, HttpHeaderName.CONTENT_TYPE); signature.append(contentType).append("\n"); signature.append(headerValue(request, HttpHeaderName.DATE)).append("\n"); signature.append(headerValue(request, HttpHeaderName.IF_MODIFIED_SINCE)).append("\n"); signature.append(headerValue(request, HttpHeaderName.IF_MATCH)).append("\n"); signature.append(headerValue(request, HttpHeaderName.IF_NONE_MATCH)).append("\n"); signature.append(headerValue(request, HttpHeaderName.IF_UNMODIFIED_SINCE)).append("\n"); signature.append(headerValue(request, HttpHeaderName.RANGE)).append("\n"); ArrayList<String> customHeaders = new ArrayList<>(); for (HttpHeader name : request.getHeaders()) { if (name.getName().toLowerCase().startsWith("ocp-")) { customHeaders.add(name.getName().toLowerCase()); } } Collections.sort(customHeaders); for (String canonicalHeader : customHeaders) { String value = request.getHeaders().getValue(canonicalHeader); value = value.replace('\n', ' ').replace('\r', ' ') .replaceAll("^[ ]+", ""); signature.append(canonicalHeader).append(":").append(value).append("\n"); } signature.append("/") .append(batchSharedKeyCred.accountName().toLowerCase()).append("/") 
.append(request.getUrl().getPath().replaceAll("^[/]+", "")); String query = request.getUrl().getQuery(); if (query != null) { Map<String, String> queryComponents = new TreeMap<>(); String[] pairs = query.split("&"); for (String pair : pairs) { int idx = pair.indexOf("="); String key = URLDecoder.decode(pair.substring(0, idx), "UTF-8") .toLowerCase(Locale.US); queryComponents.put( key, key + ":" + URLDecoder.decode(pair.substring(idx + 1), "UTF-8")); } for (Map.Entry<String, String> entry : queryComponents.entrySet()) { signature.append("\n").append(entry.getValue()); } } String signedSignature = sign(signature.toString()); String authorization = "SharedKey " + batchSharedKeyCred.accountName() + ":" + signedSignature; return authorization; }
DateTimeRfc1123 rfcDate = new DateTimeRfc1123(now());
public String signHeader(HttpRequest request) throws IOException { String dateHeaderToSign = headerValue(request, HttpHeaderName.DATE); if (request.getHeaders().get("ocp-date") == null) { if (dateHeaderToSign == null) { DateTimeRfc1123 rfcDate = new DateTimeRfc1123(now()); request.setHeader("ocp-date", rfcDate.toString()); dateHeaderToSign = ""; } } else { dateHeaderToSign = ""; } StringBuffer signature = new StringBuffer(request.getHttpMethod().toString()); signature.append("\n"); signature.append(headerValue(request, HttpHeaderName.CONTENT_ENCODING)).append("\n"); signature.append(headerValue(request, HttpHeaderName.CONTENT_LANGUAGE)).append("\n"); String contentLength = headerValue(request, HttpHeaderName.CONTENT_LENGTH); signature.append((contentLength == null || Long.parseLong(contentLength) < 0 ? "" : contentLength)).append("\n"); signature.append(headerValue(request, HttpHeaderName.CONTENT_MD5)).append("\n"); String contentType = headerValue(request, HttpHeaderName.CONTENT_TYPE); signature.append(contentType).append("\n"); signature.append(dateHeaderToSign).append("\n"); signature.append(headerValue(request, HttpHeaderName.IF_MODIFIED_SINCE)).append("\n"); signature.append(headerValue(request, HttpHeaderName.IF_MATCH)).append("\n"); signature.append(headerValue(request, HttpHeaderName.IF_NONE_MATCH)).append("\n"); signature.append(headerValue(request, HttpHeaderName.IF_UNMODIFIED_SINCE)).append("\n"); signature.append(headerValue(request, HttpHeaderName.RANGE)).append("\n"); ArrayList<String> customHeaders = new ArrayList<>(); for (HttpHeader name : request.getHeaders()) { if (name.getName().toLowerCase().startsWith("ocp-")) { customHeaders.add(name.getName().toLowerCase()); } } Collections.sort(customHeaders); for (String canonicalHeader : customHeaders) { String value = request.getHeaders().getValue(canonicalHeader); value = value.replace('\n', ' ').replace('\r', ' ') .replaceAll("^[ ]+", ""); 
signature.append(canonicalHeader).append(":").append(value).append("\n"); } signature.append("/") .append(batchSharedKeyCred.accountName().toLowerCase()).append("/") .append(request.getUrl().getPath().replaceAll("^[/]+", "")); String query = request.getUrl().getQuery(); if (query != null) { Map<String, String> queryComponents = new TreeMap<>(); String[] pairs = query.split("&"); for (String pair : pairs) { int idx = pair.indexOf("="); String key = URLDecoder.decode(pair.substring(0, idx), "UTF-8") .toLowerCase(Locale.US); queryComponents.put( key, key + ":" + URLDecoder.decode(pair.substring(idx + 1), "UTF-8")); } for (Map.Entry<String, String> entry : queryComponents.entrySet()) { signature.append("\n").append(entry.getValue()); } } String signedSignature = sign(signature.toString()); String authorization = "SharedKey " + batchSharedKeyCred.accountName() + ":" + signedSignature; return authorization; }
class BatchSharedKeyCredentialsPolicy implements HttpPipelinePolicy { private final BatchSharedKeyCredentials batchSharedKeyCred; private Mac hmacSha256; /** * Creates a SharedKey pipeline policy that adds the SharedKey into the request's authorization header. * * @param credential the SharedKey credential used to create the policy. */ public BatchSharedKeyCredentialsPolicy(BatchSharedKeyCredentials credential) { this.batchSharedKeyCred = credential; } /** * @return the {@link BatchSharedKeyCredentials} linked to the policy. */ public BatchSharedKeyCredentials sharedKeyCredential() { return this.batchSharedKeyCred; } private String headerValue(HttpRequest request, HttpHeaderName headerName) { HttpHeaders headers = request.getHeaders(); Header header = headers.get(headerName); if (header == null) { return ""; } return header.getValue(); } private synchronized String sign(String stringToSign) { try { byte[] digest = getHmac256().doFinal(stringToSign.getBytes("UTF-8")); return Base64.encodeBase64String(digest); } catch (Exception e) { throw new IllegalArgumentException("accessKey", e); } } private synchronized Mac getHmac256() throws NoSuchAlgorithmException, InvalidKeyException { if (this.hmacSha256 == null) { this.hmacSha256 = Mac.getInstance("HmacSHA256"); this.hmacSha256.init(new SecretKeySpec(Base64.decodeBase64(batchSharedKeyCred.keyValue()), "HmacSHA256")); } return this.hmacSha256; } @Override public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { try { String authorizationValue = this.signHeader(context.getHttpRequest()); context.getHttpRequest().setHeader("Authorization", authorizationValue); } catch (IOException e) { throw new RuntimeException(e); } return next.process(); } }
class BatchSharedKeyCredentialsPolicy implements HttpPipelinePolicy { private final BatchSharedKeyCredentials batchSharedKeyCred; private Mac hmacSha256; /** * Creates a SharedKey pipeline policy that adds the SharedKey into the request's authorization header. * * @param credential the SharedKey credential used to create the policy. */ public BatchSharedKeyCredentialsPolicy(BatchSharedKeyCredentials credential) { this.batchSharedKeyCred = credential; } /** * @return the {@link BatchSharedKeyCredentials} linked to the policy. */ private String headerValue(HttpRequest request, HttpHeaderName headerName) { HttpHeaders headers = request.getHeaders(); Header header = headers.get(headerName); if (header == null) { return ""; } return header.getValue(); } private synchronized String sign(String stringToSign) { try { byte[] digest = getHmac256().doFinal(stringToSign.getBytes("UTF-8")); return Base64.encodeBase64String(digest); } catch (Exception e) { throw new IllegalArgumentException("accessKey", e); } } private synchronized Mac getHmac256() throws NoSuchAlgorithmException, InvalidKeyException { if (this.hmacSha256 == null) { this.hmacSha256 = Mac.getInstance("HmacSHA256"); this.hmacSha256.init(new SecretKeySpec(Base64.decodeBase64(batchSharedKeyCred.keyValue()), "HmacSHA256")); } return this.hmacSha256; } @Override public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { try { String authorizationValue = this.signHeader(context.getHttpRequest()); context.getHttpRequest().setHeader("Authorization", authorizationValue); } catch (IOException e) { throw new RuntimeException(e); } return next.process(); } }
Agreed. I readded back the Date header.
private HttpPipeline createHttpPipeline() { Configuration buildConfiguration = (configuration == null) ? Configuration.getGlobalConfiguration() : configuration; HttpLogOptions localHttpLogOptions = this.httpLogOptions == null ? new HttpLogOptions() : this.httpLogOptions; ClientOptions localClientOptions = this.clientOptions == null ? new ClientOptions() : this.clientOptions; List<HttpPipelinePolicy> policies = new ArrayList<>(); String clientName = PROPERTIES.getOrDefault(SDK_NAME, "UnknownName"); String clientVersion = PROPERTIES.getOrDefault(SDK_VERSION, "UnknownVersion"); String applicationId = CoreUtils.getApplicationId(localClientOptions, localHttpLogOptions); policies.add(new UserAgentPolicy(applicationId, clientName, clientVersion, buildConfiguration)); policies.add(new RequestIdPolicy()); policies.add(new AddHeadersFromContextPolicy()); HttpHeaders headers = new HttpHeaders(); localClientOptions.getHeaders().forEach(header -> headers.set(header.getName(), header.getValue())); if (headers.getSize() > 0) { policies.add(new AddHeadersPolicy(headers)); } this.pipelinePolicies.stream() .filter(p -> p.getPipelinePosition() == HttpPipelinePosition.PER_CALL) .forEach(p -> policies.add(p)); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(ClientBuilderUtil.validateAndGetRetryPolicy(retryPolicy, retryOptions, new RetryPolicy())); policies.add(new CookiePolicy()); if (tokenCredential != null) { policies.add(new BearerTokenAuthenticationPolicy(tokenCredential, DEFAULT_SCOPES)); } else if (batchSharedKeyCred != null) { policies.add(new BatchSharedKeyCredentialsPolicy(batchSharedKeyCred)); } this.pipelinePolicies.stream() .filter(p -> p.getPipelinePosition() == HttpPipelinePosition.PER_RETRY) .forEach(p -> policies.add(p)); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new HttpLoggingPolicy(httpLogOptions)); HttpPipeline httpPipeline = new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) 
.httpClient(httpClient) .clientOptions(localClientOptions) .build(); return httpPipeline; }
if (headers.getSize() > 0) {
private HttpPipeline createHttpPipeline() { Configuration buildConfiguration = (configuration == null) ? Configuration.getGlobalConfiguration() : configuration; HttpLogOptions localHttpLogOptions = this.httpLogOptions == null ? new HttpLogOptions() : this.httpLogOptions; ClientOptions localClientOptions = this.clientOptions == null ? new ClientOptions() : this.clientOptions; List<HttpPipelinePolicy> policies = new ArrayList<>(); String clientName = PROPERTIES.getOrDefault(SDK_NAME, "UnknownName"); String clientVersion = PROPERTIES.getOrDefault(SDK_VERSION, "UnknownVersion"); String applicationId = CoreUtils.getApplicationId(localClientOptions, localHttpLogOptions); policies.add(new UserAgentPolicy(applicationId, clientName, clientVersion, buildConfiguration)); policies.add(new RequestIdPolicy()); policies.add(new AddHeadersFromContextPolicy()); HttpHeaders headers = new HttpHeaders(); localClientOptions.getHeaders().forEach(header -> headers.set(header.getName(), header.getValue())); if (headers.getSize() > 0) { policies.add(new AddHeadersPolicy(headers)); } this.pipelinePolicies.stream() .filter(p -> p.getPipelinePosition() == HttpPipelinePosition.PER_CALL) .forEach(p -> policies.add(p)); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(ClientBuilderUtil.validateAndGetRetryPolicy(retryPolicy, retryOptions, new RetryPolicy())); policies.add(new AddDatePolicy()); policies.add(new CookiePolicy()); if (tokenCredential != null) { policies.add(new BearerTokenAuthenticationPolicy(tokenCredential, DEFAULT_SCOPES)); } else if (batchSharedKeyCred != null) { policies.add(new BatchSharedKeyCredentialsPolicy(batchSharedKeyCred)); } this.pipelinePolicies.stream() .filter(p -> p.getPipelinePosition() == HttpPipelinePosition.PER_RETRY) .forEach(p -> policies.add(p)); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new HttpLoggingPolicy(httpLogOptions)); HttpPipeline httpPipeline = new HttpPipelineBuilder() .policies(policies.toArray(new 
HttpPipelinePolicy[0])) .httpClient(httpClient) .clientOptions(localClientOptions) .build(); return httpPipeline; }
class BatchServiceClientBuilder implements HttpTrait<BatchServiceClientBuilder>, ConfigurationTrait<BatchServiceClientBuilder>, TokenCredentialTrait<BatchServiceClientBuilder>, EndpointTrait<BatchServiceClientBuilder> { @Generated private static final String SDK_NAME = "name"; @Generated private static final String SDK_VERSION = "version"; @Generated private static final String[] DEFAULT_SCOPES = new String[] {"https: @Generated private static final Map<String, String> PROPERTIES = CoreUtils.getProperties("azure-compute-batch.properties"); @Generated private final List<HttpPipelinePolicy> pipelinePolicies; /** Create an instance of the BatchServiceClientBuilder. */ @Generated public BatchServiceClientBuilder() { this.pipelinePolicies = new ArrayList<>(); } /* * The HTTP pipeline to send requests through. */ @Generated private HttpPipeline pipeline; /** {@inheritDoc}. */ @Generated @Override public BatchServiceClientBuilder pipeline(HttpPipeline pipeline) { this.pipeline = pipeline; return this; } /* * The HTTP client used to send the request. */ @Generated private HttpClient httpClient; /** {@inheritDoc}. */ @Generated @Override public BatchServiceClientBuilder httpClient(HttpClient httpClient) { this.httpClient = httpClient; return this; } /* * The logging configuration for HTTP requests and responses. */ @Generated private HttpLogOptions httpLogOptions; /** {@inheritDoc}. */ @Generated @Override public BatchServiceClientBuilder httpLogOptions(HttpLogOptions httpLogOptions) { this.httpLogOptions = httpLogOptions; return this; } /* * The client options such as application ID and custom headers to set on a request. */ @Generated private ClientOptions clientOptions; /** {@inheritDoc}. */ @Generated @Override public BatchServiceClientBuilder clientOptions(ClientOptions clientOptions) { this.clientOptions = clientOptions; return this; } /* * The retry options to configure retry policy for failed requests. 
*/ @Generated private RetryOptions retryOptions; /** {@inheritDoc}. */ @Generated @Override public BatchServiceClientBuilder retryOptions(RetryOptions retryOptions) { this.retryOptions = retryOptions; return this; } /** {@inheritDoc}. */ @Generated @Override public BatchServiceClientBuilder addPolicy(HttpPipelinePolicy customPolicy) { Objects.requireNonNull(customPolicy, "'customPolicy' cannot be null."); pipelinePolicies.add(customPolicy); return this; } /* * The configuration store that is used during construction of the service client. */ @Generated private Configuration configuration; /** {@inheritDoc}. */ @Generated @Override public BatchServiceClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /* * The TokenCredential used for authentication. */ @Generated private TokenCredential tokenCredential; /** {@inheritDoc}. */ @Override public BatchServiceClientBuilder credential(TokenCredential tokenCredential) { this.tokenCredential = tokenCredential; return this; } private BatchSharedKeyCredentials batchSharedKeyCred; public BatchServiceClientBuilder credential(BatchSharedKeyCredentials batchSharedKeyCred) { this.batchSharedKeyCred = Objects.requireNonNull(batchSharedKeyCred, "'batchSharedKeyCred' cannot be null."); this.tokenCredential = null; return this; } /* * The service endpoint */ @Generated private String endpoint; /** {@inheritDoc}. */ @Generated @Override public BatchServiceClientBuilder endpoint(String endpoint) { this.endpoint = endpoint; return this; } /* * Service version */ @Generated private BatchServiceVersion serviceVersion; /** * Sets Service version. * * @param serviceVersion the serviceVersion value. * @return the BatchServiceClientBuilder. */ @Generated public BatchServiceClientBuilder serviceVersion(BatchServiceVersion serviceVersion) { this.serviceVersion = serviceVersion; return this; } /* * The retry policy that will attempt to retry failed requests, if applicable. 
*/ @Generated private RetryPolicy retryPolicy; /** * Sets The retry policy that will attempt to retry failed requests, if applicable. * * @param retryPolicy the retryPolicy value. * @return the BatchServiceClientBuilder. */ @Generated public BatchServiceClientBuilder retryPolicy(RetryPolicy retryPolicy) { this.retryPolicy = retryPolicy; return this; } /** * Builds an instance of BatchServiceClientImpl with the provided parameters. * * @return an instance of BatchServiceClientImpl. */ @Generated private BatchServiceClientImpl buildInnerClient() { HttpPipeline localPipeline = (pipeline != null) ? pipeline : createHttpPipeline(); BatchServiceVersion localServiceVersion = (serviceVersion != null) ? serviceVersion : BatchServiceVersion.getLatest(); BatchServiceClientImpl client = new BatchServiceClientImpl( localPipeline, JacksonAdapter.createDefaultSerializerAdapter(), endpoint, localServiceVersion); return client; } /** * Builds an instance of ApplicationsAsyncClient class. * * @return an instance of ApplicationsAsyncClient. */ @Generated public ApplicationsAsyncClient buildApplicationsAsyncClient() { return new ApplicationsAsyncClient(buildInnerClient().getApplications()); } /** * Builds an instance of PoolAsyncClient class. * * @return an instance of PoolAsyncClient. */ @Generated public PoolAsyncClient buildPoolAsyncClient() { return new PoolAsyncClient(buildInnerClient().getPools()); } /** * Builds an instance of AccountAsyncClient class. * * @return an instance of AccountAsyncClient. */ @Generated public AccountAsyncClient buildAccountAsyncClient() { return new AccountAsyncClient(buildInnerClient().getAccounts()); } /** * Builds an instance of JobAsyncClient class. * * @return an instance of JobAsyncClient. */ @Generated public JobAsyncClient buildJobAsyncClient() { return new JobAsyncClient(buildInnerClient().getJobs()); } /** * Builds an instance of CertificatesAsyncClient class. * * @return an instance of CertificatesAsyncClient. 
*/ @Generated public CertificatesAsyncClient buildCertificatesAsyncClient() { return new CertificatesAsyncClient(buildInnerClient().getCertificates()); } /** * Builds an instance of FileAsyncClient class. * * @return an instance of FileAsyncClient. */ @Generated public FileAsyncClient buildFileAsyncClient() { return new FileAsyncClient(buildInnerClient().getFiles()); } /** * Builds an instance of JobScheduleAsyncClient class. * * @return an instance of JobScheduleAsyncClient. */ @Generated public JobScheduleAsyncClient buildJobScheduleAsyncClient() { return new JobScheduleAsyncClient(buildInnerClient().getJobSchedules()); } /** * Builds an instance of TaskAsyncClient class. * * @return an instance of TaskAsyncClient. */ @Generated public TaskAsyncClient buildTaskAsyncClient() { return new TaskAsyncClient(buildInnerClient().getTasks()); } /** * Builds an instance of ComputeNodesAsyncClient class. * * @return an instance of ComputeNodesAsyncClient. */ @Generated public ComputeNodesAsyncClient buildComputeNodesAsyncClient() { return new ComputeNodesAsyncClient(buildInnerClient().getComputeNodes()); } /** * Builds an instance of ComputeNodeExtensionsAsyncClient class. * * @return an instance of ComputeNodeExtensionsAsyncClient. */ @Generated public ComputeNodeExtensionsAsyncClient buildComputeNodeExtensionsAsyncClient() { return new ComputeNodeExtensionsAsyncClient(buildInnerClient().getComputeNodeExtensions()); } /** * Builds an instance of ApplicationsClient class. * * @return an instance of ApplicationsClient. */ @Generated public ApplicationsClient buildApplicationsClient() { return new ApplicationsClient(new ApplicationsAsyncClient(buildInnerClient().getApplications())); } /** * Builds an instance of PoolClient class. * * @return an instance of PoolClient. */ @Generated public PoolClient buildPoolClient() { return new PoolClient(new PoolAsyncClient(buildInnerClient().getPools())); } /** * Builds an instance of AccountClient class. 
* * @return an instance of AccountClient. */ @Generated public AccountClient buildAccountClient() { return new AccountClient(new AccountAsyncClient(buildInnerClient().getAccounts())); } /** * Builds an instance of JobClient class. * * @return an instance of JobClient. */ @Generated public JobClient buildJobClient() { return new JobClient(new JobAsyncClient(buildInnerClient().getJobs())); } /** * Builds an instance of CertificatesClient class. * * @return an instance of CertificatesClient. */ @Generated public CertificatesClient buildCertificatesClient() { return new CertificatesClient(new CertificatesAsyncClient(buildInnerClient().getCertificates())); } /** * Builds an instance of FileClient class. * * @return an instance of FileClient. */ @Generated public FileClient buildFileClient() { return new FileClient(new FileAsyncClient(buildInnerClient().getFiles())); } /** * Builds an instance of JobScheduleClient class. * * @return an instance of JobScheduleClient. */ @Generated public JobScheduleClient buildJobScheduleClient() { return new JobScheduleClient(new JobScheduleAsyncClient(buildInnerClient().getJobSchedules())); } /** * Builds an instance of TaskClient class. * * @return an instance of TaskClient. */ @Generated public TaskClient buildTaskClient() { return new TaskClient(new TaskAsyncClient(buildInnerClient().getTasks())); } /** * Builds an instance of ComputeNodesClient class. * * @return an instance of ComputeNodesClient. */ @Generated public ComputeNodesClient buildComputeNodesClient() { return new ComputeNodesClient(new ComputeNodesAsyncClient(buildInnerClient().getComputeNodes())); } /** * Builds an instance of ComputeNodeExtensionsClient class. * * @return an instance of ComputeNodeExtensionsClient. */ @Generated public ComputeNodeExtensionsClient buildComputeNodeExtensionsClient() { return new ComputeNodeExtensionsClient( new ComputeNodeExtensionsAsyncClient(buildInnerClient().getComputeNodeExtensions())); } }
class BatchServiceClientBuilder implements HttpTrait<BatchServiceClientBuilder>, ConfigurationTrait<BatchServiceClientBuilder>, TokenCredentialTrait<BatchServiceClientBuilder>, EndpointTrait<BatchServiceClientBuilder> { @Generated private static final String SDK_NAME = "name"; @Generated private static final String SDK_VERSION = "version"; @Generated private static final String[] DEFAULT_SCOPES = new String[] {"https: @Generated private static final Map<String, String> PROPERTIES = CoreUtils.getProperties("azure-compute-batch.properties"); @Generated private final List<HttpPipelinePolicy> pipelinePolicies; /** Create an instance of the BatchServiceClientBuilder. */ @Generated public BatchServiceClientBuilder() { this.pipelinePolicies = new ArrayList<>(); } /* * The HTTP pipeline to send requests through. */ @Generated private HttpPipeline pipeline; /** {@inheritDoc}. */ @Generated @Override public BatchServiceClientBuilder pipeline(HttpPipeline pipeline) { this.pipeline = pipeline; return this; } /* * The HTTP client used to send the request. */ @Generated private HttpClient httpClient; /** {@inheritDoc}. */ @Generated @Override public BatchServiceClientBuilder httpClient(HttpClient httpClient) { this.httpClient = httpClient; return this; } /* * The logging configuration for HTTP requests and responses. */ @Generated private HttpLogOptions httpLogOptions; /** {@inheritDoc}. */ @Generated @Override public BatchServiceClientBuilder httpLogOptions(HttpLogOptions httpLogOptions) { this.httpLogOptions = httpLogOptions; return this; } /* * The client options such as application ID and custom headers to set on a request. */ @Generated private ClientOptions clientOptions; /** {@inheritDoc}. */ @Generated @Override public BatchServiceClientBuilder clientOptions(ClientOptions clientOptions) { this.clientOptions = clientOptions; return this; } /* * The retry options to configure retry policy for failed requests. 
*/ @Generated private RetryOptions retryOptions; /** {@inheritDoc}. */ @Generated @Override public BatchServiceClientBuilder retryOptions(RetryOptions retryOptions) { this.retryOptions = retryOptions; return this; } /** {@inheritDoc}. */ @Generated @Override public BatchServiceClientBuilder addPolicy(HttpPipelinePolicy customPolicy) { Objects.requireNonNull(customPolicy, "'customPolicy' cannot be null."); pipelinePolicies.add(customPolicy); return this; } /* * The configuration store that is used during construction of the service client. */ @Generated private Configuration configuration; /** {@inheritDoc}. */ @Generated @Override public BatchServiceClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /* * The TokenCredential used for authentication. */ @Generated private TokenCredential tokenCredential; /** {@inheritDoc}. */ @Generated @Override public BatchServiceClientBuilder credential(TokenCredential tokenCredential) { this.tokenCredential = tokenCredential; return this; } private BatchSharedKeyCredentials batchSharedKeyCred; public BatchServiceClientBuilder credential(BatchSharedKeyCredentials batchSharedKeyCred) { this.batchSharedKeyCred = Objects.requireNonNull(batchSharedKeyCred, "'batchSharedKeyCred' cannot be null."); this.tokenCredential = null; return this; } /* * The service endpoint */ @Generated private String endpoint; /** {@inheritDoc}. */ @Generated @Override public BatchServiceClientBuilder endpoint(String endpoint) { this.endpoint = endpoint; return this; } /* * Service version */ @Generated private BatchServiceVersion serviceVersion; /** * Sets Service version. * * @param serviceVersion the serviceVersion value. * @return the BatchServiceClientBuilder. */ @Generated public BatchServiceClientBuilder serviceVersion(BatchServiceVersion serviceVersion) { this.serviceVersion = serviceVersion; return this; } /* * The retry policy that will attempt to retry failed requests, if applicable. 
*/ @Generated private RetryPolicy retryPolicy; /** * Sets The retry policy that will attempt to retry failed requests, if applicable. * * @param retryPolicy the retryPolicy value. * @return the BatchServiceClientBuilder. */ @Generated public BatchServiceClientBuilder retryPolicy(RetryPolicy retryPolicy) { this.retryPolicy = retryPolicy; return this; } /** * Builds an instance of BatchServiceClientImpl with the provided parameters. * * @return an instance of BatchServiceClientImpl. */ @Generated private BatchServiceClientImpl buildInnerClient() { HttpPipeline localPipeline = (pipeline != null) ? pipeline : createHttpPipeline(); BatchServiceVersion localServiceVersion = (serviceVersion != null) ? serviceVersion : BatchServiceVersion.getLatest(); BatchServiceClientImpl client = new BatchServiceClientImpl( localPipeline, JacksonAdapter.createDefaultSerializerAdapter(), endpoint, localServiceVersion); return client; } /** * Builds an instance of ApplicationsAsyncClient class. * * @return an instance of ApplicationsAsyncClient. */ @Generated public ApplicationsAsyncClient buildApplicationsAsyncClient() { return new ApplicationsAsyncClient(buildInnerClient().getApplications()); } /** * Builds an instance of PoolAsyncClient class. * * @return an instance of PoolAsyncClient. */ @Generated public PoolAsyncClient buildPoolAsyncClient() { return new PoolAsyncClient(buildInnerClient().getPools()); } /** * Builds an instance of AccountAsyncClient class. * * @return an instance of AccountAsyncClient. */ @Generated public AccountAsyncClient buildAccountAsyncClient() { return new AccountAsyncClient(buildInnerClient().getAccounts()); } /** * Builds an instance of JobAsyncClient class. * * @return an instance of JobAsyncClient. */ @Generated public JobAsyncClient buildJobAsyncClient() { return new JobAsyncClient(buildInnerClient().getJobs()); } /** * Builds an instance of CertificatesAsyncClient class. * * @return an instance of CertificatesAsyncClient. 
*/ @Generated public CertificatesAsyncClient buildCertificatesAsyncClient() { return new CertificatesAsyncClient(buildInnerClient().getCertificates()); } /** * Builds an instance of FileAsyncClient class. * * @return an instance of FileAsyncClient. */ @Generated public FileAsyncClient buildFileAsyncClient() { return new FileAsyncClient(buildInnerClient().getFiles()); } /** * Builds an instance of JobScheduleAsyncClient class. * * @return an instance of JobScheduleAsyncClient. */ @Generated public JobScheduleAsyncClient buildJobScheduleAsyncClient() { return new JobScheduleAsyncClient(buildInnerClient().getJobSchedules()); } /** * Builds an instance of TaskAsyncClient class. * * @return an instance of TaskAsyncClient. */ @Generated public TaskAsyncClient buildTaskAsyncClient() { return new TaskAsyncClient(buildInnerClient().getTasks()); } /** * Builds an instance of ComputeNodesAsyncClient class. * * @return an instance of ComputeNodesAsyncClient. */ @Generated public ComputeNodesAsyncClient buildComputeNodesAsyncClient() { return new ComputeNodesAsyncClient(buildInnerClient().getComputeNodes()); } /** * Builds an instance of ComputeNodeExtensionsAsyncClient class. * * @return an instance of ComputeNodeExtensionsAsyncClient. */ @Generated public ComputeNodeExtensionsAsyncClient buildComputeNodeExtensionsAsyncClient() { return new ComputeNodeExtensionsAsyncClient(buildInnerClient().getComputeNodeExtensions()); } /** * Builds an instance of ApplicationsClient class. * * @return an instance of ApplicationsClient. */ @Generated public ApplicationsClient buildApplicationsClient() { return new ApplicationsClient(new ApplicationsAsyncClient(buildInnerClient().getApplications())); } /** * Builds an instance of PoolClient class. * * @return an instance of PoolClient. */ @Generated public PoolClient buildPoolClient() { return new PoolClient(new PoolAsyncClient(buildInnerClient().getPools())); } /** * Builds an instance of AccountClient class. 
* * @return an instance of AccountClient. */ @Generated public AccountClient buildAccountClient() { return new AccountClient(new AccountAsyncClient(buildInnerClient().getAccounts())); } /** * Builds an instance of JobClient class. * * @return an instance of JobClient. */ @Generated public JobClient buildJobClient() { return new JobClient(new JobAsyncClient(buildInnerClient().getJobs())); } /** * Builds an instance of CertificatesClient class. * * @return an instance of CertificatesClient. */ @Generated public CertificatesClient buildCertificatesClient() { return new CertificatesClient(new CertificatesAsyncClient(buildInnerClient().getCertificates())); } /** * Builds an instance of FileClient class. * * @return an instance of FileClient. */ @Generated public FileClient buildFileClient() { return new FileClient(new FileAsyncClient(buildInnerClient().getFiles())); } /** * Builds an instance of JobScheduleClient class. * * @return an instance of JobScheduleClient. */ @Generated public JobScheduleClient buildJobScheduleClient() { return new JobScheduleClient(new JobScheduleAsyncClient(buildInnerClient().getJobSchedules())); } /** * Builds an instance of TaskClient class. * * @return an instance of TaskClient. */ @Generated public TaskClient buildTaskClient() { return new TaskClient(new TaskAsyncClient(buildInnerClient().getTasks())); } /** * Builds an instance of ComputeNodesClient class. * * @return an instance of ComputeNodesClient. */ @Generated public ComputeNodesClient buildComputeNodesClient() { return new ComputeNodesClient(new ComputeNodesAsyncClient(buildInnerClient().getComputeNodes())); } /** * Builds an instance of ComputeNodeExtensionsClient class. * * @return an instance of ComputeNodeExtensionsClient. */ @Generated public ComputeNodeExtensionsClient buildComputeNodeExtensionsClient() { return new ComputeNodeExtensionsClient( new ComputeNodeExtensionsAsyncClient(buildInnerClient().getComputeNodeExtensions())); } }
Discussed offline. ocp-date and date headers cannot both be signed for the Authentication header. If ocp-date is set, lets sign it with the Authentication header. Else if Date header is specified, sign it with it. Else sign with ocp-date set value to now().
/**
 * Builds the Batch SharedKey canonical string for the request, signs it with
 * HMAC-SHA256, and returns the Authorization header value
 * ("SharedKey &lt;account&gt;:&lt;signature&gt;").
 * <p>
 * ocp-date and Date cannot both be signed: if ocp-date is already set, it is
 * used (as part of the canonical ocp-* headers) and the Date line is left
 * empty; otherwise, if a Date header is present, that is signed; otherwise
 * ocp-date is stamped with now() and used.
 *
 * @param request the outgoing request to sign; its ocp-date header may be set as a side effect
 * @return the Authorization header value
 * @throws IOException if decoding a query component fails
 */
public String signHeader(HttpRequest request) throws IOException {
    // Decide which date source participates in the Date signature line.
    // NOTE: headerValue() returns "" (not null) for a missing header.
    String dateHeaderToSign = headerValue(request, HttpHeaderName.DATE);
    if (request.getHeaders().get("ocp-date") != null) {
        // ocp-date wins: it is signed via the canonical ocp-* header section,
        // so the Date line must stay empty.
        dateHeaderToSign = "";
    } else if (dateHeaderToSign == null || dateHeaderToSign.isEmpty()) {
        // Neither date header present: stamp ocp-date with the current time
        // and sign through it; the Date line stays empty.
        DateTimeRfc1123 rfcDate = new DateTimeRfc1123(now());
        request.setHeader("ocp-date", rfcDate.toString());
        dateHeaderToSign = "";
    }
    StringBuffer signature = new StringBuffer(request.getHttpMethod().toString());
    signature.append("\n");
    signature.append(headerValue(request, HttpHeaderName.CONTENT_ENCODING)).append("\n");
    signature.append(headerValue(request, HttpHeaderName.CONTENT_LANGUAGE)).append("\n");
    // Content-Length line is blank when the length is absent or negative.
    String contentLength = headerValue(request, HttpHeaderName.CONTENT_LENGTH);
    signature.append((contentLength == null || Long.parseLong(contentLength) < 0 ? "" : contentLength)).append("\n");
    signature.append(headerValue(request, HttpHeaderName.CONTENT_MD5)).append("\n");
    String contentType = headerValue(request, HttpHeaderName.CONTENT_TYPE);
    signature.append(contentType).append("\n");
    signature.append(dateHeaderToSign).append("\n");
    signature.append(headerValue(request, HttpHeaderName.IF_MODIFIED_SINCE)).append("\n");
    signature.append(headerValue(request, HttpHeaderName.IF_MATCH)).append("\n");
    signature.append(headerValue(request, HttpHeaderName.IF_NONE_MATCH)).append("\n");
    signature.append(headerValue(request, HttpHeaderName.IF_UNMODIFIED_SINCE)).append("\n");
    signature.append(headerValue(request, HttpHeaderName.RANGE)).append("\n");
    // Canonicalized ocp-* headers: lower-cased names, sorted, CR/LF collapsed
    // to spaces, leading spaces stripped from values.
    ArrayList<String> customHeaders = new ArrayList<>();
    for (HttpHeader name : request.getHeaders()) {
        if (name.getName().toLowerCase().startsWith("ocp-")) {
            customHeaders.add(name.getName().toLowerCase());
        }
    }
    Collections.sort(customHeaders);
    for (String canonicalHeader : customHeaders) {
        String value = request.getHeaders().getValue(canonicalHeader);
        value = value.replace('\n', ' ').replace('\r', ' ')
            .replaceAll("^[ ]+", "");
        signature.append(canonicalHeader).append(":").append(value).append("\n");
    }
    // Canonicalized resource: /<account>/<path>, followed by the decoded query
    // components sorted by key.
    signature.append("/")
        .append(batchSharedKeyCred.accountName().toLowerCase()).append("/")
        .append(request.getUrl().getPath().replaceAll("^[/]+", ""));
    String query = request.getUrl().getQuery();
    if (query != null) {
        Map<String, String> queryComponents = new TreeMap<>();
        String[] pairs = query.split("&");
        for (String pair : pairs) {
            int idx = pair.indexOf("=");
            String key = URLDecoder.decode(pair.substring(0, idx), "UTF-8")
                .toLowerCase(Locale.US);
            queryComponents.put(
                key,
                key + ":" + URLDecoder.decode(pair.substring(idx + 1), "UTF-8"));
        }
        for (Map.Entry<String, String> entry : queryComponents.entrySet()) {
            signature.append("\n").append(entry.getValue());
        }
    }
    String signedSignature = sign(signature.toString());
    String authorization = "SharedKey " + batchSharedKeyCred.accountName()
        + ":" + signedSignature;
    return authorization;
}
DateTimeRfc1123 rfcDate = new DateTimeRfc1123(now());
/**
 * Builds the Batch SharedKey canonical string for the request, signs it, and
 * returns the Authorization header value ("SharedKey &lt;account&gt;:&lt;signature&gt;").
 * Per the SharedKey contract, ocp-date and Date cannot both be signed: when
 * ocp-date is present it is used (via the canonical ocp-* header section) and
 * the Date signature line is left empty.
 *
 * @param request the outgoing request to sign; its ocp-date header may be set as a side effect
 * @return the Authorization header value
 * @throws IOException if decoding a query component fails
 */
public String signHeader(HttpRequest request) throws IOException {
    String dateHeaderToSign = headerValue(request, HttpHeaderName.DATE);
    if (request.getHeaders().get("ocp-date") == null) {
        if (dateHeaderToSign == null) {
            // NOTE(review): headerValue() returns "" (never null) for a missing
            // header, so this branch looks unreachable and ocp-date may never be
            // stamped when both headers are absent — confirm intent.
            DateTimeRfc1123 rfcDate = new DateTimeRfc1123(now());
            request.setHeader("ocp-date", rfcDate.toString());
            dateHeaderToSign = "";
        }
    } else {
        // ocp-date wins: keep the Date signature line empty.
        dateHeaderToSign = "";
    }
    StringBuffer signature = new StringBuffer(request.getHttpMethod().toString());
    signature.append("\n");
    signature.append(headerValue(request, HttpHeaderName.CONTENT_ENCODING)).append("\n");
    signature.append(headerValue(request, HttpHeaderName.CONTENT_LANGUAGE)).append("\n");
    // Content-Length line is blank when the length is absent or negative.
    String contentLength = headerValue(request, HttpHeaderName.CONTENT_LENGTH);
    signature.append((contentLength == null || Long.parseLong(contentLength) < 0 ? "" : contentLength)).append("\n");
    signature.append(headerValue(request, HttpHeaderName.CONTENT_MD5)).append("\n");
    String contentType = headerValue(request, HttpHeaderName.CONTENT_TYPE);
    signature.append(contentType).append("\n");
    signature.append(dateHeaderToSign).append("\n");
    signature.append(headerValue(request, HttpHeaderName.IF_MODIFIED_SINCE)).append("\n");
    signature.append(headerValue(request, HttpHeaderName.IF_MATCH)).append("\n");
    signature.append(headerValue(request, HttpHeaderName.IF_NONE_MATCH)).append("\n");
    signature.append(headerValue(request, HttpHeaderName.IF_UNMODIFIED_SINCE)).append("\n");
    signature.append(headerValue(request, HttpHeaderName.RANGE)).append("\n");
    // Canonicalized ocp-* headers: lower-cased names, sorted, CR/LF collapsed
    // to spaces, leading spaces stripped from values.
    ArrayList<String> customHeaders = new ArrayList<>();
    for (HttpHeader name : request.getHeaders()) {
        if (name.getName().toLowerCase().startsWith("ocp-")) {
            customHeaders.add(name.getName().toLowerCase());
        }
    }
    Collections.sort(customHeaders);
    for (String canonicalHeader : customHeaders) {
        String value = request.getHeaders().getValue(canonicalHeader);
        value = value.replace('\n', ' ').replace('\r', ' ')
            .replaceAll("^[ ]+", "");
        signature.append(canonicalHeader).append(":").append(value).append("\n");
    }
    // Canonicalized resource: /<account>/<path>, then decoded query components
    // sorted by key.
    signature.append("/")
        .append(batchSharedKeyCred.accountName().toLowerCase()).append("/")
        .append(request.getUrl().getPath().replaceAll("^[/]+", ""));
    String query = request.getUrl().getQuery();
    if (query != null) {
        Map<String, String> queryComponents = new TreeMap<>();
        String[] pairs = query.split("&");
        for (String pair : pairs) {
            int idx = pair.indexOf("=");
            String key = URLDecoder.decode(pair.substring(0, idx), "UTF-8")
                .toLowerCase(Locale.US);
            queryComponents.put(
                key,
                key + ":" + URLDecoder.decode(pair.substring(idx + 1), "UTF-8"));
        }
        for (Map.Entry<String, String> entry : queryComponents.entrySet()) {
            signature.append("\n").append(entry.getValue());
        }
    }
    String signedSignature = sign(signature.toString());
    String authorization = "SharedKey " + batchSharedKeyCred.accountName()
        + ":" + signedSignature;
    return authorization;
}
class BatchSharedKeyCredentialsPolicy implements HttpPipelinePolicy { private final BatchSharedKeyCredentials batchSharedKeyCred; private Mac hmacSha256; /** * Creates a SharedKey pipeline policy that adds the SharedKey into the request's authorization header. * * @param credential the SharedKey credential used to create the policy. */ public BatchSharedKeyCredentialsPolicy(BatchSharedKeyCredentials credential) { this.batchSharedKeyCred = credential; } /** * @return the {@link BatchSharedKeyCredentials} linked to the policy. */ public BatchSharedKeyCredentials sharedKeyCredential() { return this.batchSharedKeyCred; } private String headerValue(HttpRequest request, HttpHeaderName headerName) { HttpHeaders headers = request.getHeaders(); Header header = headers.get(headerName); if (header == null) { return ""; } return header.getValue(); } private synchronized String sign(String stringToSign) { try { byte[] digest = getHmac256().doFinal(stringToSign.getBytes("UTF-8")); return Base64.encodeBase64String(digest); } catch (Exception e) { throw new IllegalArgumentException("accessKey", e); } } private synchronized Mac getHmac256() throws NoSuchAlgorithmException, InvalidKeyException { if (this.hmacSha256 == null) { this.hmacSha256 = Mac.getInstance("HmacSHA256"); this.hmacSha256.init(new SecretKeySpec(Base64.decodeBase64(batchSharedKeyCred.keyValue()), "HmacSHA256")); } return this.hmacSha256; } @Override public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { try { String authorizationValue = this.signHeader(context.getHttpRequest()); context.getHttpRequest().setHeader("Authorization", authorizationValue); } catch (IOException e) { throw new RuntimeException(e); } return next.process(); } }
class BatchSharedKeyCredentialsPolicy implements HttpPipelinePolicy { private final BatchSharedKeyCredentials batchSharedKeyCred; private Mac hmacSha256; /** * Creates a SharedKey pipeline policy that adds the SharedKey into the request's authorization header. * * @param credential the SharedKey credential used to create the policy. */ public BatchSharedKeyCredentialsPolicy(BatchSharedKeyCredentials credential) { this.batchSharedKeyCred = credential; } /** * @return the {@link BatchSharedKeyCredentials} linked to the policy. */ private String headerValue(HttpRequest request, HttpHeaderName headerName) { HttpHeaders headers = request.getHeaders(); Header header = headers.get(headerName); if (header == null) { return ""; } return header.getValue(); } private synchronized String sign(String stringToSign) { try { byte[] digest = getHmac256().doFinal(stringToSign.getBytes("UTF-8")); return Base64.encodeBase64String(digest); } catch (Exception e) { throw new IllegalArgumentException("accessKey", e); } } private synchronized Mac getHmac256() throws NoSuchAlgorithmException, InvalidKeyException { if (this.hmacSha256 == null) { this.hmacSha256 = Mac.getInstance("HmacSHA256"); this.hmacSha256.init(new SecretKeySpec(Base64.decodeBase64(batchSharedKeyCred.keyValue()), "HmacSHA256")); } return this.hmacSha256; } @Override public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { try { String authorizationValue = this.signHeader(context.getHttpRequest()); context.getHttpRequest().setHeader("Authorization", authorizationValue); } catch (IOException e) { throw new RuntimeException(e); } return next.process(); } }
Consider using `CoreUtils.randomUuid` here instead of `UUID.randomUUID`.
/**
 * Generates a random hex id of the given length by taking the trailing
 * {@code length} characters of a dashless random UUID (32 hex chars total).
 *
 * @param length number of hex characters to return; must be in [0, 32]
 * @return a random hex string of exactly {@code length} characters
 * @throws IllegalArgumentException if {@code length} is negative or greater than 32
 */
private static String getRandomId(int length) {
    // FIX: guard the range; previously length > 32 produced a negative
    // substring index and a confusing StringIndexOutOfBoundsException.
    if (length < 0 || length > 32) {
        throw new IllegalArgumentException("length must be between 0 and 32, got: " + length);
    }
    // 32 - length is the start index, yielding exactly 'length' characters.
    return UUID.randomUUID().toString().replace("-", "").substring(32 - length);
}
return UUID.randomUUID().toString().replace("-", "").substring(32 - length);
/**
 * Generates a random hex id of the given length (up to 32 chars) by taking the
 * trailing {@code length} characters of a dashless random UUID.
 * {@code 32 - length} is the substring start index, so exactly {@code length}
 * characters are returned; callers must pass a length in [0, 32].
 */
private static String getRandomId(int length) {
    return CoreUtils.randomUuid().toString()
        .replace("-", "")
        .substring(32 - length);
}
/**
 * Minimal logging-backed span: emits "span created"/"span ended" records
 * through a {@link ClientLogger} instead of a real tracer. The shared
 * {@link #NOOP} instance is disabled and produces no output.
 */
class LoggingSpan {
    public static final LoggingSpan NOOP = new LoggingSpan();

    private static final ClientLogger LOGGER = new ClientLogger(LoggingSpan.class);

    private final String traceId;           // 32 hex chars when enabled, null for NOOP
    private final String spanId;            // 16 hex chars when enabled, null for NOOP
    private final LoggingEventBuilder log;  // carries the span attributes
    private final boolean enabled;          // false only for NOOP

    // NOOP constructor: no logging; getters report all-zero ids.
    private LoggingSpan() {
        this.traceId = null;
        this.spanId = null;
        this.log = null;
        this.enabled = false;
    }

    public String getTraceId() {
        // all-zero string stands in for "no trace id" on the NOOP span
        return enabled ? traceId : "00000000000000000000000000000000";
    }

    public String getSpanId() {
        return enabled ? spanId : "0000000000000000";
    }

    LoggingSpan(String name, SpanKind kind, String traceId, String parentSpanId) {
        // reuse the provided trace id when present, otherwise start a new trace
        this.traceId = traceId != null ? traceId : getRandomId(32);
        this.spanId = getRandomId(16);
        this.log = LOGGER.atInfo()
            .addKeyValue("traceId", this.traceId)
            .addKeyValue("spanId", spanId)
            .addKeyValue("parentSpanId", parentSpanId)
            .addKeyValue("name", name)
            .addKeyValue("kind", kind.name());
        log.log("span created");
        this.enabled = true;
    }

    LoggingSpan(String name, SpanKind kind, LoggingSpan parent) {
        // a disabled parent contributes no trace id, so a fresh one is generated
        this(name, kind, parent.enabled ? parent.traceId : null, parent.getSpanId());
    }

    public LoggingSpan addKeyValue(String key, Object value) {
        if (enabled) {
            log.addKeyValue(key, value);
        }
        return this;
    }

    public void end(Throwable throwable) {
        if (enabled) {
            if (throwable != null) {
                log.log("span ended", throwable);
            } else {
                log.log("span ended");
            }
        }
    }

    /**
     * Generates random id with given length up to 32 chars.
     */
    // NOTE(review): the method this javadoc documents (getRandomId) is defined
    // alongside this class in the original file but is not part of this chunk.
}
/**
 * Minimal logging-backed span: emits "span created"/"span ended" records
 * through a {@link ClientLogger} instead of a real tracer. The shared
 * {@link #NOOP} instance is disabled and produces no output.
 */
class LoggingSpan {
    public static final LoggingSpan NOOP = new LoggingSpan();

    private static final ClientLogger LOGGER = new ClientLogger(LoggingSpan.class);

    private final String traceId;           // 32 hex chars when enabled, null for NOOP
    private final String spanId;            // 16 hex chars when enabled, null for NOOP
    private final LoggingEventBuilder log;  // carries the span attributes
    private final boolean enabled;          // false only for NOOP

    // NOOP constructor: no logging; getters report all-zero ids.
    private LoggingSpan() {
        this.traceId = null;
        this.spanId = null;
        this.log = null;
        this.enabled = false;
    }

    public String getTraceId() {
        // all-zero string stands in for "no trace id" on the NOOP span
        return enabled ? traceId : "00000000000000000000000000000000";
    }

    public String getSpanId() {
        return enabled ? spanId : "0000000000000000";
    }

    LoggingSpan(String name, SpanKind kind, String traceId, String parentSpanId) {
        // reuse the provided trace id when present, otherwise start a new trace
        this.traceId = traceId != null ? traceId : getRandomId(32);
        this.spanId = getRandomId(16);
        this.log = LOGGER.atInfo()
            .addKeyValue("traceId", this.traceId)
            .addKeyValue("spanId", spanId)
            .addKeyValue("parentSpanId", parentSpanId)
            .addKeyValue("name", name)
            .addKeyValue("kind", kind.name());
        log.log("span created");
        this.enabled = true;
    }

    LoggingSpan(String name, SpanKind kind, LoggingSpan parent) {
        // a disabled parent contributes no trace id, so a fresh one is generated
        this(name, kind, parent.enabled ? parent.traceId : null, parent.getSpanId());
    }

    public LoggingSpan addKeyValue(String key, Object value) {
        if (enabled) {
            log.addKeyValue(key, value);
        }
        return this;
    }

    public void end(Throwable throwable) {
        if (enabled) {
            if (throwable != null) {
                log.log("span ended", throwable);
            } else {
                log.log("span ended");
            }
        }
    }

    /**
     * Generates random id with given length up to 32 chars.
     */
    // NOTE(review): the method this javadoc documents (getRandomId) is defined
    // alongside this class in the original file but is not part of this chunk.
}
I think for clarity I'd make this just return the UUID without the `-` and have the caller handle doing the substring. Seeing `32 - length` is fairly confusing and could be error-prone — at a glance it isn't obvious whether passing `15` for the length gives you 15 characters or 17.
/**
 * Generates a random hex id of the given length (up to 32 chars) by taking the
 * trailing {@code length} characters of a dashless random UUID.
 * {@code 32 - length} is the substring start index, so exactly {@code length}
 * characters are returned; callers must pass a length in [0, 32].
 */
private static String getRandomId(int length) {
    return CoreUtils.randomUuid().toString()
        .replace("-", "")
        .substring(32 - length);
}
.substring(32 - length);
/**
 * Generates a random hex id of the given length (up to 32 chars) by taking the
 * trailing {@code length} characters of a dashless random UUID.
 * {@code 32 - length} is the substring start index, so exactly {@code length}
 * characters are returned; callers must pass a length in [0, 32].
 */
private static String getRandomId(int length) {
    return CoreUtils.randomUuid().toString()
        .replace("-", "")
        .substring(32 - length);
}
/**
 * Minimal logging-backed span: emits "span created"/"span ended" records
 * through a {@link ClientLogger} instead of a real tracer. The shared
 * {@link #NOOP} instance is disabled and produces no output.
 */
class LoggingSpan {
    public static final LoggingSpan NOOP = new LoggingSpan();

    private static final ClientLogger LOGGER = new ClientLogger(LoggingSpan.class);

    private final String traceId;           // 32 hex chars when enabled, null for NOOP
    private final String spanId;            // 16 hex chars when enabled, null for NOOP
    private final LoggingEventBuilder log;  // carries the span attributes
    private final boolean enabled;          // false only for NOOP

    // NOOP constructor: no logging; getters report all-zero ids.
    private LoggingSpan() {
        this.traceId = null;
        this.spanId = null;
        this.log = null;
        this.enabled = false;
    }

    public String getTraceId() {
        // all-zero string stands in for "no trace id" on the NOOP span
        return enabled ? traceId : "00000000000000000000000000000000";
    }

    public String getSpanId() {
        return enabled ? spanId : "0000000000000000";
    }

    LoggingSpan(String name, SpanKind kind, String traceId, String parentSpanId) {
        // reuse the provided trace id when present, otherwise start a new trace
        this.traceId = traceId != null ? traceId : getRandomId(32);
        this.spanId = getRandomId(16);
        this.log = LOGGER.atInfo()
            .addKeyValue("traceId", this.traceId)
            .addKeyValue("spanId", spanId)
            .addKeyValue("parentSpanId", parentSpanId)
            .addKeyValue("name", name)
            .addKeyValue("kind", kind.name());
        log.log("span created");
        this.enabled = true;
    }

    LoggingSpan(String name, SpanKind kind, LoggingSpan parent) {
        // a disabled parent contributes no trace id, so a fresh one is generated
        this(name, kind, parent.enabled ? parent.traceId : null, parent.getSpanId());
    }

    public LoggingSpan addKeyValue(String key, Object value) {
        if (enabled) {
            log.addKeyValue(key, value);
        }
        return this;
    }

    public void end(Throwable throwable) {
        if (enabled) {
            if (throwable != null) {
                log.log("span ended", throwable);
            } else {
                log.log("span ended");
            }
        }
    }

    /**
     * Generates random id with given length up to 32 chars.
     */
    // NOTE(review): the method this javadoc documents (getRandomId) is defined
    // alongside this class in the original file but is not part of this chunk.
}
/**
 * Minimal logging-backed span: emits "span created"/"span ended" records
 * through a {@link ClientLogger} instead of a real tracer. The shared
 * {@link #NOOP} instance is disabled and produces no output.
 */
class LoggingSpan {
    public static final LoggingSpan NOOP = new LoggingSpan();

    private static final ClientLogger LOGGER = new ClientLogger(LoggingSpan.class);

    private final String traceId;           // 32 hex chars when enabled, null for NOOP
    private final String spanId;            // 16 hex chars when enabled, null for NOOP
    private final LoggingEventBuilder log;  // carries the span attributes
    private final boolean enabled;          // false only for NOOP

    // NOOP constructor: no logging; getters report all-zero ids.
    private LoggingSpan() {
        this.traceId = null;
        this.spanId = null;
        this.log = null;
        this.enabled = false;
    }

    public String getTraceId() {
        // all-zero string stands in for "no trace id" on the NOOP span
        return enabled ? traceId : "00000000000000000000000000000000";
    }

    public String getSpanId() {
        return enabled ? spanId : "0000000000000000";
    }

    LoggingSpan(String name, SpanKind kind, String traceId, String parentSpanId) {
        // reuse the provided trace id when present, otherwise start a new trace
        this.traceId = traceId != null ? traceId : getRandomId(32);
        this.spanId = getRandomId(16);
        this.log = LOGGER.atInfo()
            .addKeyValue("traceId", this.traceId)
            .addKeyValue("spanId", spanId)
            .addKeyValue("parentSpanId", parentSpanId)
            .addKeyValue("name", name)
            .addKeyValue("kind", kind.name());
        log.log("span created");
        this.enabled = true;
    }

    LoggingSpan(String name, SpanKind kind, LoggingSpan parent) {
        // a disabled parent contributes no trace id, so a fresh one is generated
        this(name, kind, parent.enabled ? parent.traceId : null, parent.getSpanId());
    }

    public LoggingSpan addKeyValue(String key, Object value) {
        if (enabled) {
            log.addKeyValue(key, value);
        }
        return this;
    }

    public void end(Throwable throwable) {
        if (enabled) {
            if (throwable != null) {
                log.log("span ended", throwable);
            } else {
                log.log("span ended");
            }
        }
    }

    /**
     * Generates random id with given length up to 32 chars.
     */
    // NOTE(review): the method this javadoc documents (getRandomId) is defined
    // alongside this class in the original file but is not part of this chunk.
}
This is a private method used only within this file, meant to simplify the code that deals with UUIDs. Also, `32 - length` is a start index, so the substring returns exactly the number of characters asked for. It's still error-prone (length can be bigger than 32), but given it's never meant to be shared outside of this file, simplicity seems more important to me.
/**
 * Generates a random hex id of the given length (up to 32 chars) by taking the
 * trailing {@code length} characters of a dashless random UUID.
 * {@code 32 - length} is the substring start index, so exactly {@code length}
 * characters are returned; callers must pass a length in [0, 32].
 */
private static String getRandomId(int length) {
    return CoreUtils.randomUuid().toString()
        .replace("-", "")
        .substring(32 - length);
}
.substring(32 - length);
/**
 * Generates a random hex id of the given length (up to 32 chars) by taking the
 * trailing {@code length} characters of a dashless random UUID.
 * {@code 32 - length} is the substring start index, so exactly {@code length}
 * characters are returned; callers must pass a length in [0, 32].
 */
private static String getRandomId(int length) {
    return CoreUtils.randomUuid().toString()
        .replace("-", "")
        .substring(32 - length);
}
/**
 * Minimal logging-backed span: emits "span created"/"span ended" records
 * through a {@link ClientLogger} instead of a real tracer. The shared
 * {@link #NOOP} instance is disabled and produces no output.
 */
class LoggingSpan {
    public static final LoggingSpan NOOP = new LoggingSpan();

    private static final ClientLogger LOGGER = new ClientLogger(LoggingSpan.class);

    private final String traceId;           // 32 hex chars when enabled, null for NOOP
    private final String spanId;            // 16 hex chars when enabled, null for NOOP
    private final LoggingEventBuilder log;  // carries the span attributes
    private final boolean enabled;          // false only for NOOP

    // NOOP constructor: no logging; getters report all-zero ids.
    private LoggingSpan() {
        this.traceId = null;
        this.spanId = null;
        this.log = null;
        this.enabled = false;
    }

    public String getTraceId() {
        // all-zero string stands in for "no trace id" on the NOOP span
        return enabled ? traceId : "00000000000000000000000000000000";
    }

    public String getSpanId() {
        return enabled ? spanId : "0000000000000000";
    }

    LoggingSpan(String name, SpanKind kind, String traceId, String parentSpanId) {
        // reuse the provided trace id when present, otherwise start a new trace
        this.traceId = traceId != null ? traceId : getRandomId(32);
        this.spanId = getRandomId(16);
        this.log = LOGGER.atInfo()
            .addKeyValue("traceId", this.traceId)
            .addKeyValue("spanId", spanId)
            .addKeyValue("parentSpanId", parentSpanId)
            .addKeyValue("name", name)
            .addKeyValue("kind", kind.name());
        log.log("span created");
        this.enabled = true;
    }

    LoggingSpan(String name, SpanKind kind, LoggingSpan parent) {
        // a disabled parent contributes no trace id, so a fresh one is generated
        this(name, kind, parent.enabled ? parent.traceId : null, parent.getSpanId());
    }

    public LoggingSpan addKeyValue(String key, Object value) {
        if (enabled) {
            log.addKeyValue(key, value);
        }
        return this;
    }

    public void end(Throwable throwable) {
        if (enabled) {
            if (throwable != null) {
                log.log("span ended", throwable);
            } else {
                log.log("span ended");
            }
        }
    }

    /**
     * Generates random id with given length up to 32 chars.
     */
    // NOTE(review): the method this javadoc documents (getRandomId) is defined
    // alongside this class in the original file but is not part of this chunk.
}
/**
 * Minimal logging-backed span: emits "span created"/"span ended" records
 * through a {@link ClientLogger} instead of a real tracer. The shared
 * {@link #NOOP} instance is disabled and produces no output.
 */
class LoggingSpan {
    public static final LoggingSpan NOOP = new LoggingSpan();

    private static final ClientLogger LOGGER = new ClientLogger(LoggingSpan.class);

    private final String traceId;           // 32 hex chars when enabled, null for NOOP
    private final String spanId;            // 16 hex chars when enabled, null for NOOP
    private final LoggingEventBuilder log;  // carries the span attributes
    private final boolean enabled;          // false only for NOOP

    // NOOP constructor: no logging; getters report all-zero ids.
    private LoggingSpan() {
        this.traceId = null;
        this.spanId = null;
        this.log = null;
        this.enabled = false;
    }

    public String getTraceId() {
        // all-zero string stands in for "no trace id" on the NOOP span
        return enabled ? traceId : "00000000000000000000000000000000";
    }

    public String getSpanId() {
        return enabled ? spanId : "0000000000000000";
    }

    LoggingSpan(String name, SpanKind kind, String traceId, String parentSpanId) {
        // reuse the provided trace id when present, otherwise start a new trace
        this.traceId = traceId != null ? traceId : getRandomId(32);
        this.spanId = getRandomId(16);
        this.log = LOGGER.atInfo()
            .addKeyValue("traceId", this.traceId)
            .addKeyValue("spanId", spanId)
            .addKeyValue("parentSpanId", parentSpanId)
            .addKeyValue("name", name)
            .addKeyValue("kind", kind.name());
        log.log("span created");
        this.enabled = true;
    }

    LoggingSpan(String name, SpanKind kind, LoggingSpan parent) {
        // a disabled parent contributes no trace id, so a fresh one is generated
        this(name, kind, parent.enabled ? parent.traceId : null, parent.getSpanId());
    }

    public LoggingSpan addKeyValue(String key, Object value) {
        if (enabled) {
            log.addKeyValue(key, value);
        }
        return this;
    }

    public void end(Throwable throwable) {
        if (enabled) {
            if (throwable != null) {
                log.log("span ended", throwable);
            } else {
                log.log("span ended");
            }
        }
    }

    /**
     * Generates random id with given length up to 32 chars.
     */
    // NOTE(review): the method this javadoc documents (getRandomId) is defined
    // alongside this class in the original file but is not part of this chunk.
}
Is this method used in the README?
/**
 * Creates a fresh {@link RoomsClientBuilder} instance.
 *
 * @return a newly constructed RoomsClientBuilder.
 */
public RoomsClientBuilder createRoomsClientBuilder() {
    return new RoomsClientBuilder();
}
RoomsClientBuilder builder = new RoomsClientBuilder();
/**
 * Creates a fresh {@link RoomsClientBuilder} instance.
 *
 * @return a newly constructed RoomsClientBuilder.
 */
public RoomsClientBuilder createRoomsClientBuilder() {
    return new RoomsClientBuilder();
}
/**
 * Code samples mirrored in the package README, covering RoomsClient
 * construction and room/participant operations.
 * NOTE(review): the endpoint/connection-string literals ({@code "https:})
 * appear truncated by extraction; in the real README they are placeholder
 * URLs — confirm against the published README.
 */
class ReadmeSamples {
    // participants reused across the add/remove samples below
    RoomParticipant participant1;
    RoomParticipant participant2;

    // Sync client authenticated with an access key.
    public RoomsClient createRoomsClientUsingAzureKeyCredential() {
        String endpoint = "https: AzureKeyCredential azureKeyCredential = new AzureKeyCredential("<access-key>");
        RoomsClient roomsClient = new RoomsClientBuilder().endpoint(endpoint).credential(azureKeyCredential)
            .buildClient();
        return roomsClient;
    }

    // Async client authenticated with an access key.
    public RoomsAsyncClient createRoomsAsyncClientUsingAzureKeyCredential() {
        String endpoint = "https: AzureKeyCredential azureKeyCredential = new AzureKeyCredential("<access-key>");
        RoomsAsyncClient roomsClient = new RoomsClientBuilder().endpoint(endpoint).credential(azureKeyCredential)
            .buildAsyncClient();
        return roomsClient;
    }

    // Sync client from a connection string.
    public RoomsClient createRoomsClientWithConnectionString() {
        String connectionString = "https: RoomsClient roomsClient = new RoomsClientBuilder().connectionString(connectionString).buildClient();
        return roomsClient;
    }

    // Sync client using DefaultAzureCredential (AAD).
    public RoomsClient createRoomsClientWithAAD() {
        String endpoint = "https: RoomsClient roomsClient = new RoomsClientBuilder().endpoint(endpoint)
            .credential(new DefaultAzureCredentialBuilder().build()).buildClient();
        return roomsClient;
    }

    // Sync client using an explicit TokenCredential.
    public RoomsClient createSyncClientUsingTokenCredential() {
        TokenCredential tokenCredential = new DefaultAzureCredentialBuilder().build();
        String endpoint = "https: RoomsClient roomsClient = new RoomsClientBuilder().endpoint(endpoint).credential(tokenCredential).buildClient();
        return roomsClient;
    }

    // Creates a room valid for 30 days with two participants.
    public void createRoomWithValidInput() {
        RoomsClient roomsClient = createRoomsClientWithConnectionString();
        OffsetDateTime validFrom = OffsetDateTime.now();
        OffsetDateTime validUntil = validFrom.plusDays(30);
        List<RoomParticipant> participants = new ArrayList<>();
        participant1 = new RoomParticipant(new CommunicationUserIdentifier("<ACS User MRI identity 1>")).setRole(ParticipantRole.ATTENDEE);
        participant2 = new RoomParticipant(new CommunicationUserIdentifier("<ACS User MRI identity 2>")).setRole(ParticipantRole.CONSUMER);
        participants.add(participant1);
        participants.add(participant2);
        CreateRoomOptions roomOptions = new CreateRoomOptions()
            .setValidFrom(validFrom)
            .setValidUntil(validUntil)
            .setParticipants(participants);
        CommunicationRoom roomResult = roomsClient.createRoom(roomOptions);
        System.out.println("Room Id: " + roomResult.getRoomId());
    }

    // Updates a room's validity window by room id.
    public void updateRoomWithRoomId() {
        RoomsClient roomsClient = createRoomsClientWithConnectionString();
        OffsetDateTime validFrom = OffsetDateTime.now();
        OffsetDateTime validUntil = validFrom.plusDays(30);
        UpdateRoomOptions updateRoomOptions = new UpdateRoomOptions()
            .setValidFrom(validFrom)
            .setValidUntil(validUntil);
        try {
            CommunicationRoom roomResult = roomsClient.updateRoom("<Room Id>", updateRoomOptions);
            System.out.println("Room Id: " + roomResult.getRoomId());
        } catch (RuntimeException ex) {
            System.out.println(ex);
        }
    }

    // Fetches a room by id.
    public void getRoomWithRoomId() {
        RoomsClient roomsClient = createRoomsClientWithConnectionString();
        try {
            CommunicationRoom roomResult = roomsClient.getRoom("<Room Id>");
            System.out.println("Room Id: " + roomResult.getRoomId());
        } catch (RuntimeException ex) {
            System.out.println(ex);
        }
    }

    // Deletes a room by id.
    public void deleteRoomWithRoomId() {
        RoomsClient roomsClient = createRoomsClientWithConnectionString();
        try {
            roomsClient.deleteRoom("<Room Id>");
        } catch (RuntimeException ex) {
            System.out.println(ex);
        }
    }

    // Adds a new participant and updates an existing one in a single call.
    public void addOrUpdateRoomParticipantsWithRoomId() {
        RoomsClient roomsClient = createRoomsClientWithConnectionString();
        List<RoomParticipant> participantsToaddOrUpdate = new ArrayList<>();
        RoomParticipant participantToAdd = new RoomParticipant(new CommunicationUserIdentifier("<ACS User MRI identity 3>")).setRole(ParticipantRole.ATTENDEE);
        participant2 = new RoomParticipant(new CommunicationUserIdentifier("<ACS User MRI identity 2>")).setRole(ParticipantRole.ATTENDEE);
        participantsToaddOrUpdate.add(participantToAdd);
        participantsToaddOrUpdate.add(participant2);
        try {
            AddOrUpdateParticipantsResult addOrUpdateResult = roomsClient.addOrUpdateParticipants("<Room Id>", participantsToaddOrUpdate);
        } catch (RuntimeException ex) {
            System.out.println(ex);
        }
    }

    // Removes the two sample participants from a room.
    public void removeRoomParticipantsWithRoomId() {
        RoomsClient roomsClient = createRoomsClientWithConnectionString();
        List<CommunicationIdentifier> participantsToRemove = new ArrayList<>();
        participantsToRemove.add(participant1.getCommunicationIdentifier());
        participantsToRemove.add(participant2.getCommunicationIdentifier());
        try {
            RemoveParticipantsResult removeResult = roomsClient.removeParticipants("<Room Id>", participantsToRemove);
        } catch (RuntimeException ex) {
            System.out.println(ex);
        }
    }

    // Lists all participants in a room and prints id + role.
    public void listRoomParticipantsWithRoomId() {
        RoomsClient roomsClient = createRoomsClientWithConnectionString();
        try {
            PagedIterable<RoomParticipant> allParticipants = roomsClient.listParticipants("<Room Id>");
            for (RoomParticipant participant : allParticipants) {
                System.out.println(participant.getCommunicationIdentifier().getRawId() + " (" + participant.getRole() + ")");
            }
        } catch (RuntimeException ex) {
            System.out.println(ex);
        }
    }
}
/**
 * Code samples mirrored in the package README, covering RoomsClient
 * construction and room/participant operations.
 * NOTE(review): the endpoint/connection-string literals ({@code "https:})
 * appear truncated by extraction; in the real README they are placeholder
 * URLs — confirm against the published README.
 */
class ReadmeSamples {
    // participants reused across the add/remove samples below
    RoomParticipant participant1;
    RoomParticipant participant2;

    // Sync client authenticated with an access key.
    public RoomsClient createRoomsClientUsingAzureKeyCredential() {
        String endpoint = "https: AzureKeyCredential azureKeyCredential = new AzureKeyCredential("<access-key>");
        RoomsClient roomsClient = new RoomsClientBuilder().endpoint(endpoint).credential(azureKeyCredential)
            .buildClient();
        return roomsClient;
    }

    // Async client authenticated with an access key.
    public RoomsAsyncClient createRoomsAsyncClientUsingAzureKeyCredential() {
        String endpoint = "https: AzureKeyCredential azureKeyCredential = new AzureKeyCredential("<access-key>");
        RoomsAsyncClient roomsClient = new RoomsClientBuilder().endpoint(endpoint).credential(azureKeyCredential)
            .buildAsyncClient();
        return roomsClient;
    }

    // Sync client from a connection string.
    public RoomsClient createRoomsClientWithConnectionString() {
        String connectionString = "https: RoomsClient roomsClient = new RoomsClientBuilder().connectionString(connectionString).buildClient();
        return roomsClient;
    }

    // Sync client using DefaultAzureCredential (AAD).
    public RoomsClient createRoomsClientWithAAD() {
        String endpoint = "https: RoomsClient roomsClient = new RoomsClientBuilder().endpoint(endpoint)
            .credential(new DefaultAzureCredentialBuilder().build()).buildClient();
        return roomsClient;
    }

    // Sync client using an explicit TokenCredential.
    public RoomsClient createSyncClientUsingTokenCredential() {
        TokenCredential tokenCredential = new DefaultAzureCredentialBuilder().build();
        String endpoint = "https: RoomsClient roomsClient = new RoomsClientBuilder().endpoint(endpoint).credential(tokenCredential).buildClient();
        return roomsClient;
    }

    // Creates a room valid for 30 days with two participants.
    public void createRoomWithValidInput() {
        RoomsClient roomsClient = createRoomsClientWithConnectionString();
        OffsetDateTime validFrom = OffsetDateTime.now();
        OffsetDateTime validUntil = validFrom.plusDays(30);
        List<RoomParticipant> participants = new ArrayList<>();
        participant1 = new RoomParticipant(new CommunicationUserIdentifier("<ACS User MRI identity 1>")).setRole(ParticipantRole.ATTENDEE);
        participant2 = new RoomParticipant(new CommunicationUserIdentifier("<ACS User MRI identity 2>")).setRole(ParticipantRole.CONSUMER);
        participants.add(participant1);
        participants.add(participant2);
        CreateRoomOptions roomOptions = new CreateRoomOptions()
            .setValidFrom(validFrom)
            .setValidUntil(validUntil)
            .setParticipants(participants);
        CommunicationRoom roomResult = roomsClient.createRoom(roomOptions);
        System.out.println("Room Id: " + roomResult.getRoomId());
    }

    // Updates a room's validity window by room id.
    public void updateRoomWithRoomId() {
        RoomsClient roomsClient = createRoomsClientWithConnectionString();
        OffsetDateTime validFrom = OffsetDateTime.now();
        OffsetDateTime validUntil = validFrom.plusDays(30);
        UpdateRoomOptions updateRoomOptions = new UpdateRoomOptions()
            .setValidFrom(validFrom)
            .setValidUntil(validUntil);
        try {
            CommunicationRoom roomResult = roomsClient.updateRoom("<Room Id>", updateRoomOptions);
            System.out.println("Room Id: " + roomResult.getRoomId());
        } catch (RuntimeException ex) {
            System.out.println(ex);
        }
    }

    // Fetches a room by id.
    public void getRoomWithRoomId() {
        RoomsClient roomsClient = createRoomsClientWithConnectionString();
        try {
            CommunicationRoom roomResult = roomsClient.getRoom("<Room Id>");
            System.out.println("Room Id: " + roomResult.getRoomId());
        } catch (RuntimeException ex) {
            System.out.println(ex);
        }
    }

    // Deletes a room by id.
    public void deleteRoomWithRoomId() {
        RoomsClient roomsClient = createRoomsClientWithConnectionString();
        try {
            roomsClient.deleteRoom("<Room Id>");
        } catch (RuntimeException ex) {
            System.out.println(ex);
        }
    }

    // Adds a new participant and updates an existing one in a single call.
    public void addOrUpdateRoomParticipantsWithRoomId() {
        RoomsClient roomsClient = createRoomsClientWithConnectionString();
        List<RoomParticipant> participantsToaddOrUpdate = new ArrayList<>();
        RoomParticipant participantToAdd = new RoomParticipant(new CommunicationUserIdentifier("<ACS User MRI identity 3>")).setRole(ParticipantRole.ATTENDEE);
        participant2 = new RoomParticipant(new CommunicationUserIdentifier("<ACS User MRI identity 2>")).setRole(ParticipantRole.ATTENDEE);
        participantsToaddOrUpdate.add(participantToAdd);
        participantsToaddOrUpdate.add(participant2);
        try {
            AddOrUpdateParticipantsResult addOrUpdateResult = roomsClient.addOrUpdateParticipants("<Room Id>", participantsToaddOrUpdate);
        } catch (RuntimeException ex) {
            System.out.println(ex);
        }
    }

    // Removes the two sample participants from a room.
    public void removeRoomParticipantsWithRoomId() {
        RoomsClient roomsClient = createRoomsClientWithConnectionString();
        List<CommunicationIdentifier> participantsToRemove = new ArrayList<>();
        participantsToRemove.add(participant1.getCommunicationIdentifier());
        participantsToRemove.add(participant2.getCommunicationIdentifier());
        try {
            RemoveParticipantsResult removeResult = roomsClient.removeParticipants("<Room Id>", participantsToRemove);
        } catch (RuntimeException ex) {
            System.out.println(ex);
        }
    }

    // Lists all participants in a room and prints id + role.
    public void listRoomParticipantsWithRoomId() {
        RoomsClient roomsClient = createRoomsClientWithConnectionString();
        try {
            PagedIterable<RoomParticipant> allParticipants = roomsClient.listParticipants("<Room Id>");
            for (RoomParticipant participant : allParticipants) {
                System.out.println(participant.getCommunicationIdentifier().getRawId() + " (" + participant.getRole() + ")");
            }
        } catch (RuntimeException ex) {
            System.out.println(ex);
        }
    }
}
No, but it is used in the client builder's Javadoc comment.
/**
 * Creates a fresh {@link RoomsClientBuilder} instance.
 *
 * @return a newly constructed RoomsClientBuilder.
 */
public RoomsClientBuilder createRoomsClientBuilder() {
    return new RoomsClientBuilder();
}
RoomsClientBuilder builder = new RoomsClientBuilder();
/**
 * Creates a fresh {@link RoomsClientBuilder} instance.
 *
 * @return a newly constructed RoomsClientBuilder.
 */
public RoomsClientBuilder createRoomsClientBuilder() {
    return new RoomsClientBuilder();
}
class ReadmeSamples { RoomParticipant participant1; RoomParticipant participant2; public RoomsClient createRoomsClientUsingAzureKeyCredential() { String endpoint = "https: AzureKeyCredential azureKeyCredential = new AzureKeyCredential("<access-key>"); RoomsClient roomsClient = new RoomsClientBuilder().endpoint(endpoint).credential(azureKeyCredential) .buildClient(); return roomsClient; } public RoomsAsyncClient createRoomsAsyncClientUsingAzureKeyCredential() { String endpoint = "https: AzureKeyCredential azureKeyCredential = new AzureKeyCredential("<access-key>"); RoomsAsyncClient roomsClient = new RoomsClientBuilder().endpoint(endpoint).credential(azureKeyCredential) .buildAsyncClient(); return roomsClient; } public RoomsClient createRoomsClientWithConnectionString() { String connectionString = "https: RoomsClient roomsClient = new RoomsClientBuilder().connectionString(connectionString).buildClient(); return roomsClient; } public RoomsClient createRoomsClientWithAAD() { String endpoint = "https: RoomsClient roomsClient = new RoomsClientBuilder().endpoint(endpoint) .credential(new DefaultAzureCredentialBuilder().build()).buildClient(); return roomsClient; } public RoomsClient createSyncClientUsingTokenCredential() { TokenCredential tokenCredential = new DefaultAzureCredentialBuilder().build(); String endpoint = "https: RoomsClient roomsClient = new RoomsClientBuilder().endpoint(endpoint).credential(tokenCredential).buildClient(); return roomsClient; } public void createRoomWithValidInput() { RoomsClient roomsClient = createRoomsClientWithConnectionString(); OffsetDateTime validFrom = OffsetDateTime.now(); OffsetDateTime validUntil = validFrom.plusDays(30); List<RoomParticipant> participants = new ArrayList<>(); participant1 = new RoomParticipant(new CommunicationUserIdentifier("<ACS User MRI identity 1>")).setRole(ParticipantRole.ATTENDEE); participant2 = new RoomParticipant(new CommunicationUserIdentifier("<ACS User MRI identity 
2>")).setRole(ParticipantRole.CONSUMER); participants.add(participant1); participants.add(participant2); CreateRoomOptions roomOptions = new CreateRoomOptions() .setValidFrom(validFrom) .setValidUntil(validUntil) .setParticipants(participants); CommunicationRoom roomResult = roomsClient.createRoom(roomOptions); System.out.println("Room Id: " + roomResult.getRoomId()); } public void updateRoomWithRoomId() { RoomsClient roomsClient = createRoomsClientWithConnectionString(); OffsetDateTime validFrom = OffsetDateTime.now(); OffsetDateTime validUntil = validFrom.plusDays(30); UpdateRoomOptions updateRoomOptions = new UpdateRoomOptions() .setValidFrom(validFrom) .setValidUntil(validUntil); try { CommunicationRoom roomResult = roomsClient.updateRoom("<Room Id>", updateRoomOptions); System.out.println("Room Id: " + roomResult.getRoomId()); } catch (RuntimeException ex) { System.out.println(ex); } } public void getRoomWithRoomId() { RoomsClient roomsClient = createRoomsClientWithConnectionString(); try { CommunicationRoom roomResult = roomsClient.getRoom("<Room Id>"); System.out.println("Room Id: " + roomResult.getRoomId()); } catch (RuntimeException ex) { System.out.println(ex); } } public void deleteRoomWithRoomId() { RoomsClient roomsClient = createRoomsClientWithConnectionString(); try { roomsClient.deleteRoom("<Room Id>"); } catch (RuntimeException ex) { System.out.println(ex); } } public void addOrUpdateRoomParticipantsWithRoomId() { RoomsClient roomsClient = createRoomsClientWithConnectionString(); List<RoomParticipant> participantsToaddOrUpdate = new ArrayList<>(); RoomParticipant participantToAdd = new RoomParticipant(new CommunicationUserIdentifier("<ACS User MRI identity 3>")).setRole(ParticipantRole.ATTENDEE); participant2 = new RoomParticipant(new CommunicationUserIdentifier("<ACS User MRI identity 2>")).setRole(ParticipantRole.ATTENDEE); participantsToaddOrUpdate.add(participantToAdd); participantsToaddOrUpdate.add(participant2); try { 
AddOrUpdateParticipantsResult addOrUpdateResult = roomsClient.addOrUpdateParticipants("<Room Id>", participantsToaddOrUpdate); } catch (RuntimeException ex) { System.out.println(ex); } } public void removeRoomParticipantsWithRoomId() { RoomsClient roomsClient = createRoomsClientWithConnectionString(); List<CommunicationIdentifier> participantsToRemove = new ArrayList<>(); participantsToRemove.add(participant1.getCommunicationIdentifier()); participantsToRemove.add(participant2.getCommunicationIdentifier()); try { RemoveParticipantsResult removeResult = roomsClient.removeParticipants("<Room Id>", participantsToRemove); } catch (RuntimeException ex) { System.out.println(ex); } } public void listRoomParticipantsWithRoomId() { RoomsClient roomsClient = createRoomsClientWithConnectionString(); try { PagedIterable<RoomParticipant> allParticipants = roomsClient.listParticipants("<Room Id>"); for (RoomParticipant participant : allParticipants) { System.out.println(participant.getCommunicationIdentifier().getRawId() + " (" + participant.getRole() + ")"); } } catch (RuntimeException ex) { System.out.println(ex); } } }
class ReadmeSamples { RoomParticipant participant1; RoomParticipant participant2; public RoomsClient createRoomsClientUsingAzureKeyCredential() { String endpoint = "https: AzureKeyCredential azureKeyCredential = new AzureKeyCredential("<access-key>"); RoomsClient roomsClient = new RoomsClientBuilder().endpoint(endpoint).credential(azureKeyCredential) .buildClient(); return roomsClient; } public RoomsAsyncClient createRoomsAsyncClientUsingAzureKeyCredential() { String endpoint = "https: AzureKeyCredential azureKeyCredential = new AzureKeyCredential("<access-key>"); RoomsAsyncClient roomsClient = new RoomsClientBuilder().endpoint(endpoint).credential(azureKeyCredential) .buildAsyncClient(); return roomsClient; } public RoomsClient createRoomsClientWithConnectionString() { String connectionString = "https: RoomsClient roomsClient = new RoomsClientBuilder().connectionString(connectionString).buildClient(); return roomsClient; } public RoomsClient createRoomsClientWithAAD() { String endpoint = "https: RoomsClient roomsClient = new RoomsClientBuilder().endpoint(endpoint) .credential(new DefaultAzureCredentialBuilder().build()).buildClient(); return roomsClient; } public RoomsClient createSyncClientUsingTokenCredential() { TokenCredential tokenCredential = new DefaultAzureCredentialBuilder().build(); String endpoint = "https: RoomsClient roomsClient = new RoomsClientBuilder().endpoint(endpoint).credential(tokenCredential).buildClient(); return roomsClient; } public void createRoomWithValidInput() { RoomsClient roomsClient = createRoomsClientWithConnectionString(); OffsetDateTime validFrom = OffsetDateTime.now(); OffsetDateTime validUntil = validFrom.plusDays(30); List<RoomParticipant> participants = new ArrayList<>(); participant1 = new RoomParticipant(new CommunicationUserIdentifier("<ACS User MRI identity 1>")).setRole(ParticipantRole.ATTENDEE); participant2 = new RoomParticipant(new CommunicationUserIdentifier("<ACS User MRI identity 
2>")).setRole(ParticipantRole.CONSUMER); participants.add(participant1); participants.add(participant2); CreateRoomOptions roomOptions = new CreateRoomOptions() .setValidFrom(validFrom) .setValidUntil(validUntil) .setParticipants(participants); CommunicationRoom roomResult = roomsClient.createRoom(roomOptions); System.out.println("Room Id: " + roomResult.getRoomId()); } public void updateRoomWithRoomId() { RoomsClient roomsClient = createRoomsClientWithConnectionString(); OffsetDateTime validFrom = OffsetDateTime.now(); OffsetDateTime validUntil = validFrom.plusDays(30); UpdateRoomOptions updateRoomOptions = new UpdateRoomOptions() .setValidFrom(validFrom) .setValidUntil(validUntil); try { CommunicationRoom roomResult = roomsClient.updateRoom("<Room Id>", updateRoomOptions); System.out.println("Room Id: " + roomResult.getRoomId()); } catch (RuntimeException ex) { System.out.println(ex); } } public void getRoomWithRoomId() { RoomsClient roomsClient = createRoomsClientWithConnectionString(); try { CommunicationRoom roomResult = roomsClient.getRoom("<Room Id>"); System.out.println("Room Id: " + roomResult.getRoomId()); } catch (RuntimeException ex) { System.out.println(ex); } } public void deleteRoomWithRoomId() { RoomsClient roomsClient = createRoomsClientWithConnectionString(); try { roomsClient.deleteRoom("<Room Id>"); } catch (RuntimeException ex) { System.out.println(ex); } } public void addOrUpdateRoomParticipantsWithRoomId() { RoomsClient roomsClient = createRoomsClientWithConnectionString(); List<RoomParticipant> participantsToaddOrUpdate = new ArrayList<>(); RoomParticipant participantToAdd = new RoomParticipant(new CommunicationUserIdentifier("<ACS User MRI identity 3>")).setRole(ParticipantRole.ATTENDEE); participant2 = new RoomParticipant(new CommunicationUserIdentifier("<ACS User MRI identity 2>")).setRole(ParticipantRole.ATTENDEE); participantsToaddOrUpdate.add(participantToAdd); participantsToaddOrUpdate.add(participant2); try { 
AddOrUpdateParticipantsResult addOrUpdateResult = roomsClient.addOrUpdateParticipants("<Room Id>", participantsToaddOrUpdate); } catch (RuntimeException ex) { System.out.println(ex); } } public void removeRoomParticipantsWithRoomId() { RoomsClient roomsClient = createRoomsClientWithConnectionString(); List<CommunicationIdentifier> participantsToRemove = new ArrayList<>(); participantsToRemove.add(participant1.getCommunicationIdentifier()); participantsToRemove.add(participant2.getCommunicationIdentifier()); try { RemoveParticipantsResult removeResult = roomsClient.removeParticipants("<Room Id>", participantsToRemove); } catch (RuntimeException ex) { System.out.println(ex); } } public void listRoomParticipantsWithRoomId() { RoomsClient roomsClient = createRoomsClientWithConnectionString(); try { PagedIterable<RoomParticipant> allParticipants = roomsClient.listParticipants("<Room Id>"); for (RoomParticipant participant : allParticipants) { System.out.println(participant.getCommunicationIdentifier().getRawId() + " (" + participant.getRole() + ")"); } } catch (RuntimeException ex) { System.out.println(ex); } } }
Should we add a changelog entry for this change?
/**
 * Initializes telemetry: loads Azure VM metadata, and, when client telemetry
 * is enabled, kicks off the background send loop by subscribing to it.
 */
public void init() {
    loadAzureVmMetaData();
    if (isClientTelemetryEnabled()) {
        // Fire-and-forget subscription starts the periodic telemetry send loop.
        sendClientTelemetry().subscribe();
    }
}
sendClientTelemetry().subscribe();
/** Loads Azure VM metadata and, if telemetry is enabled, subscribes to start the background send loop. */
public void init() { loadAzureVmMetaData(); if (this.isClientTelemetryEnabled()) { sendClientTelemetry().subscribe(); } }
class ClientTelemetry { public final static boolean DEFAULT_CLIENT_TELEMETRY_ENABLED = false; public final static String VM_ID_PREFIX = "vmId_"; public final static int ONE_KB_TO_BYTES = 1024; public final static int REQUEST_LATENCY_MAX_MILLI_SEC = 300000; public final static int REQUEST_LATENCY_SUCCESS_PRECISION = 4; public final static int REQUEST_LATENCY_FAILURE_PRECISION = 2; public final static String REQUEST_LATENCY_NAME = "RequestLatency"; public final static String REQUEST_LATENCY_UNIT = "MilliSecond"; public final static int REQUEST_CHARGE_MAX = 10000; public final static int REQUEST_CHARGE_PRECISION = 2; public final static String REQUEST_CHARGE_NAME = "RequestCharge"; public final static String REQUEST_CHARGE_UNIT = "RU"; public final static String TCP_NEW_CHANNEL_LATENCY_NAME = "TcpNewChannelOpenLatency"; public final static String TCP_NEW_CHANNEL_LATENCY_UNIT = "MilliSecond"; public final static int TCP_NEW_CHANNEL_LATENCY_MAX_MILLI_SEC = 300000; public final static int TCP_NEW_CHANNEL_LATENCY_PRECISION = 2; public final static int CPU_MAX = 100; public final static int CPU_PRECISION = 2; private final static String CPU_NAME = "CPU"; private final static String CPU_UNIT = "Percentage"; public final static int MEMORY_MAX_IN_MB = 102400; public final static int MEMORY_PRECISION = 2; private final static String MEMORY_NAME = "MemoryRemaining"; private final static String MEMORY_UNIT = "MB"; private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); private final static AtomicLong instanceCount = new AtomicLong(0); private final static AtomicReference<AzureVMMetadata> azureVmMetaDataSingleton = new AtomicReference<>(null); private final ClientTelemetryInfo clientTelemetryInfo; private final boolean clientTelemetryConfigEnabled; private final boolean clientMetricsEnabled; private final Configs configs; private final CosmosClientTelemetryConfig clientTelemetryConfig; private final HttpClient httpClient; private final HttpClient metadataHttpClient; 
private final ScheduledThreadPoolExecutor scheduledExecutorService = new ScheduledThreadPoolExecutor(1, new CosmosDaemonThreadFactory("ClientTelemetry-" + instanceCount.incrementAndGet())); private final Scheduler scheduler = Schedulers.fromExecutor(scheduledExecutorService); private static final Logger logger = LoggerFactory.getLogger(ClientTelemetry.class); private volatile boolean isClosed; private static final double PERCENTILE_50 = 50.0; private static final double PERCENTILE_90 = 90.0; private static final double PERCENTILE_95 = 95.0; private static final double PERCENTILE_99 = 99.0; private static final double PERCENTILE_999 = 99.9; private static final String USER_AGENT = Utils.getUserAgent(); private final int clientTelemetrySchedulingSec; private final IAuthorizationTokenProvider tokenProvider; private final String globalDatabaseAccountName; public ClientTelemetry(DiagnosticsClientContext diagnosticsClientContext, Boolean acceleratedNetworking, String clientId, String processId, ConnectionMode connectionMode, String globalDatabaseAccountName, String applicationRegion, String hostEnvInfo, Configs configs, CosmosClientTelemetryConfig clientTelemetryConfig, IAuthorizationTokenProvider tokenProvider, List<String> preferredRegions ) { clientTelemetryInfo = new ClientTelemetryInfo( getMachineId(diagnosticsClientContext.getConfig()), clientId, processId, USER_AGENT, connectionMode, globalDatabaseAccountName, applicationRegion, hostEnvInfo, acceleratedNetworking, preferredRegions); checkNotNull(clientTelemetryConfig, "Argument 'clientTelemetryConfig' cannot be null"); this.isClosed = false; this.configs = configs; this.clientTelemetryConfig = clientTelemetryConfig; ImplementationBridgeHelpers.CosmosClientTelemetryConfigHelper.CosmosClientTelemetryConfigAccessor clientTelemetryAccessor = ImplementationBridgeHelpers .CosmosClientTelemetryConfigHelper .getCosmosClientTelemetryConfigAccessor(); assert(clientTelemetryAccessor != null); 
this.clientTelemetryConfigEnabled = clientTelemetryAccessor .isSendClientTelemetryToServiceEnabled(clientTelemetryConfig); this.clientMetricsEnabled = clientTelemetryAccessor .isClientMetricsEnabled(clientTelemetryConfig); this.httpClient = getHttpClientForClientTelemetry(); this.metadataHttpClient = getHttpClientForIMDS(); this.clientTelemetrySchedulingSec = Configs.getClientTelemetrySchedulingInSec(); this.tokenProvider = tokenProvider; this.globalDatabaseAccountName = globalDatabaseAccountName; } public ClientTelemetryInfo getClientTelemetryInfo() { return clientTelemetryInfo; } @JsonIgnore public CosmosClientTelemetryConfig getClientTelemetryConfig() { return clientTelemetryConfig; } public static String getMachineId(DiagnosticsClientContext.DiagnosticsClientConfig diagnosticsClientConfig) { AzureVMMetadata metadataSnapshot = azureVmMetaDataSingleton.get(); if (metadataSnapshot != null && metadataSnapshot.getVmId() != null) { String machineId = VM_ID_PREFIX + metadataSnapshot.getVmId(); if (diagnosticsClientConfig != null) { diagnosticsClientConfig.withMachineId(machineId); } return machineId; } if (diagnosticsClientConfig == null) { return ""; } return diagnosticsClientConfig.getMachineId(); } public static void recordValue(ConcurrentDoubleHistogram doubleHistogram, long value) { try { doubleHistogram.recordValue(value); } catch (Exception ex) { logger.warn("Error while recording value for client telemetry. ", ex); } } public static void recordValue(ConcurrentDoubleHistogram doubleHistogram, double value) { try { doubleHistogram.recordValue(value); } catch (Exception ex) { logger.warn("Error while recording value for client telemetry. 
", ex); } } public boolean isClientTelemetryEnabled() { return this.clientTelemetryConfigEnabled; } public boolean isClientMetricsEnabled() { return this.clientMetricsEnabled; } public void close() { this.isClosed = true; this.scheduledExecutorService.shutdown(); logger.debug("GlobalEndpointManager closed."); } private HttpClient getHttpClientForClientTelemetry() { ImplementationBridgeHelpers.CosmosClientTelemetryConfigHelper .CosmosClientTelemetryConfigAccessor clientTelemetryConfigAccessor = ImplementationBridgeHelpers.CosmosClientTelemetryConfigHelper .getCosmosClientTelemetryConfigAccessor(); HttpClientConfig httpClientConfig = new HttpClientConfig(this.configs) .withMaxIdleConnectionTimeout( clientTelemetryConfigAccessor.getIdleHttpConnectionTimeout(this.clientTelemetryConfig)) .withPoolSize(clientTelemetryConfigAccessor.getMaxConnectionPoolSize(this.clientTelemetryConfig)) .withProxy(clientTelemetryConfigAccessor.getProxy(this.clientTelemetryConfig)) .withNetworkRequestTimeout( clientTelemetryConfigAccessor.getHttpNetworkRequestTimeout(this.clientTelemetryConfig)); return HttpClient.createFixed(httpClientConfig); } private HttpClient getHttpClientForIMDS() { HttpClientConfig httpClientConfig = new HttpClientConfig(this.configs) .withMaxIdleConnectionTimeout(IMDSConfig.DEFAULT_IDLE_CONNECTION_TIMEOUT) .withPoolSize(IMDSConfig.DEFAULT_MAX_CONNECTION_POOL_SIZE) .withNetworkRequestTimeout(IMDSConfig.DEFAULT_NETWORK_REQUEST_TIMEOUT); return HttpClient.createFixed(httpClientConfig); } private Mono<Void> sendClientTelemetry() { return Mono.delay(Duration.ofSeconds(clientTelemetrySchedulingSec), CosmosSchedulers.COSMOS_PARALLEL) .flatMap(t -> { if (this.isClosed) { logger.warn("client already closed"); return Mono.empty(); } if (!this.isClientTelemetryEnabled()) { logger.trace("client telemetry not enabled"); return Mono.empty(); } readHistogram(); try { String endpoint = Configs.getClientTelemetryEndpoint(); if (StringUtils.isEmpty(endpoint)) { 
logger.info("ClientTelemetry {}", OBJECT_MAPPER.writeValueAsString(this.clientTelemetryInfo)); clearDataForNextRun(); return this.sendClientTelemetry(); } else { URI targetEndpoint = new URI(endpoint); ByteBuffer byteBuffer = InternalObjectNode.serializeJsonToByteBuffer(this.clientTelemetryInfo, ClientTelemetry.OBJECT_MAPPER, null); byte[] tempBuffer = RxDocumentServiceRequest.toByteArray(byteBuffer); Map<String, String> headers = new HashMap<>(); String date = Utils.nowAsRFC1123(); headers.put(HttpConstants.HttpHeaders.X_DATE, date); String authorization = this.tokenProvider.getUserAuthorizationToken( "", ResourceType.ClientTelemetry, RequestVerb.POST, headers, AuthorizationTokenType.PrimaryMasterKey, null); try { authorization = URLEncoder.encode(authorization, Constants.UrlEncodingInfo.UTF_8); } catch (UnsupportedEncodingException e) { logger.error("Failed to encode authToken. Exception: ", e); this.clearDataForNextRun(); return this.sendClientTelemetry(); } HttpHeaders httpHeaders = new HttpHeaders(); httpHeaders.set(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON); httpHeaders.set(HttpConstants.HttpHeaders.CONTENT_ENCODING, RuntimeConstants.Encoding.GZIP); httpHeaders.set(HttpConstants.HttpHeaders.X_DATE, date); httpHeaders.set(HttpConstants.HttpHeaders.DATABASE_ACCOUNT_NAME, this.globalDatabaseAccountName); httpHeaders.set(HttpConstants.HttpHeaders.AUTHORIZATION, authorization); String envName = Configs.getEnvironmentName(); if (StringUtils.isNotEmpty(envName)) { httpHeaders.set(HttpConstants.HttpHeaders.ENVIRONMENT_NAME, envName); } HttpRequest httpRequest = new HttpRequest(HttpMethod.POST, targetEndpoint, targetEndpoint.getPort(), httpHeaders) .withBody(tempBuffer); Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest, Duration.ofSeconds(Configs.getHttpResponseTimeoutInSeconds())); return httpResponseMono.flatMap(response -> { if (response.statusCode() != HttpConstants.StatusCodes.NO_CONTENT) { logger.error("Client 
telemetry request did not succeeded, status code {}, request body {}", response.statusCode(), new String(tempBuffer, StandardCharsets.UTF_8)); response.bodyAsString().doOnSuccess(responsePayload -> { logger.error("Client telemetry request did not succeeded, status code {}, request body {}, response body {}", response.statusCode(), new String(tempBuffer, StandardCharsets.UTF_8), responsePayload); }).subscribe(); } this.clearDataForNextRun(); return this.sendClientTelemetry(); }).onErrorResume(throwable -> { logger.error("Error while sending client telemetry request Exception: ", throwable); this.clearDataForNextRun(); return this.sendClientTelemetry(); }); } } catch (JsonProcessingException | URISyntaxException ex) { logger.error("Error while preparing client telemetry. Exception: ", ex); this.clearDataForNextRun(); return this.sendClientTelemetry(); } }).onErrorResume(ex -> { logger.error("sendClientTelemetry() - Unable to send client telemetry" + ". Exception: ", ex); clearDataForNextRun(); return this.sendClientTelemetry(); }).subscribeOn(scheduler); } private void populateAzureVmMetaData(AzureVMMetadata azureVMMetadata) { this.clientTelemetryInfo.setApplicationRegion(azureVMMetadata.getLocation()); this.clientTelemetryInfo.setMachineId(VM_ID_PREFIX + azureVMMetadata.getVmId()); this.clientTelemetryInfo.setHostEnvInfo(azureVMMetadata.getOsType() + "|" + azureVMMetadata.getSku() + "|" + azureVMMetadata.getVmSize() + "|" + azureVMMetadata.getAzEnvironment()); } private void loadAzureVmMetaData() { AzureVMMetadata metadataSnapshot = azureVmMetaDataSingleton.get(); if (metadataSnapshot != null) { this.populateAzureVmMetaData(metadataSnapshot); return; } URI targetEndpoint = null; try { targetEndpoint = new URI(IMDSConfig.AZURE_VM_METADATA); } catch (URISyntaxException ex) { logger.info("Unable to parse azure vm metadata url"); return; } HashMap<String, String> headers = new HashMap<>(); headers.put("Metadata", "true"); HttpHeaders httpHeaders = new 
HttpHeaders(headers); HttpRequest httpRequest = new HttpRequest(HttpMethod.GET, targetEndpoint, targetEndpoint.getPort(), httpHeaders); Mono<HttpResponse> httpResponseMono = this.metadataHttpClient.send(httpRequest); httpResponseMono .flatMap(response -> response.bodyAsString()).map(metadataJson -> parse(metadataJson, AzureVMMetadata.class)).doOnSuccess(metadata -> { azureVmMetaDataSingleton.compareAndSet(null, metadata); this.populateAzureVmMetaData(metadata); }).onErrorResume(throwable -> { logger.info("Client is not on azure vm"); logger.debug("Unable to get azure vm metadata", throwable); return Mono.empty(); }).subscribe(); } private static <T> T parse(String itemResponseBodyAsString, Class<T> itemClassType) { try { return OBJECT_MAPPER.readValue(itemResponseBodyAsString, itemClassType); } catch (IOException e) { throw new IllegalStateException( "Failed to parse string [" + itemResponseBodyAsString + "] to POJO.", e); } } private void clearDataForNextRun() { this.clientTelemetryInfo.getSystemInfoMap().clear(); this.clientTelemetryInfo.getOperationInfoMap().clear(); this.clientTelemetryInfo.getCacheRefreshInfoMap().clear(); for (ConcurrentDoubleHistogram histogram : this.clientTelemetryInfo.getSystemInfoMap().values()) { histogram.reset(); } } private void readHistogram() { ConcurrentDoubleHistogram cpuHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.CPU_MAX, ClientTelemetry.CPU_PRECISION); cpuHistogram.setAutoResize(true); for (double val : CpuMemoryMonitor.getClientTelemetryCpuLatestList()) { recordValue(cpuHistogram, val); } ReportPayload cpuReportPayload = new ReportPayload(CPU_NAME, CPU_UNIT); clientTelemetryInfo.getSystemInfoMap().put(cpuReportPayload, cpuHistogram); ConcurrentDoubleHistogram memoryHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.MEMORY_MAX_IN_MB, ClientTelemetry.MEMORY_PRECISION); memoryHistogram.setAutoResize(true); for (double val : CpuMemoryMonitor.getClientTelemetryMemoryLatestList()) { recordValue(memoryHistogram, 
val); } ReportPayload memoryReportPayload = new ReportPayload(MEMORY_NAME, MEMORY_UNIT); clientTelemetryInfo.getSystemInfoMap().put(memoryReportPayload, memoryHistogram); this.clientTelemetryInfo.setTimeStamp(Instant.now().toString()); for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry : this.clientTelemetryInfo.getSystemInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry : this.clientTelemetryInfo.getCacheRefreshInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry : this.clientTelemetryInfo.getOperationInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } } private void fillMetricsInfo(ReportPayload payload, ConcurrentDoubleHistogram histogram) { DoubleHistogram copyHistogram = histogram.copy(); payload.getMetricInfo().setCount(copyHistogram.getTotalCount()); payload.getMetricInfo().setMax(copyHistogram.getMaxValue()); payload.getMetricInfo().setMin(copyHistogram.getMinValue()); payload.getMetricInfo().setMean(copyHistogram.getMean()); Map<Double, Double> percentile = new HashMap<>(); percentile.put(PERCENTILE_50, copyHistogram.getValueAtPercentile(PERCENTILE_50)); percentile.put(PERCENTILE_90, copyHistogram.getValueAtPercentile(PERCENTILE_90)); percentile.put(PERCENTILE_95, copyHistogram.getValueAtPercentile(PERCENTILE_95)); percentile.put(PERCENTILE_99, copyHistogram.getValueAtPercentile(PERCENTILE_99)); percentile.put(PERCENTILE_999, copyHistogram.getValueAtPercentile(PERCENTILE_999)); payload.getMetricInfo().setPercentiles(percentile); } static class IMDSConfig { private static String AZURE_VM_METADATA = "http: private static final Duration DEFAULT_NETWORK_REQUEST_TIMEOUT = Duration.ofSeconds(60); private static final Duration DEFAULT_IDLE_CONNECTION_TIMEOUT = Duration.ofSeconds(60); private static final int DEFAULT_MAX_CONNECTION_POOL_SIZE = 1000; } }
class ClientTelemetry { public final static boolean DEFAULT_CLIENT_TELEMETRY_ENABLED = false; public final static String VM_ID_PREFIX = "vmId_"; public final static int ONE_KB_TO_BYTES = 1024; public final static int REQUEST_LATENCY_MAX_MILLI_SEC = 300000; public final static int REQUEST_LATENCY_SUCCESS_PRECISION = 4; public final static int REQUEST_LATENCY_FAILURE_PRECISION = 2; public final static String REQUEST_LATENCY_NAME = "RequestLatency"; public final static String REQUEST_LATENCY_UNIT = "MilliSecond"; public final static int REQUEST_CHARGE_MAX = 10000; public final static int REQUEST_CHARGE_PRECISION = 2; public final static String REQUEST_CHARGE_NAME = "RequestCharge"; public final static String REQUEST_CHARGE_UNIT = "RU"; public final static String TCP_NEW_CHANNEL_LATENCY_NAME = "TcpNewChannelOpenLatency"; public final static String TCP_NEW_CHANNEL_LATENCY_UNIT = "MilliSecond"; public final static int TCP_NEW_CHANNEL_LATENCY_MAX_MILLI_SEC = 300000; public final static int TCP_NEW_CHANNEL_LATENCY_PRECISION = 2; public final static int CPU_MAX = 100; public final static int CPU_PRECISION = 2; private final static String CPU_NAME = "CPU"; private final static String CPU_UNIT = "Percentage"; public final static int MEMORY_MAX_IN_MB = 102400; public final static int MEMORY_PRECISION = 2; private final static String MEMORY_NAME = "MemoryRemaining"; private final static String MEMORY_UNIT = "MB"; private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); private final static AtomicLong instanceCount = new AtomicLong(0); private final static AtomicReference<AzureVMMetadata> azureVmMetaDataSingleton = new AtomicReference<>(null); private final ClientTelemetryInfo clientTelemetryInfo; private final boolean clientTelemetryConfigEnabled; private final boolean clientMetricsEnabled; private final Configs configs; private final CosmosClientTelemetryConfig clientTelemetryConfig; private final HttpClient httpClient; private final HttpClient metadataHttpClient; 
private final ScheduledThreadPoolExecutor scheduledExecutorService = new ScheduledThreadPoolExecutor(1, new CosmosDaemonThreadFactory("ClientTelemetry-" + instanceCount.incrementAndGet())); private final Scheduler scheduler = Schedulers.fromExecutor(scheduledExecutorService); private static final Logger logger = LoggerFactory.getLogger(ClientTelemetry.class); private volatile boolean isClosed; private static final double PERCENTILE_50 = 50.0; private static final double PERCENTILE_90 = 90.0; private static final double PERCENTILE_95 = 95.0; private static final double PERCENTILE_99 = 99.0; private static final double PERCENTILE_999 = 99.9; private static final String USER_AGENT = Utils.getUserAgent(); private final int clientTelemetrySchedulingSec; private final IAuthorizationTokenProvider tokenProvider; private final String globalDatabaseAccountName; public ClientTelemetry(DiagnosticsClientContext diagnosticsClientContext, Boolean acceleratedNetworking, String clientId, String processId, ConnectionMode connectionMode, String globalDatabaseAccountName, String applicationRegion, String hostEnvInfo, Configs configs, CosmosClientTelemetryConfig clientTelemetryConfig, IAuthorizationTokenProvider tokenProvider, List<String> preferredRegions ) { clientTelemetryInfo = new ClientTelemetryInfo( getMachineId(diagnosticsClientContext.getConfig()), clientId, processId, USER_AGENT, connectionMode, globalDatabaseAccountName, applicationRegion, hostEnvInfo, acceleratedNetworking, preferredRegions); checkNotNull(clientTelemetryConfig, "Argument 'clientTelemetryConfig' cannot be null"); this.isClosed = false; this.configs = configs; this.clientTelemetryConfig = clientTelemetryConfig; ImplementationBridgeHelpers.CosmosClientTelemetryConfigHelper.CosmosClientTelemetryConfigAccessor clientTelemetryAccessor = ImplementationBridgeHelpers .CosmosClientTelemetryConfigHelper .getCosmosClientTelemetryConfigAccessor(); assert(clientTelemetryAccessor != null); 
this.clientTelemetryConfigEnabled = clientTelemetryAccessor .isSendClientTelemetryToServiceEnabled(clientTelemetryConfig); this.clientMetricsEnabled = clientTelemetryAccessor .isClientMetricsEnabled(clientTelemetryConfig); this.httpClient = getHttpClientForClientTelemetry(); this.metadataHttpClient = getHttpClientForIMDS(); this.clientTelemetrySchedulingSec = Configs.getClientTelemetrySchedulingInSec(); this.tokenProvider = tokenProvider; this.globalDatabaseAccountName = globalDatabaseAccountName; } public ClientTelemetryInfo getClientTelemetryInfo() { return clientTelemetryInfo; } @JsonIgnore public CosmosClientTelemetryConfig getClientTelemetryConfig() { return clientTelemetryConfig; } public static String getMachineId(DiagnosticsClientContext.DiagnosticsClientConfig diagnosticsClientConfig) { AzureVMMetadata metadataSnapshot = azureVmMetaDataSingleton.get(); if (metadataSnapshot != null && metadataSnapshot.getVmId() != null) { String machineId = VM_ID_PREFIX + metadataSnapshot.getVmId(); if (diagnosticsClientConfig != null) { diagnosticsClientConfig.withMachineId(machineId); } return machineId; } if (diagnosticsClientConfig == null) { return ""; } return diagnosticsClientConfig.getMachineId(); } public static void recordValue(ConcurrentDoubleHistogram doubleHistogram, long value) { try { doubleHistogram.recordValue(value); } catch (Exception ex) { logger.warn("Error while recording value for client telemetry. ", ex); } } public static void recordValue(ConcurrentDoubleHistogram doubleHistogram, double value) { try { doubleHistogram.recordValue(value); } catch (Exception ex) { logger.warn("Error while recording value for client telemetry. 
", ex); } } public boolean isClientTelemetryEnabled() { return this.clientTelemetryConfigEnabled; } public boolean isClientMetricsEnabled() { return this.clientMetricsEnabled; } public void close() { this.isClosed = true; this.scheduledExecutorService.shutdown(); logger.debug("GlobalEndpointManager closed."); } private HttpClient getHttpClientForClientTelemetry() { ImplementationBridgeHelpers.CosmosClientTelemetryConfigHelper .CosmosClientTelemetryConfigAccessor clientTelemetryConfigAccessor = ImplementationBridgeHelpers.CosmosClientTelemetryConfigHelper .getCosmosClientTelemetryConfigAccessor(); HttpClientConfig httpClientConfig = new HttpClientConfig(this.configs) .withMaxIdleConnectionTimeout( clientTelemetryConfigAccessor.getIdleHttpConnectionTimeout(this.clientTelemetryConfig)) .withPoolSize(clientTelemetryConfigAccessor.getMaxConnectionPoolSize(this.clientTelemetryConfig)) .withProxy(clientTelemetryConfigAccessor.getProxy(this.clientTelemetryConfig)) .withNetworkRequestTimeout( clientTelemetryConfigAccessor.getHttpNetworkRequestTimeout(this.clientTelemetryConfig)); return HttpClient.createFixed(httpClientConfig); } private HttpClient getHttpClientForIMDS() { HttpClientConfig httpClientConfig = new HttpClientConfig(this.configs) .withMaxIdleConnectionTimeout(IMDSConfig.DEFAULT_IDLE_CONNECTION_TIMEOUT) .withPoolSize(IMDSConfig.DEFAULT_MAX_CONNECTION_POOL_SIZE) .withNetworkRequestTimeout(IMDSConfig.DEFAULT_NETWORK_REQUEST_TIMEOUT); return HttpClient.createFixed(httpClientConfig); } private Mono<Void> sendClientTelemetry() { return Mono.delay(Duration.ofSeconds(clientTelemetrySchedulingSec), CosmosSchedulers.COSMOS_PARALLEL) .flatMap(t -> { if (this.isClosed) { logger.warn("client already closed"); return Mono.empty(); } if (!this.isClientTelemetryEnabled()) { logger.trace("client telemetry not enabled"); return Mono.empty(); } readHistogram(); try { String endpoint = Configs.getClientTelemetryEndpoint(); if (StringUtils.isEmpty(endpoint)) { 
logger.info("ClientTelemetry {}", OBJECT_MAPPER.writeValueAsString(this.clientTelemetryInfo)); clearDataForNextRun(); return this.sendClientTelemetry(); } else { URI targetEndpoint = new URI(endpoint); ByteBuffer byteBuffer = InternalObjectNode.serializeJsonToByteBuffer(this.clientTelemetryInfo, ClientTelemetry.OBJECT_MAPPER, null); byte[] tempBuffer = RxDocumentServiceRequest.toByteArray(byteBuffer); Map<String, String> headers = new HashMap<>(); String date = Utils.nowAsRFC1123(); headers.put(HttpConstants.HttpHeaders.X_DATE, date); String authorization = this.tokenProvider.getUserAuthorizationToken( "", ResourceType.ClientTelemetry, RequestVerb.POST, headers, AuthorizationTokenType.PrimaryMasterKey, null); try { authorization = URLEncoder.encode(authorization, Constants.UrlEncodingInfo.UTF_8); } catch (UnsupportedEncodingException e) { logger.error("Failed to encode authToken. Exception: ", e); this.clearDataForNextRun(); return this.sendClientTelemetry(); } HttpHeaders httpHeaders = new HttpHeaders(); httpHeaders.set(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON); httpHeaders.set(HttpConstants.HttpHeaders.CONTENT_ENCODING, RuntimeConstants.Encoding.GZIP); httpHeaders.set(HttpConstants.HttpHeaders.X_DATE, date); httpHeaders.set(HttpConstants.HttpHeaders.DATABASE_ACCOUNT_NAME, this.globalDatabaseAccountName); httpHeaders.set(HttpConstants.HttpHeaders.AUTHORIZATION, authorization); String envName = Configs.getEnvironmentName(); if (StringUtils.isNotEmpty(envName)) { httpHeaders.set(HttpConstants.HttpHeaders.ENVIRONMENT_NAME, envName); } HttpRequest httpRequest = new HttpRequest(HttpMethod.POST, targetEndpoint, targetEndpoint.getPort(), httpHeaders) .withBody(tempBuffer); Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest, Duration.ofSeconds(Configs.getHttpResponseTimeoutInSeconds())); return httpResponseMono.flatMap(response -> { if (response.statusCode() != HttpConstants.StatusCodes.NO_CONTENT) { logger.error("Client 
telemetry request did not succeeded, status code {}, request body {}", response.statusCode(), new String(tempBuffer, StandardCharsets.UTF_8)); response.bodyAsString().doOnSuccess(responsePayload -> { logger.error("Client telemetry request did not succeeded, status code {}, request body {}, response body {}", response.statusCode(), new String(tempBuffer, StandardCharsets.UTF_8), responsePayload); }).subscribe(); } this.clearDataForNextRun(); return this.sendClientTelemetry(); }).onErrorResume(throwable -> { logger.error("Error while sending client telemetry request Exception: ", throwable); this.clearDataForNextRun(); return this.sendClientTelemetry(); }); } } catch (JsonProcessingException | URISyntaxException ex) { logger.error("Error while preparing client telemetry. Exception: ", ex); this.clearDataForNextRun(); return this.sendClientTelemetry(); } }).onErrorResume(ex -> { logger.error("sendClientTelemetry() - Unable to send client telemetry" + ". Exception: ", ex); clearDataForNextRun(); return this.sendClientTelemetry(); }).subscribeOn(scheduler); } private void populateAzureVmMetaData(AzureVMMetadata azureVMMetadata) { this.clientTelemetryInfo.setApplicationRegion(azureVMMetadata.getLocation()); this.clientTelemetryInfo.setMachineId(VM_ID_PREFIX + azureVMMetadata.getVmId()); this.clientTelemetryInfo.setHostEnvInfo(azureVMMetadata.getOsType() + "|" + azureVMMetadata.getSku() + "|" + azureVMMetadata.getVmSize() + "|" + azureVMMetadata.getAzEnvironment()); } private void loadAzureVmMetaData() { AzureVMMetadata metadataSnapshot = azureVmMetaDataSingleton.get(); if (metadataSnapshot != null) { this.populateAzureVmMetaData(metadataSnapshot); return; } URI targetEndpoint = null; try { targetEndpoint = new URI(IMDSConfig.AZURE_VM_METADATA); } catch (URISyntaxException ex) { logger.info("Unable to parse azure vm metadata url"); return; } HashMap<String, String> headers = new HashMap<>(); headers.put("Metadata", "true"); HttpHeaders httpHeaders = new 
HttpHeaders(headers); HttpRequest httpRequest = new HttpRequest(HttpMethod.GET, targetEndpoint, targetEndpoint.getPort(), httpHeaders); Mono<HttpResponse> httpResponseMono = this.metadataHttpClient.send(httpRequest); httpResponseMono .flatMap(response -> response.bodyAsString()).map(metadataJson -> parse(metadataJson, AzureVMMetadata.class)).doOnSuccess(metadata -> { azureVmMetaDataSingleton.compareAndSet(null, metadata); this.populateAzureVmMetaData(metadata); }).onErrorResume(throwable -> { logger.info("Client is not on azure vm"); logger.debug("Unable to get azure vm metadata", throwable); return Mono.empty(); }).subscribe(); } private static <T> T parse(String itemResponseBodyAsString, Class<T> itemClassType) { try { return OBJECT_MAPPER.readValue(itemResponseBodyAsString, itemClassType); } catch (IOException e) { throw new IllegalStateException( "Failed to parse string [" + itemResponseBodyAsString + "] to POJO.", e); } } private void clearDataForNextRun() { this.clientTelemetryInfo.getSystemInfoMap().clear(); this.clientTelemetryInfo.getOperationInfoMap().clear(); this.clientTelemetryInfo.getCacheRefreshInfoMap().clear(); for (ConcurrentDoubleHistogram histogram : this.clientTelemetryInfo.getSystemInfoMap().values()) { histogram.reset(); } } private void readHistogram() { ConcurrentDoubleHistogram cpuHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.CPU_MAX, ClientTelemetry.CPU_PRECISION); cpuHistogram.setAutoResize(true); for (double val : CpuMemoryMonitor.getClientTelemetryCpuLatestList()) { recordValue(cpuHistogram, val); } ReportPayload cpuReportPayload = new ReportPayload(CPU_NAME, CPU_UNIT); clientTelemetryInfo.getSystemInfoMap().put(cpuReportPayload, cpuHistogram); ConcurrentDoubleHistogram memoryHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.MEMORY_MAX_IN_MB, ClientTelemetry.MEMORY_PRECISION); memoryHistogram.setAutoResize(true); for (double val : CpuMemoryMonitor.getClientTelemetryMemoryLatestList()) { recordValue(memoryHistogram, 
val); } ReportPayload memoryReportPayload = new ReportPayload(MEMORY_NAME, MEMORY_UNIT); clientTelemetryInfo.getSystemInfoMap().put(memoryReportPayload, memoryHistogram); this.clientTelemetryInfo.setTimeStamp(Instant.now().toString()); for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry : this.clientTelemetryInfo.getSystemInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry : this.clientTelemetryInfo.getCacheRefreshInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry : this.clientTelemetryInfo.getOperationInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } } private void fillMetricsInfo(ReportPayload payload, ConcurrentDoubleHistogram histogram) { DoubleHistogram copyHistogram = histogram.copy(); payload.getMetricInfo().setCount(copyHistogram.getTotalCount()); payload.getMetricInfo().setMax(copyHistogram.getMaxValue()); payload.getMetricInfo().setMin(copyHistogram.getMinValue()); payload.getMetricInfo().setMean(copyHistogram.getMean()); Map<Double, Double> percentile = new HashMap<>(); percentile.put(PERCENTILE_50, copyHistogram.getValueAtPercentile(PERCENTILE_50)); percentile.put(PERCENTILE_90, copyHistogram.getValueAtPercentile(PERCENTILE_90)); percentile.put(PERCENTILE_95, copyHistogram.getValueAtPercentile(PERCENTILE_95)); percentile.put(PERCENTILE_99, copyHistogram.getValueAtPercentile(PERCENTILE_99)); percentile.put(PERCENTILE_999, copyHistogram.getValueAtPercentile(PERCENTILE_999)); payload.getMetricInfo().setPercentiles(percentile); } static class IMDSConfig { private static String AZURE_VM_METADATA = "http: private static final Duration DEFAULT_NETWORK_REQUEST_TIMEOUT = Duration.ofSeconds(60); private static final Duration DEFAULT_IDLE_CONNECTION_TIMEOUT = Duration.ofSeconds(60); private static final int DEFAULT_MAX_CONNECTION_POOL_SIZE = 1000; } }
Updated `init()` so that the periodic client-telemetry send loop (`sendClientTelemetry().subscribe()`) is only started when client telemetry is enabled; the VM-metadata lookup still runs unconditionally.
/**
 * Starts background work for this telemetry instance: kicks off the one-time Azure VM
 * metadata lookup and, when client telemetry is enabled, subscribes the periodic send loop.
 */
public void init() {
    loadAzureVmMetaData();
    if (!this.isClientTelemetryEnabled()) {
        return; // nothing to schedule — telemetry upload is disabled
    }
    sendClientTelemetry().subscribe();
}
sendClientTelemetry().subscribe();
/** Bootstraps VM-metadata discovery and, if enabled, the recurring telemetry upload. */
public void init() {
    this.loadAzureVmMetaData();
    final boolean telemetryEnabled = this.isClientTelemetryEnabled();
    if (telemetryEnabled) {
        this.sendClientTelemetry().subscribe();
    }
}
class ClientTelemetry { public final static boolean DEFAULT_CLIENT_TELEMETRY_ENABLED = false; public final static String VM_ID_PREFIX = "vmId_"; public final static int ONE_KB_TO_BYTES = 1024; public final static int REQUEST_LATENCY_MAX_MILLI_SEC = 300000; public final static int REQUEST_LATENCY_SUCCESS_PRECISION = 4; public final static int REQUEST_LATENCY_FAILURE_PRECISION = 2; public final static String REQUEST_LATENCY_NAME = "RequestLatency"; public final static String REQUEST_LATENCY_UNIT = "MilliSecond"; public final static int REQUEST_CHARGE_MAX = 10000; public final static int REQUEST_CHARGE_PRECISION = 2; public final static String REQUEST_CHARGE_NAME = "RequestCharge"; public final static String REQUEST_CHARGE_UNIT = "RU"; public final static String TCP_NEW_CHANNEL_LATENCY_NAME = "TcpNewChannelOpenLatency"; public final static String TCP_NEW_CHANNEL_LATENCY_UNIT = "MilliSecond"; public final static int TCP_NEW_CHANNEL_LATENCY_MAX_MILLI_SEC = 300000; public final static int TCP_NEW_CHANNEL_LATENCY_PRECISION = 2; public final static int CPU_MAX = 100; public final static int CPU_PRECISION = 2; private final static String CPU_NAME = "CPU"; private final static String CPU_UNIT = "Percentage"; public final static int MEMORY_MAX_IN_MB = 102400; public final static int MEMORY_PRECISION = 2; private final static String MEMORY_NAME = "MemoryRemaining"; private final static String MEMORY_UNIT = "MB"; private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); private final static AtomicLong instanceCount = new AtomicLong(0); private final static AtomicReference<AzureVMMetadata> azureVmMetaDataSingleton = new AtomicReference<>(null); private final ClientTelemetryInfo clientTelemetryInfo; private final boolean clientTelemetryConfigEnabled; private final boolean clientMetricsEnabled; private final Configs configs; private final CosmosClientTelemetryConfig clientTelemetryConfig; private final HttpClient httpClient; private final HttpClient metadataHttpClient; 
private final ScheduledThreadPoolExecutor scheduledExecutorService = new ScheduledThreadPoolExecutor(1, new CosmosDaemonThreadFactory("ClientTelemetry-" + instanceCount.incrementAndGet())); private final Scheduler scheduler = Schedulers.fromExecutor(scheduledExecutorService); private static final Logger logger = LoggerFactory.getLogger(ClientTelemetry.class); private volatile boolean isClosed; private static final double PERCENTILE_50 = 50.0; private static final double PERCENTILE_90 = 90.0; private static final double PERCENTILE_95 = 95.0; private static final double PERCENTILE_99 = 99.0; private static final double PERCENTILE_999 = 99.9; private static final String USER_AGENT = Utils.getUserAgent(); private final int clientTelemetrySchedulingSec; private final IAuthorizationTokenProvider tokenProvider; private final String globalDatabaseAccountName; public ClientTelemetry(DiagnosticsClientContext diagnosticsClientContext, Boolean acceleratedNetworking, String clientId, String processId, ConnectionMode connectionMode, String globalDatabaseAccountName, String applicationRegion, String hostEnvInfo, Configs configs, CosmosClientTelemetryConfig clientTelemetryConfig, IAuthorizationTokenProvider tokenProvider, List<String> preferredRegions ) { clientTelemetryInfo = new ClientTelemetryInfo( getMachineId(diagnosticsClientContext.getConfig()), clientId, processId, USER_AGENT, connectionMode, globalDatabaseAccountName, applicationRegion, hostEnvInfo, acceleratedNetworking, preferredRegions); checkNotNull(clientTelemetryConfig, "Argument 'clientTelemetryConfig' cannot be null"); this.isClosed = false; this.configs = configs; this.clientTelemetryConfig = clientTelemetryConfig; ImplementationBridgeHelpers.CosmosClientTelemetryConfigHelper.CosmosClientTelemetryConfigAccessor clientTelemetryAccessor = ImplementationBridgeHelpers .CosmosClientTelemetryConfigHelper .getCosmosClientTelemetryConfigAccessor(); assert(clientTelemetryAccessor != null); 
this.clientTelemetryConfigEnabled = clientTelemetryAccessor .isSendClientTelemetryToServiceEnabled(clientTelemetryConfig); this.clientMetricsEnabled = clientTelemetryAccessor .isClientMetricsEnabled(clientTelemetryConfig); this.httpClient = getHttpClientForClientTelemetry(); this.metadataHttpClient = getHttpClientForIMDS(); this.clientTelemetrySchedulingSec = Configs.getClientTelemetrySchedulingInSec(); this.tokenProvider = tokenProvider; this.globalDatabaseAccountName = globalDatabaseAccountName; } public ClientTelemetryInfo getClientTelemetryInfo() { return clientTelemetryInfo; } @JsonIgnore public CosmosClientTelemetryConfig getClientTelemetryConfig() { return clientTelemetryConfig; } public static String getMachineId(DiagnosticsClientContext.DiagnosticsClientConfig diagnosticsClientConfig) { AzureVMMetadata metadataSnapshot = azureVmMetaDataSingleton.get(); if (metadataSnapshot != null && metadataSnapshot.getVmId() != null) { String machineId = VM_ID_PREFIX + metadataSnapshot.getVmId(); if (diagnosticsClientConfig != null) { diagnosticsClientConfig.withMachineId(machineId); } return machineId; } if (diagnosticsClientConfig == null) { return ""; } return diagnosticsClientConfig.getMachineId(); } public static void recordValue(ConcurrentDoubleHistogram doubleHistogram, long value) { try { doubleHistogram.recordValue(value); } catch (Exception ex) { logger.warn("Error while recording value for client telemetry. ", ex); } } public static void recordValue(ConcurrentDoubleHistogram doubleHistogram, double value) { try { doubleHistogram.recordValue(value); } catch (Exception ex) { logger.warn("Error while recording value for client telemetry. 
", ex); } } public boolean isClientTelemetryEnabled() { return this.clientTelemetryConfigEnabled; } public boolean isClientMetricsEnabled() { return this.clientMetricsEnabled; } public void close() { this.isClosed = true; this.scheduledExecutorService.shutdown(); logger.debug("GlobalEndpointManager closed."); } private HttpClient getHttpClientForClientTelemetry() { ImplementationBridgeHelpers.CosmosClientTelemetryConfigHelper .CosmosClientTelemetryConfigAccessor clientTelemetryConfigAccessor = ImplementationBridgeHelpers.CosmosClientTelemetryConfigHelper .getCosmosClientTelemetryConfigAccessor(); HttpClientConfig httpClientConfig = new HttpClientConfig(this.configs) .withMaxIdleConnectionTimeout( clientTelemetryConfigAccessor.getIdleHttpConnectionTimeout(this.clientTelemetryConfig)) .withPoolSize(clientTelemetryConfigAccessor.getMaxConnectionPoolSize(this.clientTelemetryConfig)) .withProxy(clientTelemetryConfigAccessor.getProxy(this.clientTelemetryConfig)) .withNetworkRequestTimeout( clientTelemetryConfigAccessor.getHttpNetworkRequestTimeout(this.clientTelemetryConfig)); return HttpClient.createFixed(httpClientConfig); } private HttpClient getHttpClientForIMDS() { HttpClientConfig httpClientConfig = new HttpClientConfig(this.configs) .withMaxIdleConnectionTimeout(IMDSConfig.DEFAULT_IDLE_CONNECTION_TIMEOUT) .withPoolSize(IMDSConfig.DEFAULT_MAX_CONNECTION_POOL_SIZE) .withNetworkRequestTimeout(IMDSConfig.DEFAULT_NETWORK_REQUEST_TIMEOUT); return HttpClient.createFixed(httpClientConfig); } private Mono<Void> sendClientTelemetry() { return Mono.delay(Duration.ofSeconds(clientTelemetrySchedulingSec), CosmosSchedulers.COSMOS_PARALLEL) .flatMap(t -> { if (this.isClosed) { logger.warn("client already closed"); return Mono.empty(); } if (!this.isClientTelemetryEnabled()) { logger.trace("client telemetry not enabled"); return Mono.empty(); } readHistogram(); try { String endpoint = Configs.getClientTelemetryEndpoint(); if (StringUtils.isEmpty(endpoint)) { 
logger.info("ClientTelemetry {}", OBJECT_MAPPER.writeValueAsString(this.clientTelemetryInfo)); clearDataForNextRun(); return this.sendClientTelemetry(); } else { URI targetEndpoint = new URI(endpoint); ByteBuffer byteBuffer = InternalObjectNode.serializeJsonToByteBuffer(this.clientTelemetryInfo, ClientTelemetry.OBJECT_MAPPER, null); byte[] tempBuffer = RxDocumentServiceRequest.toByteArray(byteBuffer); Map<String, String> headers = new HashMap<>(); String date = Utils.nowAsRFC1123(); headers.put(HttpConstants.HttpHeaders.X_DATE, date); String authorization = this.tokenProvider.getUserAuthorizationToken( "", ResourceType.ClientTelemetry, RequestVerb.POST, headers, AuthorizationTokenType.PrimaryMasterKey, null); try { authorization = URLEncoder.encode(authorization, Constants.UrlEncodingInfo.UTF_8); } catch (UnsupportedEncodingException e) { logger.error("Failed to encode authToken. Exception: ", e); this.clearDataForNextRun(); return this.sendClientTelemetry(); } HttpHeaders httpHeaders = new HttpHeaders(); httpHeaders.set(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON); httpHeaders.set(HttpConstants.HttpHeaders.CONTENT_ENCODING, RuntimeConstants.Encoding.GZIP); httpHeaders.set(HttpConstants.HttpHeaders.X_DATE, date); httpHeaders.set(HttpConstants.HttpHeaders.DATABASE_ACCOUNT_NAME, this.globalDatabaseAccountName); httpHeaders.set(HttpConstants.HttpHeaders.AUTHORIZATION, authorization); String envName = Configs.getEnvironmentName(); if (StringUtils.isNotEmpty(envName)) { httpHeaders.set(HttpConstants.HttpHeaders.ENVIRONMENT_NAME, envName); } HttpRequest httpRequest = new HttpRequest(HttpMethod.POST, targetEndpoint, targetEndpoint.getPort(), httpHeaders) .withBody(tempBuffer); Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest, Duration.ofSeconds(Configs.getHttpResponseTimeoutInSeconds())); return httpResponseMono.flatMap(response -> { if (response.statusCode() != HttpConstants.StatusCodes.NO_CONTENT) { logger.error("Client 
telemetry request did not succeeded, status code {}, request body {}", response.statusCode(), new String(tempBuffer, StandardCharsets.UTF_8)); response.bodyAsString().doOnSuccess(responsePayload -> { logger.error("Client telemetry request did not succeeded, status code {}, request body {}, response body {}", response.statusCode(), new String(tempBuffer, StandardCharsets.UTF_8), responsePayload); }).subscribe(); } this.clearDataForNextRun(); return this.sendClientTelemetry(); }).onErrorResume(throwable -> { logger.error("Error while sending client telemetry request Exception: ", throwable); this.clearDataForNextRun(); return this.sendClientTelemetry(); }); } } catch (JsonProcessingException | URISyntaxException ex) { logger.error("Error while preparing client telemetry. Exception: ", ex); this.clearDataForNextRun(); return this.sendClientTelemetry(); } }).onErrorResume(ex -> { logger.error("sendClientTelemetry() - Unable to send client telemetry" + ". Exception: ", ex); clearDataForNextRun(); return this.sendClientTelemetry(); }).subscribeOn(scheduler); } private void populateAzureVmMetaData(AzureVMMetadata azureVMMetadata) { this.clientTelemetryInfo.setApplicationRegion(azureVMMetadata.getLocation()); this.clientTelemetryInfo.setMachineId(VM_ID_PREFIX + azureVMMetadata.getVmId()); this.clientTelemetryInfo.setHostEnvInfo(azureVMMetadata.getOsType() + "|" + azureVMMetadata.getSku() + "|" + azureVMMetadata.getVmSize() + "|" + azureVMMetadata.getAzEnvironment()); } private void loadAzureVmMetaData() { AzureVMMetadata metadataSnapshot = azureVmMetaDataSingleton.get(); if (metadataSnapshot != null) { this.populateAzureVmMetaData(metadataSnapshot); return; } URI targetEndpoint = null; try { targetEndpoint = new URI(IMDSConfig.AZURE_VM_METADATA); } catch (URISyntaxException ex) { logger.info("Unable to parse azure vm metadata url"); return; } HashMap<String, String> headers = new HashMap<>(); headers.put("Metadata", "true"); HttpHeaders httpHeaders = new 
HttpHeaders(headers); HttpRequest httpRequest = new HttpRequest(HttpMethod.GET, targetEndpoint, targetEndpoint.getPort(), httpHeaders); Mono<HttpResponse> httpResponseMono = this.metadataHttpClient.send(httpRequest); httpResponseMono .flatMap(response -> response.bodyAsString()).map(metadataJson -> parse(metadataJson, AzureVMMetadata.class)).doOnSuccess(metadata -> { azureVmMetaDataSingleton.compareAndSet(null, metadata); this.populateAzureVmMetaData(metadata); }).onErrorResume(throwable -> { logger.info("Client is not on azure vm"); logger.debug("Unable to get azure vm metadata", throwable); return Mono.empty(); }).subscribe(); } private static <T> T parse(String itemResponseBodyAsString, Class<T> itemClassType) { try { return OBJECT_MAPPER.readValue(itemResponseBodyAsString, itemClassType); } catch (IOException e) { throw new IllegalStateException( "Failed to parse string [" + itemResponseBodyAsString + "] to POJO.", e); } } private void clearDataForNextRun() { this.clientTelemetryInfo.getSystemInfoMap().clear(); this.clientTelemetryInfo.getOperationInfoMap().clear(); this.clientTelemetryInfo.getCacheRefreshInfoMap().clear(); for (ConcurrentDoubleHistogram histogram : this.clientTelemetryInfo.getSystemInfoMap().values()) { histogram.reset(); } } private void readHistogram() { ConcurrentDoubleHistogram cpuHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.CPU_MAX, ClientTelemetry.CPU_PRECISION); cpuHistogram.setAutoResize(true); for (double val : CpuMemoryMonitor.getClientTelemetryCpuLatestList()) { recordValue(cpuHistogram, val); } ReportPayload cpuReportPayload = new ReportPayload(CPU_NAME, CPU_UNIT); clientTelemetryInfo.getSystemInfoMap().put(cpuReportPayload, cpuHistogram); ConcurrentDoubleHistogram memoryHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.MEMORY_MAX_IN_MB, ClientTelemetry.MEMORY_PRECISION); memoryHistogram.setAutoResize(true); for (double val : CpuMemoryMonitor.getClientTelemetryMemoryLatestList()) { recordValue(memoryHistogram, 
val); } ReportPayload memoryReportPayload = new ReportPayload(MEMORY_NAME, MEMORY_UNIT); clientTelemetryInfo.getSystemInfoMap().put(memoryReportPayload, memoryHistogram); this.clientTelemetryInfo.setTimeStamp(Instant.now().toString()); for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry : this.clientTelemetryInfo.getSystemInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry : this.clientTelemetryInfo.getCacheRefreshInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry : this.clientTelemetryInfo.getOperationInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } } private void fillMetricsInfo(ReportPayload payload, ConcurrentDoubleHistogram histogram) { DoubleHistogram copyHistogram = histogram.copy(); payload.getMetricInfo().setCount(copyHistogram.getTotalCount()); payload.getMetricInfo().setMax(copyHistogram.getMaxValue()); payload.getMetricInfo().setMin(copyHistogram.getMinValue()); payload.getMetricInfo().setMean(copyHistogram.getMean()); Map<Double, Double> percentile = new HashMap<>(); percentile.put(PERCENTILE_50, copyHistogram.getValueAtPercentile(PERCENTILE_50)); percentile.put(PERCENTILE_90, copyHistogram.getValueAtPercentile(PERCENTILE_90)); percentile.put(PERCENTILE_95, copyHistogram.getValueAtPercentile(PERCENTILE_95)); percentile.put(PERCENTILE_99, copyHistogram.getValueAtPercentile(PERCENTILE_99)); percentile.put(PERCENTILE_999, copyHistogram.getValueAtPercentile(PERCENTILE_999)); payload.getMetricInfo().setPercentiles(percentile); } static class IMDSConfig { private static String AZURE_VM_METADATA = "http: private static final Duration DEFAULT_NETWORK_REQUEST_TIMEOUT = Duration.ofSeconds(60); private static final Duration DEFAULT_IDLE_CONNECTION_TIMEOUT = Duration.ofSeconds(60); private static final int DEFAULT_MAX_CONNECTION_POOL_SIZE = 1000; } }
class ClientTelemetry { public final static boolean DEFAULT_CLIENT_TELEMETRY_ENABLED = false; public final static String VM_ID_PREFIX = "vmId_"; public final static int ONE_KB_TO_BYTES = 1024; public final static int REQUEST_LATENCY_MAX_MILLI_SEC = 300000; public final static int REQUEST_LATENCY_SUCCESS_PRECISION = 4; public final static int REQUEST_LATENCY_FAILURE_PRECISION = 2; public final static String REQUEST_LATENCY_NAME = "RequestLatency"; public final static String REQUEST_LATENCY_UNIT = "MilliSecond"; public final static int REQUEST_CHARGE_MAX = 10000; public final static int REQUEST_CHARGE_PRECISION = 2; public final static String REQUEST_CHARGE_NAME = "RequestCharge"; public final static String REQUEST_CHARGE_UNIT = "RU"; public final static String TCP_NEW_CHANNEL_LATENCY_NAME = "TcpNewChannelOpenLatency"; public final static String TCP_NEW_CHANNEL_LATENCY_UNIT = "MilliSecond"; public final static int TCP_NEW_CHANNEL_LATENCY_MAX_MILLI_SEC = 300000; public final static int TCP_NEW_CHANNEL_LATENCY_PRECISION = 2; public final static int CPU_MAX = 100; public final static int CPU_PRECISION = 2; private final static String CPU_NAME = "CPU"; private final static String CPU_UNIT = "Percentage"; public final static int MEMORY_MAX_IN_MB = 102400; public final static int MEMORY_PRECISION = 2; private final static String MEMORY_NAME = "MemoryRemaining"; private final static String MEMORY_UNIT = "MB"; private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); private final static AtomicLong instanceCount = new AtomicLong(0); private final static AtomicReference<AzureVMMetadata> azureVmMetaDataSingleton = new AtomicReference<>(null); private final ClientTelemetryInfo clientTelemetryInfo; private final boolean clientTelemetryConfigEnabled; private final boolean clientMetricsEnabled; private final Configs configs; private final CosmosClientTelemetryConfig clientTelemetryConfig; private final HttpClient httpClient; private final HttpClient metadataHttpClient; 
private final ScheduledThreadPoolExecutor scheduledExecutorService = new ScheduledThreadPoolExecutor(1, new CosmosDaemonThreadFactory("ClientTelemetry-" + instanceCount.incrementAndGet())); private final Scheduler scheduler = Schedulers.fromExecutor(scheduledExecutorService); private static final Logger logger = LoggerFactory.getLogger(ClientTelemetry.class); private volatile boolean isClosed; private static final double PERCENTILE_50 = 50.0; private static final double PERCENTILE_90 = 90.0; private static final double PERCENTILE_95 = 95.0; private static final double PERCENTILE_99 = 99.0; private static final double PERCENTILE_999 = 99.9; private static final String USER_AGENT = Utils.getUserAgent(); private final int clientTelemetrySchedulingSec; private final IAuthorizationTokenProvider tokenProvider; private final String globalDatabaseAccountName; public ClientTelemetry(DiagnosticsClientContext diagnosticsClientContext, Boolean acceleratedNetworking, String clientId, String processId, ConnectionMode connectionMode, String globalDatabaseAccountName, String applicationRegion, String hostEnvInfo, Configs configs, CosmosClientTelemetryConfig clientTelemetryConfig, IAuthorizationTokenProvider tokenProvider, List<String> preferredRegions ) { clientTelemetryInfo = new ClientTelemetryInfo( getMachineId(diagnosticsClientContext.getConfig()), clientId, processId, USER_AGENT, connectionMode, globalDatabaseAccountName, applicationRegion, hostEnvInfo, acceleratedNetworking, preferredRegions); checkNotNull(clientTelemetryConfig, "Argument 'clientTelemetryConfig' cannot be null"); this.isClosed = false; this.configs = configs; this.clientTelemetryConfig = clientTelemetryConfig; ImplementationBridgeHelpers.CosmosClientTelemetryConfigHelper.CosmosClientTelemetryConfigAccessor clientTelemetryAccessor = ImplementationBridgeHelpers .CosmosClientTelemetryConfigHelper .getCosmosClientTelemetryConfigAccessor(); assert(clientTelemetryAccessor != null); 
this.clientTelemetryConfigEnabled = clientTelemetryAccessor .isSendClientTelemetryToServiceEnabled(clientTelemetryConfig); this.clientMetricsEnabled = clientTelemetryAccessor .isClientMetricsEnabled(clientTelemetryConfig); this.httpClient = getHttpClientForClientTelemetry(); this.metadataHttpClient = getHttpClientForIMDS(); this.clientTelemetrySchedulingSec = Configs.getClientTelemetrySchedulingInSec(); this.tokenProvider = tokenProvider; this.globalDatabaseAccountName = globalDatabaseAccountName; } public ClientTelemetryInfo getClientTelemetryInfo() { return clientTelemetryInfo; } @JsonIgnore public CosmosClientTelemetryConfig getClientTelemetryConfig() { return clientTelemetryConfig; } public static String getMachineId(DiagnosticsClientContext.DiagnosticsClientConfig diagnosticsClientConfig) { AzureVMMetadata metadataSnapshot = azureVmMetaDataSingleton.get(); if (metadataSnapshot != null && metadataSnapshot.getVmId() != null) { String machineId = VM_ID_PREFIX + metadataSnapshot.getVmId(); if (diagnosticsClientConfig != null) { diagnosticsClientConfig.withMachineId(machineId); } return machineId; } if (diagnosticsClientConfig == null) { return ""; } return diagnosticsClientConfig.getMachineId(); } public static void recordValue(ConcurrentDoubleHistogram doubleHistogram, long value) { try { doubleHistogram.recordValue(value); } catch (Exception ex) { logger.warn("Error while recording value for client telemetry. ", ex); } } public static void recordValue(ConcurrentDoubleHistogram doubleHistogram, double value) { try { doubleHistogram.recordValue(value); } catch (Exception ex) { logger.warn("Error while recording value for client telemetry. 
", ex); } } public boolean isClientTelemetryEnabled() { return this.clientTelemetryConfigEnabled; } public boolean isClientMetricsEnabled() { return this.clientMetricsEnabled; } public void close() { this.isClosed = true; this.scheduledExecutorService.shutdown(); logger.debug("GlobalEndpointManager closed."); } private HttpClient getHttpClientForClientTelemetry() { ImplementationBridgeHelpers.CosmosClientTelemetryConfigHelper .CosmosClientTelemetryConfigAccessor clientTelemetryConfigAccessor = ImplementationBridgeHelpers.CosmosClientTelemetryConfigHelper .getCosmosClientTelemetryConfigAccessor(); HttpClientConfig httpClientConfig = new HttpClientConfig(this.configs) .withMaxIdleConnectionTimeout( clientTelemetryConfigAccessor.getIdleHttpConnectionTimeout(this.clientTelemetryConfig)) .withPoolSize(clientTelemetryConfigAccessor.getMaxConnectionPoolSize(this.clientTelemetryConfig)) .withProxy(clientTelemetryConfigAccessor.getProxy(this.clientTelemetryConfig)) .withNetworkRequestTimeout( clientTelemetryConfigAccessor.getHttpNetworkRequestTimeout(this.clientTelemetryConfig)); return HttpClient.createFixed(httpClientConfig); } private HttpClient getHttpClientForIMDS() { HttpClientConfig httpClientConfig = new HttpClientConfig(this.configs) .withMaxIdleConnectionTimeout(IMDSConfig.DEFAULT_IDLE_CONNECTION_TIMEOUT) .withPoolSize(IMDSConfig.DEFAULT_MAX_CONNECTION_POOL_SIZE) .withNetworkRequestTimeout(IMDSConfig.DEFAULT_NETWORK_REQUEST_TIMEOUT); return HttpClient.createFixed(httpClientConfig); } private Mono<Void> sendClientTelemetry() { return Mono.delay(Duration.ofSeconds(clientTelemetrySchedulingSec), CosmosSchedulers.COSMOS_PARALLEL) .flatMap(t -> { if (this.isClosed) { logger.warn("client already closed"); return Mono.empty(); } if (!this.isClientTelemetryEnabled()) { logger.trace("client telemetry not enabled"); return Mono.empty(); } readHistogram(); try { String endpoint = Configs.getClientTelemetryEndpoint(); if (StringUtils.isEmpty(endpoint)) { 
logger.info("ClientTelemetry {}", OBJECT_MAPPER.writeValueAsString(this.clientTelemetryInfo)); clearDataForNextRun(); return this.sendClientTelemetry(); } else { URI targetEndpoint = new URI(endpoint); ByteBuffer byteBuffer = InternalObjectNode.serializeJsonToByteBuffer(this.clientTelemetryInfo, ClientTelemetry.OBJECT_MAPPER, null); byte[] tempBuffer = RxDocumentServiceRequest.toByteArray(byteBuffer); Map<String, String> headers = new HashMap<>(); String date = Utils.nowAsRFC1123(); headers.put(HttpConstants.HttpHeaders.X_DATE, date); String authorization = this.tokenProvider.getUserAuthorizationToken( "", ResourceType.ClientTelemetry, RequestVerb.POST, headers, AuthorizationTokenType.PrimaryMasterKey, null); try { authorization = URLEncoder.encode(authorization, Constants.UrlEncodingInfo.UTF_8); } catch (UnsupportedEncodingException e) { logger.error("Failed to encode authToken. Exception: ", e); this.clearDataForNextRun(); return this.sendClientTelemetry(); } HttpHeaders httpHeaders = new HttpHeaders(); httpHeaders.set(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON); httpHeaders.set(HttpConstants.HttpHeaders.CONTENT_ENCODING, RuntimeConstants.Encoding.GZIP); httpHeaders.set(HttpConstants.HttpHeaders.X_DATE, date); httpHeaders.set(HttpConstants.HttpHeaders.DATABASE_ACCOUNT_NAME, this.globalDatabaseAccountName); httpHeaders.set(HttpConstants.HttpHeaders.AUTHORIZATION, authorization); String envName = Configs.getEnvironmentName(); if (StringUtils.isNotEmpty(envName)) { httpHeaders.set(HttpConstants.HttpHeaders.ENVIRONMENT_NAME, envName); } HttpRequest httpRequest = new HttpRequest(HttpMethod.POST, targetEndpoint, targetEndpoint.getPort(), httpHeaders) .withBody(tempBuffer); Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest, Duration.ofSeconds(Configs.getHttpResponseTimeoutInSeconds())); return httpResponseMono.flatMap(response -> { if (response.statusCode() != HttpConstants.StatusCodes.NO_CONTENT) { logger.error("Client 
telemetry request did not succeeded, status code {}, request body {}", response.statusCode(), new String(tempBuffer, StandardCharsets.UTF_8)); response.bodyAsString().doOnSuccess(responsePayload -> { logger.error("Client telemetry request did not succeeded, status code {}, request body {}, response body {}", response.statusCode(), new String(tempBuffer, StandardCharsets.UTF_8), responsePayload); }).subscribe(); } this.clearDataForNextRun(); return this.sendClientTelemetry(); }).onErrorResume(throwable -> { logger.error("Error while sending client telemetry request Exception: ", throwable); this.clearDataForNextRun(); return this.sendClientTelemetry(); }); } } catch (JsonProcessingException | URISyntaxException ex) { logger.error("Error while preparing client telemetry. Exception: ", ex); this.clearDataForNextRun(); return this.sendClientTelemetry(); } }).onErrorResume(ex -> { logger.error("sendClientTelemetry() - Unable to send client telemetry" + ". Exception: ", ex); clearDataForNextRun(); return this.sendClientTelemetry(); }).subscribeOn(scheduler); } private void populateAzureVmMetaData(AzureVMMetadata azureVMMetadata) { this.clientTelemetryInfo.setApplicationRegion(azureVMMetadata.getLocation()); this.clientTelemetryInfo.setMachineId(VM_ID_PREFIX + azureVMMetadata.getVmId()); this.clientTelemetryInfo.setHostEnvInfo(azureVMMetadata.getOsType() + "|" + azureVMMetadata.getSku() + "|" + azureVMMetadata.getVmSize() + "|" + azureVMMetadata.getAzEnvironment()); } private void loadAzureVmMetaData() { AzureVMMetadata metadataSnapshot = azureVmMetaDataSingleton.get(); if (metadataSnapshot != null) { this.populateAzureVmMetaData(metadataSnapshot); return; } URI targetEndpoint = null; try { targetEndpoint = new URI(IMDSConfig.AZURE_VM_METADATA); } catch (URISyntaxException ex) { logger.info("Unable to parse azure vm metadata url"); return; } HashMap<String, String> headers = new HashMap<>(); headers.put("Metadata", "true"); HttpHeaders httpHeaders = new 
HttpHeaders(headers); HttpRequest httpRequest = new HttpRequest(HttpMethod.GET, targetEndpoint, targetEndpoint.getPort(), httpHeaders); Mono<HttpResponse> httpResponseMono = this.metadataHttpClient.send(httpRequest); httpResponseMono .flatMap(response -> response.bodyAsString()).map(metadataJson -> parse(metadataJson, AzureVMMetadata.class)).doOnSuccess(metadata -> { azureVmMetaDataSingleton.compareAndSet(null, metadata); this.populateAzureVmMetaData(metadata); }).onErrorResume(throwable -> { logger.info("Client is not on azure vm"); logger.debug("Unable to get azure vm metadata", throwable); return Mono.empty(); }).subscribe(); } private static <T> T parse(String itemResponseBodyAsString, Class<T> itemClassType) { try { return OBJECT_MAPPER.readValue(itemResponseBodyAsString, itemClassType); } catch (IOException e) { throw new IllegalStateException( "Failed to parse string [" + itemResponseBodyAsString + "] to POJO.", e); } } private void clearDataForNextRun() { this.clientTelemetryInfo.getSystemInfoMap().clear(); this.clientTelemetryInfo.getOperationInfoMap().clear(); this.clientTelemetryInfo.getCacheRefreshInfoMap().clear(); for (ConcurrentDoubleHistogram histogram : this.clientTelemetryInfo.getSystemInfoMap().values()) { histogram.reset(); } } private void readHistogram() { ConcurrentDoubleHistogram cpuHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.CPU_MAX, ClientTelemetry.CPU_PRECISION); cpuHistogram.setAutoResize(true); for (double val : CpuMemoryMonitor.getClientTelemetryCpuLatestList()) { recordValue(cpuHistogram, val); } ReportPayload cpuReportPayload = new ReportPayload(CPU_NAME, CPU_UNIT); clientTelemetryInfo.getSystemInfoMap().put(cpuReportPayload, cpuHistogram); ConcurrentDoubleHistogram memoryHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.MEMORY_MAX_IN_MB, ClientTelemetry.MEMORY_PRECISION); memoryHistogram.setAutoResize(true); for (double val : CpuMemoryMonitor.getClientTelemetryMemoryLatestList()) { recordValue(memoryHistogram, 
val); } ReportPayload memoryReportPayload = new ReportPayload(MEMORY_NAME, MEMORY_UNIT); clientTelemetryInfo.getSystemInfoMap().put(memoryReportPayload, memoryHistogram); this.clientTelemetryInfo.setTimeStamp(Instant.now().toString()); for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry : this.clientTelemetryInfo.getSystemInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry : this.clientTelemetryInfo.getCacheRefreshInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry : this.clientTelemetryInfo.getOperationInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } } private void fillMetricsInfo(ReportPayload payload, ConcurrentDoubleHistogram histogram) { DoubleHistogram copyHistogram = histogram.copy(); payload.getMetricInfo().setCount(copyHistogram.getTotalCount()); payload.getMetricInfo().setMax(copyHistogram.getMaxValue()); payload.getMetricInfo().setMin(copyHistogram.getMinValue()); payload.getMetricInfo().setMean(copyHistogram.getMean()); Map<Double, Double> percentile = new HashMap<>(); percentile.put(PERCENTILE_50, copyHistogram.getValueAtPercentile(PERCENTILE_50)); percentile.put(PERCENTILE_90, copyHistogram.getValueAtPercentile(PERCENTILE_90)); percentile.put(PERCENTILE_95, copyHistogram.getValueAtPercentile(PERCENTILE_95)); percentile.put(PERCENTILE_99, copyHistogram.getValueAtPercentile(PERCENTILE_99)); percentile.put(PERCENTILE_999, copyHistogram.getValueAtPercentile(PERCENTILE_999)); payload.getMetricInfo().setPercentiles(percentile); } static class IMDSConfig { private static String AZURE_VM_METADATA = "http: private static final Duration DEFAULT_NETWORK_REQUEST_TIMEOUT = Duration.ofSeconds(60); private static final Duration DEFAULT_IDLE_CONNECTION_TIMEOUT = Duration.ofSeconds(60); private static final int DEFAULT_MAX_CONNECTION_POOL_SIZE = 1000; } }
Remove the commented-out code.
/**
 * Transfers the response content, with optional progress reporting, to the given
 * {@link AsynchronousByteChannel}. Completes immediately when there is no content.
 *
 * @param channel destination channel; must not be null.
 * @param progressReporter optional progress reporter.
 * @return a {@link Mono} that completes when the transfer finishes.
 */
public Mono<Void> writeValueToAsync(AsynchronousByteChannel channel, ProgressReporter progressReporter) {
    Objects.requireNonNull(channel, "'channel' must not be null");
    Flux<ByteBuffer> content = super.getValue();
    if (content == null) {
        // No body to write.
        return Mono.empty();
    }
    return FluxUtil.writeToAsynchronousByteChannel(
        FluxUtil.addProgressReporting(content, progressReporter), channel);
}
/**
 * Writes the response body to {@code channel}, reporting progress through
 * {@code progressReporter} when one is supplied. When the response carries no
 * body, the returned {@link Mono} completes without writing anything.
 *
 * @param channel the non-null destination channel.
 * @param progressReporter optional progress observer.
 * @return a {@link Mono} signaling transfer completion.
 */
public Mono<Void> writeValueToAsync(AsynchronousByteChannel channel, ProgressReporter progressReporter) {
    Objects.requireNonNull(channel, "'channel' must not be null");
    if (super.getValue() == null) {
        return Mono.empty();
    }
    // Wrap the body so progress is reported as chunks are consumed.
    Flux<ByteBuffer> reportingBody = FluxUtil.addProgressReporting(super.getValue(), progressReporter);
    return FluxUtil.writeToAsynchronousByteChannel(reportingBody, channel);
}
/**
 * Response of a blob download: headers plus the content as a {@code Flux<ByteBuffer>}.
 * Closing the response releases the underlying stream.
 *
 * Fix in this revision: a Javadoc block describing a channel-transfer method (with
 * {@code @param channel}/{@code @return Mono}) was orphaned directly above {@code close()},
 * which takes no parameters and returns nothing; it has been replaced with an accurate
 * {@code close()} Javadoc.
 */
class BlobDownloadAsyncResponse extends ResponseBase<BlobDownloadHeaders, Flux<ByteBuffer>>
    implements Closeable {
    static {
        BlobDownloadAsyncResponseConstructorProxy.setAccessor(BlobDownloadAsyncResponse::new);
    }

    // Emitted instead of an empty flux so downstream consumers always see at least one buffer.
    private static final ByteBuffer EMPTY_BUFFER = ByteBuffer.allocate(0);

    // Null when constructed from raw parts; non-null when wrapping a retriable StreamResponse.
    private final StreamResponse sourceResponse;

    /**
     * Constructs a {@link BlobDownloadAsyncResponse}.
     *
     * @param request Request sent to the service.
     * @param statusCode Response status code returned by the service.
     * @param headers Raw headers returned by the response.
     * @param value Stream of download data being returned by the service.
     * @param deserializedHeaders Headers deserialized into an object.
     */
    public BlobDownloadAsyncResponse(HttpRequest request, int statusCode, HttpHeaders headers,
        Flux<ByteBuffer> value, BlobDownloadHeaders deserializedHeaders) {
        super(request, statusCode, headers, value, deserializedHeaders);
        this.sourceResponse = null;
    }

    /**
     * Constructs a {@link BlobDownloadAsyncResponse} wrapping an initial stream response with
     * retry-on-error resumption.
     *
     * @param sourceResponse The initial Stream Response; must not be null.
     * @param onErrorResume Function used to resume the download from a given position.
     * @param retryOptions Retry options.
     */
    BlobDownloadAsyncResponse(StreamResponse sourceResponse,
        BiFunction<Throwable, Long, Mono<StreamResponse>> onErrorResume, DownloadRetryOptions retryOptions) {
        super(sourceResponse.getRequest(), sourceResponse.getStatusCode(), sourceResponse.getHeaders(),
            createResponseFlux(sourceResponse, onErrorResume, retryOptions), extractHeaders(sourceResponse));
        this.sourceResponse = Objects.requireNonNull(sourceResponse, "'sourceResponse' must not be null");
    }

    /** Deserializes the raw response headers into {@link BlobDownloadHeaders}. */
    private static BlobDownloadHeaders extractHeaders(StreamResponse response) {
        HttpHeaders headers = response.getHeaders();
        return ModelHelper.populateBlobDownloadHeaders(new BlobsDownloadHeaders(headers),
            ModelHelper.getErrorCode(headers));
    }

    /** Builds a download flux that resumes via {@code onErrorResume} up to the configured retry count. */
    private static Flux<ByteBuffer> createResponseFlux(StreamResponse sourceResponse,
        BiFunction<Throwable, Long, Mono<StreamResponse>> onErrorResume, DownloadRetryOptions retryOptions) {
        return FluxUtil.createRetriableDownloadFlux(sourceResponse::getValue,
            (throwable, position) -> onErrorResume.apply(throwable, position).flatMapMany(StreamResponse::getValue),
            retryOptions.getMaxRetryRequests())
            .defaultIfEmpty(EMPTY_BUFFER);
    }

    /**
     * Closes the response and its underlying stream.
     *
     * @throws IOException If closing the underlying stream fails.
     */
    @Override
    public void close() throws IOException {
        if (sourceResponse != null) {
            sourceResponse.close();
        } else {
            // NOTE(review): subscribing and immediately disposing presumably cancels the
            // content flux to release the connection — confirm against the HTTP stack used.
            super.getValue().subscribe().dispose();
        }
    }
}
/**
 * Asynchronous blob-download response: deserialized headers plus the body as a
 * {@code Flux<ByteBuffer>}; implements {@link Closeable} to release the stream.
 *
 * Fix in this revision: the Javadoc previously attached to {@code close()} documented a
 * different method ("Transfers content bytes…" with {@code @param}/{@code @return} tags
 * that do not exist on {@code close()}); it is replaced with a correct description.
 */
class BlobDownloadAsyncResponse extends ResponseBase<BlobDownloadHeaders, Flux<ByteBuffer>>
    implements Closeable {
    static {
        BlobDownloadAsyncResponseConstructorProxy.setAccessor(BlobDownloadAsyncResponse::new);
    }

    // Default element so an empty download still emits one (zero-length) buffer.
    private static final ByteBuffer EMPTY_BUFFER = ByteBuffer.allocate(0);

    // Set only by the package-private constructor; null for the raw-parts constructor.
    private final StreamResponse sourceResponse;

    /**
     * Constructs a {@link BlobDownloadAsyncResponse}.
     *
     * @param request Request sent to the service.
     * @param statusCode Response status code returned by the service.
     * @param headers Raw headers returned by the response.
     * @param value Stream of download data being returned by the service.
     * @param deserializedHeaders Headers deserialized into an object.
     */
    public BlobDownloadAsyncResponse(HttpRequest request, int statusCode, HttpHeaders headers,
        Flux<ByteBuffer> value, BlobDownloadHeaders deserializedHeaders) {
        super(request, statusCode, headers, value, deserializedHeaders);
        this.sourceResponse = null;
    }

    /**
     * Constructs a {@link BlobDownloadAsyncResponse} over an initial {@link StreamResponse},
     * with automatic resumption of the body on transient failures.
     *
     * @param sourceResponse The initial Stream Response; must not be null.
     * @param onErrorResume Function used to resume from a failure at a byte position.
     * @param retryOptions Retry options.
     */
    BlobDownloadAsyncResponse(StreamResponse sourceResponse,
        BiFunction<Throwable, Long, Mono<StreamResponse>> onErrorResume, DownloadRetryOptions retryOptions) {
        super(sourceResponse.getRequest(), sourceResponse.getStatusCode(), sourceResponse.getHeaders(),
            createResponseFlux(sourceResponse, onErrorResume, retryOptions), extractHeaders(sourceResponse));
        this.sourceResponse = Objects.requireNonNull(sourceResponse, "'sourceResponse' must not be null");
    }

    /** Converts raw headers into the strongly-typed {@link BlobDownloadHeaders}. */
    private static BlobDownloadHeaders extractHeaders(StreamResponse response) {
        HttpHeaders headers = response.getHeaders();
        return ModelHelper.populateBlobDownloadHeaders(new BlobsDownloadHeaders(headers),
            ModelHelper.getErrorCode(headers));
    }

    /** Wraps the body in a retriable flux, resuming through {@code onErrorResume}. */
    private static Flux<ByteBuffer> createResponseFlux(StreamResponse sourceResponse,
        BiFunction<Throwable, Long, Mono<StreamResponse>> onErrorResume, DownloadRetryOptions retryOptions) {
        return FluxUtil.createRetriableDownloadFlux(sourceResponse::getValue,
            (throwable, position) -> onErrorResume.apply(throwable, position).flatMapMany(StreamResponse::getValue),
            retryOptions.getMaxRetryRequests())
            .defaultIfEmpty(EMPTY_BUFFER);
    }

    /**
     * Closes the response, releasing the underlying content stream.
     *
     * @throws IOException If the underlying stream cannot be closed.
     */
    @Override
    public void close() throws IOException {
        if (sourceResponse != null) {
            sourceResponse.close();
        } else {
            // NOTE(review): subscribe-then-dispose appears intended to cancel the body flux
            // and free the connection — verify this is sufficient for the transport in use.
            super.getValue().subscribe().dispose();
        }
    }
}