comment
stringlengths
1
45k
method_body
stringlengths
23
281k
target_code
stringlengths
0
5.16k
method_body_after
stringlengths
12
281k
context_before
stringlengths
8
543k
context_after
stringlengths
8
543k
same as [this](https://github.com/Azure/azure-sdk-for-java/pull/5061#discussion_r317320920) doOnEach should help wait till an item is emitted with success or error signal.
private void receiveEvents(PartitionOwnership partitionOwnership) { EventHubConsumerOptions consumerOptions = new EventHubConsumerOptions(); consumerOptions.ownerLevel(0L); EventPosition startFromEventPosition = partitionOwnership.sequenceNumber() == null ? this.initialEventPosition : EventPosition.fromSequenceNumber(partitionOwnership.sequenceNumber(), false); EventHubAsyncConsumer consumer = this.eventHubAsyncClient .createConsumer(this.consumerGroupName, partitionOwnership.partitionId(), startFromEventPosition, consumerOptions); this.partitionConsumers.put(partitionOwnership.partitionId(), consumer); PartitionContext partitionContext = new PartitionContext(partitionOwnership.partitionId(), this.eventHubName, this.consumerGroupName); CheckpointManager checkpointManager = new CheckpointManager(this.identifier, partitionContext, this.partitionManager, null); logger.info("Subscribing to receive events from partition {}", partitionOwnership.partitionId()); PartitionProcessor partitionProcessor = this.partitionProcessorFactory .createPartitionProcessor(partitionContext, checkpointManager); partitionProcessor.initialize().subscribe(); final AtomicReference<Context> processSpanContext = new AtomicReference<>(Context.NONE); consumer.receive().subscribeOn(Schedulers.newElastic("PartitionPump")) .subscribe(eventData -> { startScopedTracingSpan(eventData, processSpanContext); partitionProcessor.processEvent(eventData).subscribe(unused -> { }, partitionProcessor::processError); endScopedTracingSpan(processSpanContext); }, partitionProcessor::processError, () -> partitionProcessor.close(CloseReason.LOST_PARTITION_OWNERSHIP)); }
partitionProcessor.processEvent(eventData).subscribe(unused -> {
private void receiveEvents(PartitionOwnership partitionOwnership) { EventHubConsumerOptions consumerOptions = new EventHubConsumerOptions(); consumerOptions.ownerLevel(0L); EventPosition startFromEventPosition = partitionOwnership.sequenceNumber() == null ? this.initialEventPosition : EventPosition.fromSequenceNumber(partitionOwnership.sequenceNumber(), false); EventHubAsyncConsumer consumer = this.eventHubAsyncClient .createConsumer(this.consumerGroupName, partitionOwnership.partitionId(), startFromEventPosition, consumerOptions); this.partitionConsumers.put(partitionOwnership.partitionId(), consumer); PartitionContext partitionContext = new PartitionContext(partitionOwnership.partitionId(), this.eventHubName, this.consumerGroupName); CheckpointManager checkpointManager = new CheckpointManager(this.identifier, partitionContext, this.partitionManager, null); logger.info("Subscribing to receive events from partition {}", partitionOwnership.partitionId()); PartitionProcessor partitionProcessor = this.partitionProcessorFactory .createPartitionProcessor(partitionContext, checkpointManager); partitionProcessor.initialize().subscribe(); consumer.receive().subscribeOn(Schedulers.newElastic("PartitionPump")) .subscribe(eventData -> { Context processSpanContext = startProcessTracingSpan(eventData); if (processSpanContext.getData(SPAN_CONTEXT).isPresent()) { eventData.addContext(SPAN_CONTEXT, processSpanContext); } partitionProcessor.processEvent(eventData).doOnEach(signal -> endProcessTracingSpan(processSpanContext, signal)).subscribe(unused -> { }, partitionProcessor::processError); }, partitionProcessor::processError, () -> partitionProcessor.close(CloseReason.LOST_PARTITION_OWNERSHIP)); }
class EventProcessor { private static final long INTERVAL_IN_SECONDS = 10; private static final long INITIAL_DELAY = 0; private static final long OWNERSHIP_EXPIRATION_TIME_IN_MILLIS = TimeUnit.SECONDS.toMillis(30); private final ClientLogger logger = new ClientLogger(EventProcessor.class); private final EventHubAsyncClient eventHubAsyncClient; private final String consumerGroupName; private final EventPosition initialEventPosition; private final PartitionProcessorFactory partitionProcessorFactory; private final PartitionManager partitionManager; private final String identifier; private final Map<String, EventHubAsyncConsumer> partitionConsumers = new ConcurrentHashMap<>(); private final String eventHubName; private final AtomicBoolean started = new AtomicBoolean(false); private Disposable runner; private Scheduler scheduler; /** * Package-private constructor. Use {@link EventHubClientBuilder} to create an instance. * * @param eventHubAsyncClient The {@link EventHubAsyncClient}. * @param consumerGroupName The consumer group name used in this event processor to consumer events. * @param partitionProcessorFactory The factory to create new partition processor(s). * @param initialEventPosition Initial event position to start consuming events. * @param partitionManager The partition manager. * @param eventHubName The Event Hub name. 
*/ EventProcessor(EventHubAsyncClient eventHubAsyncClient, String consumerGroupName, PartitionProcessorFactory partitionProcessorFactory, EventPosition initialEventPosition, PartitionManager partitionManager, String eventHubName) { this.eventHubAsyncClient = Objects .requireNonNull(eventHubAsyncClient, "eventHubAsyncClient cannot be null"); this.consumerGroupName = Objects .requireNonNull(consumerGroupName, "consumerGroupname cannot be null"); this.partitionProcessorFactory = Objects .requireNonNull(partitionProcessorFactory, "partitionProcessorFactory cannot be null"); this.partitionManager = Objects .requireNonNull(partitionManager, "partitionManager cannot be null"); this.initialEventPosition = Objects .requireNonNull(initialEventPosition, "initialEventPosition cannot be null"); this.eventHubName = Objects .requireNonNull(eventHubName, "eventHubName cannot be null"); this.identifier = UUID.randomUUID().toString(); logger.info("The instance ID for this event processors is {}", this.identifier); } /** * The identifier is a unique name given to this event processor instance. * * @return Identifier for this event processor. */ public String identifier() { return this.identifier; } /** * Starts processing of events for all partitions of the Event Hub that this event processor can own, assigning a * dedicated {@link PartitionProcessor} to each partition. If there are other Event Processors active for the same * consumer group on the Event Hub, responsibility for partitions will be shared between them. * <p> * Subsequent calls to start will be ignored if this event processor is already running. 
Calling start after {@link * * </p> * * <p><strong>Starting the processor to consume events from all partitions</strong></p> * {@codesnippet com.azure.messaging.eventhubs.eventprocessor.startstop} */ public synchronized void start() { if (!started.compareAndSet(false, true)) { logger.info("Event processor is already running"); return; } logger.info("Starting a new event processor instance with id {}", this.identifier); scheduler = Schedulers.newElastic("EventProcessor"); runner = scheduler.schedulePeriodically(this::run, INITIAL_DELAY, INTERVAL_IN_SECONDS, TimeUnit.SECONDS); } /** * Stops processing events for all partitions owned by this event processor. All {@link PartitionProcessor} will be * shutdown and any open resources will be closed. * <p> * Subsequent calls to stop will be ignored if the event processor is not running. * </p> * * <p><strong>Stopping the processor</strong></p> * {@codesnippet com.azure.messaging.eventhubs.eventprocessor.startstop} */ public synchronized void stop() { if (!started.compareAndSet(true, false)) { logger.info("Event processor has already stopped"); return; } this.partitionConsumers.forEach((key, value) -> { try { logger.info("Closing event hub consumer for partition {}", key); value.close(); logger.info("Closed event hub consumer for partition {}", key); partitionConsumers.remove(key); } catch (IOException ex) { logger.warning("Unable to close event hub consumer for partition {}", key); } }); runner.dispose(); scheduler.dispose(); } /* * A simple implementation of an event processor that: * 1. Fetches all partition ids from Event Hub * 2. Gets the current ownership information of all the partitions from PartitionManager * 3. Claims ownership of any partition that doesn't have an owner yet. * 4. 
Starts a new PartitionProcessor and receives events from each of the partitions this instance owns */ private void run() { /* This will run periodically to get new ownership details and close/open new consumers when ownership of this instance has changed */ final Flux<PartitionOwnership> ownershipFlux = partitionManager.listOwnership(eventHubName, consumerGroupName) .cache(); eventHubAsyncClient.getPartitionIds() .flatMap(id -> getCandidatePartitions(ownershipFlux, id)) .flatMap(this::claimOwnership) .subscribe(this::receiveEvents, ex -> logger.warning("Failed to receive events {}", ex.getMessage()), () -> logger.info("Completed starting partition pumps for new partitions owned")); } /* * Get the candidate partitions for claiming ownerships */ private Publisher<? extends PartitionOwnership> getCandidatePartitions(Flux<PartitionOwnership> ownershipFlux, String id) { return ownershipFlux .filter(ownership -> id.equals(ownership.partitionId())) .single(new PartitionOwnership() .partitionId(id) .eventHubName(this.eventHubName) .ownerId(this.identifier) .consumerGroupName(this.consumerGroupName) .ownerLevel(0L)); } /* * Claim ownership of the given partition if it's available */ private Publisher<? extends PartitionOwnership> claimOwnership(PartitionOwnership ownershipInfo) { if (ownershipInfo.lastModifiedTime() == null || (System.currentTimeMillis() - ownershipInfo.lastModifiedTime() > OWNERSHIP_EXPIRATION_TIME_IN_MILLIS && !ownershipInfo.ownerId().equals(this.identifier))) { ownershipInfo.ownerId(this.identifier); return partitionManager.claimOwnership(ownershipInfo).doOnComplete(() -> { logger.info("Claimed ownership of partition {}", ownershipInfo.partitionId()); }).doOnError(error -> { logger.error("Unable to claim ownership of partition {}", ownershipInfo.partitionId(), error); }); } else { return Flux.empty(); } } /* * Creates a new consumer for given partition and starts receiving events for that partition. 
*/ /* * Starts a new process tracing span and attached context the EventData object for users. */ private void startScopedTracingSpan(EventData eventData, AtomicReference<Context> processSpanContext) { Object diagnosticId = eventData.properties().get(DIAGNOSTIC_ID_KEY); if (diagnosticId == null) { return; } eventData.context(TraceUtil.extractContext(diagnosticId.toString(), Context.NONE)); processSpanContext.set(TraceUtil.startScopedSpan("process", eventData.context())); eventData.context(processSpanContext.get()); } /* * Ends the tracing span and the scope of that span. */ private void endScopedTracingSpan(AtomicReference<Context> processSpanContext) { if (!processSpanContext.get().getData("scope").isPresent()) { return; } Closeable close = (Closeable) processSpanContext.get().getData("scope").get(); try { close.close(); } catch (IOException ioException) { logger.error("EventProcessor.run() endTracingSpan().close() failed with an error %s", ioException); } TraceUtil.endTracingSpan(processSpanContext.get(), null); } }
class EventProcessor { private static final long INTERVAL_IN_SECONDS = 10; private static final long INITIAL_DELAY = 0; private static final long OWNERSHIP_EXPIRATION_TIME_IN_MILLIS = TimeUnit.SECONDS.toMillis(30); private final ClientLogger logger = new ClientLogger(EventProcessor.class); private final EventHubAsyncClient eventHubAsyncClient; private final String consumerGroupName; private final EventPosition initialEventPosition; private final PartitionProcessorFactory partitionProcessorFactory; private final PartitionManager partitionManager; private final String identifier; private final Map<String, EventHubAsyncConsumer> partitionConsumers = new ConcurrentHashMap<>(); private final String eventHubName; private final TracerProvider tracerProvider; private final AtomicBoolean started = new AtomicBoolean(false); private Disposable runner; private Scheduler scheduler; /** * Package-private constructor. Use {@link EventHubClientBuilder} to create an instance. * @param eventHubAsyncClient The {@link EventHubAsyncClient}. * @param consumerGroupName The consumer group name used in this event processor to consumer events. * @param partitionProcessorFactory The factory to create new partition processor(s). * @param initialEventPosition Initial event position to start consuming events. * @param partitionManager The partition manager. * @param eventHubName The Event Hub name. 
* @param tracerProvider The tracer implementation */ EventProcessor(EventHubAsyncClient eventHubAsyncClient, String consumerGroupName, PartitionProcessorFactory partitionProcessorFactory, EventPosition initialEventPosition, PartitionManager partitionManager, String eventHubName, TracerProvider tracerProvider) { this.eventHubAsyncClient = Objects .requireNonNull(eventHubAsyncClient, "eventHubAsyncClient cannot be null"); this.consumerGroupName = Objects .requireNonNull(consumerGroupName, "consumerGroupname cannot be null"); this.partitionProcessorFactory = Objects .requireNonNull(partitionProcessorFactory, "partitionProcessorFactory cannot be null"); this.partitionManager = Objects .requireNonNull(partitionManager, "partitionManager cannot be null"); this.initialEventPosition = Objects .requireNonNull(initialEventPosition, "initialEventPosition cannot be null"); this.eventHubName = Objects .requireNonNull(eventHubName, "eventHubName cannot be null"); this.tracerProvider = tracerProvider; this.identifier = UUID.randomUUID().toString(); logger.info("The instance ID for this event processors is {}", this.identifier); } /** * The identifier is a unique name given to this event processor instance. * * @return Identifier for this event processor. */ public String identifier() { return this.identifier; } /** * Starts processing of events for all partitions of the Event Hub that this event processor can own, assigning a * dedicated {@link PartitionProcessor} to each partition. If there are other Event Processors active for the same * consumer group on the Event Hub, responsibility for partitions will be shared between them. * <p> * Subsequent calls to start will be ignored if this event processor is already running. 
Calling start after {@link * * </p> * * <p><strong>Starting the processor to consume events from all partitions</strong></p> * {@codesnippet com.azure.messaging.eventhubs.eventprocessor.startstop} */ public synchronized void start() { if (!started.compareAndSet(false, true)) { logger.info("Event processor is already running"); return; } logger.info("Starting a new event processor instance with id {}", this.identifier); scheduler = Schedulers.newElastic("EventProcessor"); runner = scheduler.schedulePeriodically(this::run, INITIAL_DELAY, INTERVAL_IN_SECONDS, TimeUnit.SECONDS); } /** * Stops processing events for all partitions owned by this event processor. All {@link PartitionProcessor} will be * shutdown and any open resources will be closed. * <p> * Subsequent calls to stop will be ignored if the event processor is not running. * </p> * * <p><strong>Stopping the processor</strong></p> * {@codesnippet com.azure.messaging.eventhubs.eventprocessor.startstop} */ public synchronized void stop() { if (!started.compareAndSet(true, false)) { logger.info("Event processor has already stopped"); return; } this.partitionConsumers.forEach((key, value) -> { try { logger.info("Closing event hub consumer for partition {}", key); value.close(); logger.info("Closed event hub consumer for partition {}", key); partitionConsumers.remove(key); } catch (IOException ex) { logger.warning("Unable to close event hub consumer for partition {}", key); } }); runner.dispose(); scheduler.dispose(); } /* * A simple implementation of an event processor that: * 1. Fetches all partition ids from Event Hub * 2. Gets the current ownership information of all the partitions from PartitionManager * 3. Claims ownership of any partition that doesn't have an owner yet. * 4. 
Starts a new PartitionProcessor and receives events from each of the partitions this instance owns */ private void run() { /* This will run periodically to get new ownership details and close/open new consumers when ownership of this instance has changed */ final Flux<PartitionOwnership> ownershipFlux = partitionManager.listOwnership(eventHubName, consumerGroupName) .cache(); eventHubAsyncClient.getPartitionIds() .flatMap(id -> getCandidatePartitions(ownershipFlux, id)) .flatMap(this::claimOwnership) .subscribe(this::receiveEvents, ex -> logger.warning("Failed to receive events {}", ex.getMessage()), () -> logger.info("Completed starting partition pumps for new partitions owned")); } /* * Get the candidate partitions for claiming ownerships */ private Publisher<? extends PartitionOwnership> getCandidatePartitions(Flux<PartitionOwnership> ownershipFlux, String id) { return ownershipFlux .filter(ownership -> id.equals(ownership.partitionId())) .single(new PartitionOwnership() .partitionId(id) .eventHubName(this.eventHubName) .ownerId(this.identifier) .consumerGroupName(this.consumerGroupName) .ownerLevel(0L)); } /* * Claim ownership of the given partition if it's available */ private Publisher<? extends PartitionOwnership> claimOwnership(PartitionOwnership ownershipInfo) { if (ownershipInfo.lastModifiedTime() == null || (System.currentTimeMillis() - ownershipInfo.lastModifiedTime() > OWNERSHIP_EXPIRATION_TIME_IN_MILLIS && !ownershipInfo.ownerId().equals(this.identifier))) { ownershipInfo.ownerId(this.identifier); return partitionManager.claimOwnership(ownershipInfo).doOnComplete(() -> { logger.info("Claimed ownership of partition {}", ownershipInfo.partitionId()); }).doOnError(error -> { logger.error("Unable to claim ownership of partition {}", ownershipInfo.partitionId(), error); }); } else { return Flux.empty(); } } /* * Creates a new consumer for given partition and starts receiving events for that partition. 
*/ /* * Starts a new process tracing span and attached context the EventData object for users. */ private Context startProcessTracingSpan(EventData eventData) { Object diagnosticId = eventData.properties().get(DIAGNOSTIC_ID_KEY); if (diagnosticId == null || !tracerProvider.isEnabled()) { return Context.NONE; } Context spanContext = tracerProvider.extractContext(diagnosticId.toString(), Context.NONE); return tracerProvider.startSpan(spanContext, ProcessKind.PROCESS); } /* * Ends the process tracing span and the scope of that span. */ private void endProcessTracingSpan(Context processSpanContext, Signal<Void> signal) { Optional<Object> spanScope = processSpanContext.getData("scope"); if (!spanScope.isPresent() || !tracerProvider.isEnabled()) { return; } if (spanScope.get() instanceof Closeable) { Closeable close = (Closeable) processSpanContext.getData("scope").get(); try { close.close(); tracerProvider.endSpan(processSpanContext, signal); } catch (IOException ioException) { logger.error("EventProcessor.run() endTracingSpan().close() failed with an error %s", ioException); } } else { logger.warning(String.format(Locale.US, "Process span scope type is not of type Closeable, but type: %s. Not closing the scope and span", spanScope.get() != null ? spanScope.getClass() : "null")); } } }
I don't think there is a case where tracerProvider will ever be null because EventHubClientBuilder always returns a new instance of TracerProvider. You should just add a method in TracerProvider.isEnabled(). And see if there are any tracers that are passed in.
private Mono<Void> sendInternal(Flux<EventData> events, SendOptions options) { final String partitionKey = options.partitionKey(); verifyPartitionKey(partitionKey); if (tracerProvider != null) { return sendInternalTracingEnabled(events, partitionKey); } else { return sendInternalTracingDisabled(events, partitionKey); } }
if (tracerProvider != null) {
private Mono<Void> sendInternal(Flux<EventData> events, SendOptions options) { final String partitionKey = options.partitionKey(); verifyPartitionKey(partitionKey); if (tracerProvider.isEnabled()) { return sendInternalTracingEnabled(events, partitionKey); } else { return sendInternalTracingDisabled(events, partitionKey); } }
class EventHubAsyncProducer implements Closeable { private static final int MAX_PARTITION_KEY_LENGTH = 128; /** * The default maximum allowable size, in bytes, for a batch to be sent. */ public static final int MAX_MESSAGE_LENGTH_BYTES = 256 * 1024; private static final SendOptions DEFAULT_SEND_OPTIONS = new SendOptions(); private static final BatchOptions DEFAULT_BATCH_OPTIONS = new BatchOptions(); private final ClientLogger logger = new ClientLogger(EventHubAsyncProducer.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final EventHubProducerOptions senderOptions; private final Mono<AmqpSendLink> sendLinkMono; private final boolean isPartitionSender; private final TracerProvider tracerProvider; /** * Creates a new instance of this {@link EventHubAsyncProducer} that sends messages to {@link * EventHubProducerOptions * otherwise, allows the service to load balance the messages amongst available partitions. */ EventHubAsyncProducer(Mono<AmqpSendLink> amqpSendLinkMono, EventHubProducerOptions options, TracerProvider tracerProvider) { this.sendLinkMono = amqpSendLinkMono.cache(); this.senderOptions = options; this.isPartitionSender = !ImplUtils.isNullOrEmpty(options.partitionId()); this.tracerProvider = tracerProvider; } /** * Creates an {@link EventDataBatch} that can fit as many events as the transport allows. * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. */ public Mono<EventDataBatch> createBatch() { return createBatch(DEFAULT_BATCH_OPTIONS); } /** * Creates an {@link EventDataBatch} that can fit as many events as the transport allows. * @param options A set of options used to configure the {@link EventDataBatch}. * * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. 
*/ public Mono<EventDataBatch> createBatch(BatchOptions options) { Objects.requireNonNull(options, "'options' cannot be null."); final BatchOptions clone = options.clone(); verifyPartitionKey(clone.partitionKey()); return sendLinkMono.flatMap(link -> link.getLinkSize() .flatMap(size -> { final int maximumLinkSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; if (clone.maximumSizeInBytes() > maximumLinkSize) { return Mono.error(new IllegalArgumentException(String.format(Locale.US, "BatchOptions.maximumSizeInBytes (%s bytes) is larger than the link size (%s bytes).", clone.maximumSizeInBytes(), maximumLinkSize))); } final int batchSize = clone.maximumSizeInBytes() > 0 ? clone.maximumSizeInBytes() : maximumLinkSize; return Mono.just(new EventDataBatch(batchSize, clone.partitionKey(), () -> link.getErrorContext())); })); } /** * Sends a single event to the associated Event Hub. If the size of the single event exceeds the maximum size * allowed, an exception will be triggered and the send will fail. * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. * * @return A {@link Mono} that completes when the event is pushed to the service. */ public Mono<Void> send(EventData event) { Objects.requireNonNull(event, "'event' cannot be null."); return send(Flux.just(event)); } /** * Sends a single event to the associated Event Hub with the send options. If the size of the single event exceeds * the maximum size allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * @param event Event to send to the service. * @param options The set of options to consider when sending this event. * * @return A {@link Mono} that completes when the event is pushed to the service. 
*/ public Mono<Void> send(EventData event, SendOptions options) { Objects.requireNonNull(event, "'event' cannot be null."); Objects.requireNonNull(options, "'options' cannot be null."); return send(Flux.just(event), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * @param events Events to send to the service. * * @return A {@link Mono} that completes when all events are pushed to the service. */ public Mono<Void> send(Iterable<EventData> events) { Objects.requireNonNull(events, "'events' cannot be null."); return send(Flux.fromIterable(events)); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * * @return A {@link Mono} that completes when all events are pushed to the service. */ public Mono<Void> send(Iterable<EventData> events, SendOptions options) { Objects.requireNonNull(events, "'options' cannot be null."); return send(Flux.fromIterable(events), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * @param events Events to send to the service. * * @return A {@link Mono} that completes when all events are pushed to the service. 
*/ public Mono<Void> send(Flux<EventData> events) { Objects.requireNonNull(events, "'events' cannot be null."); return send(events, DEFAULT_SEND_OPTIONS); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * * @return A {@link Mono} that completes when all events are pushed to the service. */ public Mono<Void> send(Flux<EventData> events, SendOptions options) { Objects.requireNonNull(events, "'events' cannot be null."); Objects.requireNonNull(options, "'options' cannot be null."); return sendInternal(events, options); } /** * Sends the batch to the associated Event Hub. * @param batch The batch to send to the service. * * @return A {@link Mono} that completes when the batch is pushed to the service. * @throws NullPointerException if {@code batch} is {@code null}. * @see EventHubAsyncProducer * @see EventHubAsyncProducer */ public Mono<Void> send(EventDataBatch batch) { Objects.requireNonNull(batch, "'batch' cannot be null."); if (batch.getEvents().isEmpty()) { logger.info("Cannot send an EventBatch that is empty."); return Mono.empty(); } logger.info("Sending batch with partitionKey[{}], size[{}].", batch.getPartitionKey(), batch.getSize()); final List<Message> messages = EventDataUtil.toAmqpMessage(batch.getPartitionKey(), batch.getEvents()); return sendLinkMono.flatMap(link -> messages.size() == 1 ? link.send(messages.get(0)) : link.send(messages)); } private Mono<Void> sendInternalTracingDisabled(Flux<EventData> events, String partitionKey) { return sendLinkMono.flatMap(link -> { return link.getLinkSize() .flatMap(size -> { final int batchSize = size > 0 ? 
size : MAX_MESSAGE_LENGTH_BYTES; final BatchOptions batchOptions = new BatchOptions() .partitionKey(partitionKey) .maximumSizeInBytes(batchSize); return events.collect(new EventDataCollector(batchOptions, 1, () -> link.getErrorContext())); }) .flatMap(list -> sendInternal(Flux.fromIterable(list))); }); } private Mono<Void> sendInternalTracingEnabled(Flux<EventData> events, String partitionKey) { return sendLinkMono.flatMap(link -> { final AtomicReference<Context> sendSpanContext = new AtomicReference<>(Context.NONE); return link.getLinkSize() .flatMap(size -> { final int batchSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; final BatchOptions batchOptions = new BatchOptions() .partitionKey(partitionKey) .maximumSizeInBytes(batchSize); return events.map(eventData -> { Context parentContext = eventData.context(); Context entityContext = parentContext.addData(ENTITY_PATH, link.getEntityPath()); sendSpanContext.set(tracerProvider.startSpan(entityContext.addData(HOST_NAME, link.getHostname()), ProcessKind.SEND)); return setSpanContext(eventData, parentContext); }).collect(new EventDataCollector(batchOptions, 1, () -> link.getErrorContext())); }) .flatMap(list -> sendInternal(Flux.fromIterable(list))) .doOnEach(signal -> { tracerProvider.endSpan(sendSpanContext.get(), signal); }); }); } private EventData setSpanContext(EventData event, Context parentContext) { Optional<Object> eventContextData = event.context().getData(SPAN_CONTEXT); if (eventContextData.isPresent()) { Object spanContextObject = eventContextData.get(); if (spanContextObject instanceof Context) { tracerProvider.addSpanLinks((Context) eventContextData.get()); } else { logger.warning(String.format(Locale.US, "Event Data context type is not of type Context, but type: %s. Not setting body contents.", spanContextObject != null ? 
spanContextObject.getClass() : "null")); } return event; } else { Context eventSpanContext = tracerProvider.startSpan(parentContext, ProcessKind.RECEIVE); if (eventSpanContext != null) { Optional<Object> eventDiagnosticIdOptional = eventSpanContext.getData(DIAGNOSTIC_ID_KEY); if (eventDiagnosticIdOptional.isPresent()) { event.addProperty(DIAGNOSTIC_ID_KEY, eventDiagnosticIdOptional.get().toString()); tracerProvider.endSpan(eventSpanContext, Signal.complete()); event.addContext(SPAN_CONTEXT, eventSpanContext); } } } return event; } private Mono<Void> sendInternal(Flux<EventDataBatch> eventBatches) { return eventBatches .flatMap(this::send) .then() .doOnError(error -> { logger.error("Error sending batch.", error); }); } private void verifyPartitionKey(String partitionKey) { if (ImplUtils.isNullOrEmpty(partitionKey)) { return; } if (isPartitionSender) { throw logger.logExceptionAsError(new IllegalArgumentException(String.format(Locale.US, "BatchOptions.partitionKey() cannot be set when an EventHubProducer is created with" + "EventHubProducerOptions.partitionId() set. This EventHubProducer can only send events to partition '%s'.", senderOptions.partitionId()))); } else if (partitionKey.length() > MAX_PARTITION_KEY_LENGTH) { throw logger.logExceptionAsError(new IllegalArgumentException(String.format(Locale.US, "PartitionKey '%s' exceeds the maximum allowed length: '%s'.", partitionKey, MAX_PARTITION_KEY_LENGTH))); } } /** * Disposes of the {@link EventHubAsyncProducer} by closing the underlying connection to the service. * @throws IOException if the underlying transport could not be closed and its resources could not be * disposed. */ @Override public void close() throws IOException { if (!isDisposed.getAndSet(true)) { final AmqpSendLink block = sendLinkMono.block(senderOptions.retry().tryTimeout()); if (block != null) { block.close(); } } } /** * Collects EventData into EventDataBatch to send to Event Hubs. 
If {@code maxNumberOfBatches} is {@code null} then * it'll collect as many batches as possible. Otherwise, if there are more events than can fit into {@code * maxNumberOfBatches}, then the collector throws a {@link AmqpException} with {@link * ErrorCondition */ private static class EventDataCollector implements Collector<EventData, List<EventDataBatch>, List<EventDataBatch>> { private final String partitionKey; private final int maxMessageSize; private final Integer maxNumberOfBatches; private final ErrorContextProvider contextProvider; private volatile EventDataBatch currentBatch; EventDataCollector(BatchOptions options, Integer maxNumberOfBatches, ErrorContextProvider contextProvider) { this.maxNumberOfBatches = maxNumberOfBatches; this.maxMessageSize = options.maximumSizeInBytes() > 0 ? options.maximumSizeInBytes() : MAX_MESSAGE_LENGTH_BYTES; this.partitionKey = options.partitionKey(); this.contextProvider = contextProvider; currentBatch = new EventDataBatch(this.maxMessageSize, options.partitionKey(), contextProvider); } @Override public Supplier<List<EventDataBatch>> supplier() { return ArrayList::new; } @Override public BiConsumer<List<EventDataBatch>, EventData> accumulator() { return (list, event) -> { EventDataBatch batch = currentBatch; if (batch.tryAdd(event)) { return; } if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) { final String message = String.format(Locale.US, "EventData does not fit into maximum number of batches. 
'%s'", maxNumberOfBatches); throw new AmqpException(false, ErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, contextProvider.getErrorContext()); } currentBatch = new EventDataBatch(maxMessageSize, partitionKey, contextProvider); currentBatch.tryAdd(event); list.add(batch); }; } @Override public BinaryOperator<List<EventDataBatch>> combiner() { return (existing, another) -> { existing.addAll(another); return existing; }; } @Override public Function<List<EventDataBatch>, List<EventDataBatch>> finisher() { return list -> { EventDataBatch batch = currentBatch; currentBatch = null; if (batch != null) { list.add(batch); } return list; }; } @Override public Set<Characteristics> characteristics() { return Collections.emptySet(); } } }
class EventHubAsyncProducer implements Closeable { private static final int MAX_PARTITION_KEY_LENGTH = 128; /** * The default maximum allowable size, in bytes, for a batch to be sent. */ public static final int MAX_MESSAGE_LENGTH_BYTES = 256 * 1024; private static final SendOptions DEFAULT_SEND_OPTIONS = new SendOptions(); private static final BatchOptions DEFAULT_BATCH_OPTIONS = new BatchOptions(); private final ClientLogger logger = new ClientLogger(EventHubAsyncProducer.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final EventHubProducerOptions senderOptions; private final Mono<AmqpSendLink> sendLinkMono; private final boolean isPartitionSender; private final TracerProvider tracerProvider; /** * Creates a new instance of this {@link EventHubAsyncProducer} that sends messages to {@link * EventHubProducerOptions * otherwise, allows the service to load balance the messages amongst available partitions. */ EventHubAsyncProducer(Mono<AmqpSendLink> amqpSendLinkMono, EventHubProducerOptions options, TracerProvider tracerProvider) { this.sendLinkMono = amqpSendLinkMono.cache(); this.senderOptions = options; this.isPartitionSender = !ImplUtils.isNullOrEmpty(options.partitionId()); this.tracerProvider = tracerProvider; } /** * Creates an {@link EventDataBatch} that can fit as many events as the transport allows. * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. */ public Mono<EventDataBatch> createBatch() { return createBatch(DEFAULT_BATCH_OPTIONS); } /** * Creates an {@link EventDataBatch} that can fit as many events as the transport allows. * @param options A set of options used to configure the {@link EventDataBatch}. * * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. 
*/ public Mono<EventDataBatch> createBatch(BatchOptions options) { Objects.requireNonNull(options, "'options' cannot be null."); final BatchOptions clone = options.clone(); verifyPartitionKey(clone.partitionKey()); return sendLinkMono.flatMap(link -> link.getLinkSize() .flatMap(size -> { final int maximumLinkSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; if (clone.maximumSizeInBytes() > maximumLinkSize) { return Mono.error(new IllegalArgumentException(String.format(Locale.US, "BatchOptions.maximumSizeInBytes (%s bytes) is larger than the link size (%s bytes).", clone.maximumSizeInBytes(), maximumLinkSize))); } final int batchSize = clone.maximumSizeInBytes() > 0 ? clone.maximumSizeInBytes() : maximumLinkSize; return Mono.just(new EventDataBatch(batchSize, clone.partitionKey(), () -> link.getErrorContext())); })); } /** * Sends a single event to the associated Event Hub. If the size of the single event exceeds the maximum size * allowed, an exception will be triggered and the send will fail. * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. * * @return A {@link Mono} that completes when the event is pushed to the service. */ public Mono<Void> send(EventData event) { Objects.requireNonNull(event, "'event' cannot be null."); return send(Flux.just(event)); } /** * Sends a single event to the associated Event Hub with the send options. If the size of the single event exceeds * the maximum size allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * @param event Event to send to the service. * @param options The set of options to consider when sending this event. * * @return A {@link Mono} that completes when the event is pushed to the service. 
*/ public Mono<Void> send(EventData event, SendOptions options) { Objects.requireNonNull(event, "'event' cannot be null."); Objects.requireNonNull(options, "'options' cannot be null."); return send(Flux.just(event), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * @param events Events to send to the service. * * @return A {@link Mono} that completes when all events are pushed to the service. */ public Mono<Void> send(Iterable<EventData> events) { Objects.requireNonNull(events, "'events' cannot be null."); return send(Flux.fromIterable(events)); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * * @return A {@link Mono} that completes when all events are pushed to the service. */ public Mono<Void> send(Iterable<EventData> events, SendOptions options) { Objects.requireNonNull(events, "'options' cannot be null."); return send(Flux.fromIterable(events), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * @param events Events to send to the service. * * @return A {@link Mono} that completes when all events are pushed to the service. 
*/ public Mono<Void> send(Flux<EventData> events) { Objects.requireNonNull(events, "'events' cannot be null."); return send(events, DEFAULT_SEND_OPTIONS); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * * @return A {@link Mono} that completes when all events are pushed to the service. */ public Mono<Void> send(Flux<EventData> events, SendOptions options) { Objects.requireNonNull(events, "'events' cannot be null."); Objects.requireNonNull(options, "'options' cannot be null."); return sendInternal(events, options); } /** * Sends the batch to the associated Event Hub. * @param batch The batch to send to the service. * * @return A {@link Mono} that completes when the batch is pushed to the service. * @throws NullPointerException if {@code batch} is {@code null}. * @see EventHubAsyncProducer * @see EventHubAsyncProducer */ public Mono<Void> send(EventDataBatch batch) { Objects.requireNonNull(batch, "'batch' cannot be null."); if (batch.getEvents().isEmpty()) { logger.info("Cannot send an EventBatch that is empty."); return Mono.empty(); } logger.info("Sending batch with partitionKey[{}], size[{}].", batch.getPartitionKey(), batch.getSize()); final List<Message> messages = EventDataUtil.toAmqpMessage(batch.getPartitionKey(), batch.getEvents()); return sendLinkMono.flatMap(link -> messages.size() == 1 ? link.send(messages.get(0)) : link.send(messages)); } private Mono<Void> sendInternalTracingDisabled(Flux<EventData> events, String partitionKey) { return sendLinkMono.flatMap(link -> { return link.getLinkSize() .flatMap(size -> { final int batchSize = size > 0 ? 
size : MAX_MESSAGE_LENGTH_BYTES; final BatchOptions batchOptions = new BatchOptions() .partitionKey(partitionKey) .maximumSizeInBytes(batchSize); return events.collect(new EventDataCollector(batchOptions, 1, () -> link.getErrorContext())); }) .flatMap(list -> sendInternal(Flux.fromIterable(list))); }); } private Mono<Void> sendInternalTracingEnabled(Flux<EventData> events, String partitionKey) { return sendLinkMono.flatMap(link -> { final AtomicReference<Context> sendSpanContext = new AtomicReference<>(Context.NONE); return link.getLinkSize() .flatMap(size -> { final int batchSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; final BatchOptions batchOptions = new BatchOptions() .partitionKey(partitionKey) .maximumSizeInBytes(batchSize); return events.map(eventData -> { Context parentContext = eventData.context(); Context entityContext = parentContext.addData(ENTITY_PATH, link.getEntityPath()); sendSpanContext.set(tracerProvider.startSpan(entityContext.addData(HOST_NAME, link.getHostname()), ProcessKind.SEND)); return setSpanContext(eventData, parentContext); }).collect(new EventDataCollector(batchOptions, 1, () -> link.getErrorContext())); }) .flatMap(list -> sendInternal(Flux.fromIterable(list))) .doOnEach(signal -> { tracerProvider.endSpan(sendSpanContext.get(), signal); }); }); } private EventData setSpanContext(EventData event, Context parentContext) { Optional<Object> eventContextData = event.context().getData(SPAN_CONTEXT); if (eventContextData.isPresent()) { Object spanContextObject = eventContextData.get(); if (spanContextObject instanceof Context) { tracerProvider.addSpanLinks((Context) eventContextData.get()); } else { logger.warning(String.format(Locale.US, "Event Data context type is not of type Context, but type: %s. Not adding span links.", spanContextObject != null ? 
spanContextObject.getClass() : "null")); } return event; } else { Context eventSpanContext = tracerProvider.startSpan(parentContext, ProcessKind.RECEIVE); if (eventSpanContext != null) { Optional<Object> eventDiagnosticIdOptional = eventSpanContext.getData(DIAGNOSTIC_ID_KEY); if (eventDiagnosticIdOptional.isPresent()) { event.addProperty(DIAGNOSTIC_ID_KEY, eventDiagnosticIdOptional.get().toString()); tracerProvider.endSpan(eventSpanContext, Signal.complete()); event.addContext(SPAN_CONTEXT, eventSpanContext); } } } return event; } private Mono<Void> sendInternal(Flux<EventDataBatch> eventBatches) { return eventBatches .flatMap(this::send) .then() .doOnError(error -> { logger.error("Error sending batch.", error); }); } private void verifyPartitionKey(String partitionKey) { if (ImplUtils.isNullOrEmpty(partitionKey)) { return; } if (isPartitionSender) { throw logger.logExceptionAsError(new IllegalArgumentException(String.format(Locale.US, "BatchOptions.partitionKey() cannot be set when an EventHubProducer is created with" + "EventHubProducerOptions.partitionId() set. This EventHubProducer can only send events to partition '%s'.", senderOptions.partitionId()))); } else if (partitionKey.length() > MAX_PARTITION_KEY_LENGTH) { throw logger.logExceptionAsError(new IllegalArgumentException(String.format(Locale.US, "PartitionKey '%s' exceeds the maximum allowed length: '%s'.", partitionKey, MAX_PARTITION_KEY_LENGTH))); } } /** * Disposes of the {@link EventHubAsyncProducer} by closing the underlying connection to the service. * @throws IOException if the underlying transport could not be closed and its resources could not be * disposed. */ @Override public void close() throws IOException { if (!isDisposed.getAndSet(true)) { final AmqpSendLink block = sendLinkMono.block(senderOptions.retry().tryTimeout()); if (block != null) { block.close(); } } } /** * Collects EventData into EventDataBatch to send to Event Hubs. 
If {@code maxNumberOfBatches} is {@code null} then * it'll collect as many batches as possible. Otherwise, if there are more events than can fit into {@code * maxNumberOfBatches}, then the collector throws a {@link AmqpException} with {@link * ErrorCondition */ private static class EventDataCollector implements Collector<EventData, List<EventDataBatch>, List<EventDataBatch>> { private final String partitionKey; private final int maxMessageSize; private final Integer maxNumberOfBatches; private final ErrorContextProvider contextProvider; private volatile EventDataBatch currentBatch; EventDataCollector(BatchOptions options, Integer maxNumberOfBatches, ErrorContextProvider contextProvider) { this.maxNumberOfBatches = maxNumberOfBatches; this.maxMessageSize = options.maximumSizeInBytes() > 0 ? options.maximumSizeInBytes() : MAX_MESSAGE_LENGTH_BYTES; this.partitionKey = options.partitionKey(); this.contextProvider = contextProvider; currentBatch = new EventDataBatch(this.maxMessageSize, options.partitionKey(), contextProvider); } @Override public Supplier<List<EventDataBatch>> supplier() { return ArrayList::new; } @Override public BiConsumer<List<EventDataBatch>, EventData> accumulator() { return (list, event) -> { EventDataBatch batch = currentBatch; if (batch.tryAdd(event)) { return; } if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) { final String message = String.format(Locale.US, "EventData does not fit into maximum number of batches. 
'%s'", maxNumberOfBatches); throw new AmqpException(false, ErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, contextProvider.getErrorContext()); } currentBatch = new EventDataBatch(maxMessageSize, partitionKey, contextProvider); currentBatch.tryAdd(event); list.add(batch); }; } @Override public BinaryOperator<List<EventDataBatch>> combiner() { return (existing, another) -> { existing.addAll(another); return existing; }; } @Override public Function<List<EventDataBatch>, List<EventDataBatch>> finisher() { return list -> { EventDataBatch batch = currentBatch; currentBatch = null; if (batch != null) { list.add(batch); } return list; }; } @Override public Set<Characteristics> characteristics() { return Collections.emptySet(); } } }
```suggestion this.tracers = Objects.requireNonNull(tracers, "'tracers' cannot be null."); ```
/**
 * Creates a TracerProvider that dispatches tracing calls to each of the given tracers.
 *
 * @param tracers The tracers plugged into the SDK; must not be {@code null}.
 * @throws NullPointerException if {@code tracers} is {@code null}.
 */
public TracerProvider(Iterable<Tracer> tracers) {
    // Fail fast with a descriptive message instead of an anonymous NPE later
    // when the first span is started.
    this.tracers = Objects.requireNonNull(tracers, "'tracers' cannot be null.");
}
this.tracers = tracers;
/**
 * Creates a TracerProvider, copying each supplied tracer into the provider's
 * internal list.
 *
 * @param tracers The tracers plugged into the SDK; must not be {@code null}.
 * @throws NullPointerException if {@code tracers} is {@code null}.
 */
public TracerProvider(Iterable<Tracer> tracers) {
    Objects.requireNonNull(tracers, "'tracers' cannot be null.");
    for (Tracer tracer : tracers) {
        this.tracers.add(tracer);
    }
}
class TracerProvider { private final Iterable<Tracer> tracers; /** * For each tracer plugged into the SDK a new tracing span is created. * * The {@code context} will be checked for containing information about a parent span. If a parent span is found the * new span will be added as a child, otherwise the span will be created and added to the context and any downstream * start calls will use the created span as the parent. * * @param context Additional metadata that is passed through the call stack. * @param processKind * @return An updated context object. */ public Context startSpan(Context context, ProcessKind processKind) { Context local = context; String spanName = "Azure.eventhubs." + processKind.getProcessKind(); for (Tracer tracer : tracers) { local = tracer.start(spanName, local, processKind); } return local; } /** * Given a context containing the current tracing span the span is marked completed with status info from {@link Signal} * For each tracer plugged into the SDK the current tracing span is marked as completed. * * @param context Additional metadata that is passed through the call stack. * @param signal The signal indicates the status and contains the metadata we need to end the tracing span. */ public void endSpan(Context context, Signal<Void> signal) { String errorCondition = ""; if (!context.getData(OPENTELEMETRY_SPAN_KEY).isPresent()) { return; } if (signal == null) { end("success", null, context); } Throwable throwable = null; if (signal != null && signal.hasError()) { throwable = signal.getThrowable(); if (throwable instanceof AmqpException) { AmqpException exception = (AmqpException) throwable; errorCondition = exception.getErrorCondition().getErrorCondition(); } } end(errorCondition, throwable, context); } /** * For each tracer plugged into the SDK a link is created between the parent tracing span and * the current service call. * * @param context Additional metadata that is passed through the call stack. 
*/ public void addSpanLinks(Context context) { tracers.forEach(tracer -> tracer.addLink(context)); } /** * For each tracer plugged into the SDK a new context is extracted from the event's diagnostic Id. * * @param diagnosticId Unique identifier of an external call from producer to the queue. */ public Context extractContext(String diagnosticId, Context context) { Context local = context; for (Tracer tracer : tracers) { local = tracer.extractContext(diagnosticId, context); } return local; } private void end(String statusMessage, Throwable throwable, Context context) { for (Tracer tracer : tracers) { tracer.end(statusMessage, throwable, context); } } }
class TracerProvider { private final ClientLogger logger = new ClientLogger(TracerProvider.class); private final List<Tracer> tracers = new ArrayList<>(); public boolean isEnabled() { return tracers.size() > 0; } /** * For each tracer plugged into the SDK a new tracing span is created. * * The {@code context} will be checked for containing information about a parent span. If a parent span is found the * new span will be added as a child, otherwise the span will be created and added to the context and any downstream * start calls will use the created span as the parent. * * @param context Additional metadata that is passed through the call stack. * @param processKind the invoking process type. * @return An updated context object. */ public Context startSpan(Context context, ProcessKind processKind) { Context local = Objects.requireNonNull(context, "'context' cannot be null"); Objects.requireNonNull(processKind, "'processKind' cannot be null"); String spanName = getSpanName(processKind); for (Tracer tracer : tracers) { local = tracer.start(spanName, local, processKind); } return local; } /** * Given a context containing the current tracing span the span is marked completed with status info from * {@link Signal}. For each tracer plugged into the SDK the current tracing span is marked as completed. * * @param context Additional metadata that is passed through the call stack. * @param signal The signal indicates the status and contains the metadata we need to end the tracing span. 
*/ public void endSpan(Context context, Signal<Void> signal) { Objects.requireNonNull(context, "'context' cannot be null"); Objects.requireNonNull(signal, "'signal' cannot be null"); if (!context.getData(OPENTELEMETRY_SPAN_KEY).isPresent()) { return; } switch (signal.getType()) { case ON_COMPLETE: end("success", null, context); break; case ON_ERROR: String errorCondition = ""; Throwable throwable = null; if (signal.hasError()) { throwable = signal.getThrowable(); if (throwable instanceof AmqpException) { AmqpException exception = (AmqpException) throwable; errorCondition = exception.getErrorCondition().getErrorCondition(); } } end(errorCondition, throwable, context); break; default: break; } } /** * For each tracer plugged into the SDK a link is created between the parent tracing span and * the current service call. * * @param context Additional metadata that is passed through the call stack. */ public void addSpanLinks(Context context) { Objects.requireNonNull(context, "'context' cannot be null"); tracers.forEach(tracer -> tracer.addLink(context)); } /** * For each tracer plugged into the SDK a new context is extracted from the event's diagnostic Id. * * @param diagnosticId Unique identifier of an external call from producer to the queue. 
*/ public Context extractContext(String diagnosticId, Context context) { Context local = Objects.requireNonNull(context, "'context' cannot be null"); Objects.requireNonNull(diagnosticId, "'diagnosticId' cannot be null"); for (Tracer tracer : tracers) { local = tracer.extractContext(diagnosticId, local); } return local; } private void end(String statusMessage, Throwable throwable, Context context) { for (Tracer tracer : tracers) { tracer.end(statusMessage, throwable, context); } } private String getSpanName(ProcessKind processKind) { String spanName = "Azure.eventhubs."; switch (processKind) { case SEND: spanName += "send"; break; case RECEIVE: spanName += "message"; break; case PROCESS: spanName += "process"; break; default: logger.warning("Unknown processKind type: {}", processKind); break; } return spanName; } }
I'd add null checks with Objects.requireNonNull in these public methods. If a null context is passed in, we'll get a nice error message rather than an NPE when the method tries to invoke a method on it. Similar to the public methods below.
/**
 * For each tracer plugged into the SDK a new tracing span is created.
 *
 * The {@code context} will be checked for containing information about a parent span. If a parent span is found
 * the new span will be added as a child, otherwise the span will be created and added to the context and any
 * downstream start calls will use the created span as the parent.
 *
 * @param context Additional metadata that is passed through the call stack.
 * @param processKind the invoking process type.
 * @return An updated context object.
 * @throws NullPointerException if {@code context} or {@code processKind} is {@code null}.
 */
public Context startSpan(Context context, ProcessKind processKind) {
    // Validate up front so callers get a descriptive message instead of an
    // anonymous NPE from processKind.getProcessKind() below.
    Context local = Objects.requireNonNull(context, "'context' cannot be null.");
    Objects.requireNonNull(processKind, "'processKind' cannot be null.");
    String spanName = "Azure.eventhubs." + processKind.getProcessKind();
    for (Tracer tracer : tracers) {
        local = tracer.start(spanName, local, processKind);
    }
    return local;
}
Context local = context;
/**
 * Starts a new tracing span on every plugged-in tracer, threading the updated
 * context through each tracer in turn.
 *
 * @param context Additional metadata that is passed through the call stack.
 * @param processKind the invoking process type.
 * @return An updated context object.
 */
public Context startSpan(Context context, ProcessKind processKind) {
    Context updated = Objects.requireNonNull(context, "'context' cannot be null");
    Objects.requireNonNull(processKind, "'processKind' cannot be null");
    final String spanName = getSpanName(processKind);
    for (Tracer tracer : tracers) {
        updated = tracer.start(spanName, updated, processKind);
    }
    return updated;
}
class TracerProvider { private final Iterable<Tracer> tracers; public TracerProvider(Iterable<Tracer> tracers) { this.tracers = tracers; } /** * For each tracer plugged into the SDK a new tracing span is created. * * The {@code context} will be checked for containing information about a parent span. If a parent span is found the * new span will be added as a child, otherwise the span will be created and added to the context and any downstream * start calls will use the created span as the parent. * * @param context Additional metadata that is passed through the call stack. * @param processKind * @return An updated context object. */ /** * Given a context containing the current tracing span the span is marked completed with status info from {@link Signal} * For each tracer plugged into the SDK the current tracing span is marked as completed. * * @param context Additional metadata that is passed through the call stack. * @param signal The signal indicates the status and contains the metadata we need to end the tracing span. */ public void endSpan(Context context, Signal<Void> signal) { String errorCondition = ""; if (!context.getData(OPENTELEMETRY_SPAN_KEY).isPresent()) { return; } if (signal == null) { end("success", null, context); } Throwable throwable = null; if (signal != null && signal.hasError()) { throwable = signal.getThrowable(); if (throwable instanceof AmqpException) { AmqpException exception = (AmqpException) throwable; errorCondition = exception.getErrorCondition().getErrorCondition(); } } end(errorCondition, throwable, context); } /** * For each tracer plugged into the SDK a link is created between the parent tracing span and * the current service call. * * @param context Additional metadata that is passed through the call stack. */ public void addSpanLinks(Context context) { tracers.forEach(tracer -> tracer.addLink(context)); } /** * For each tracer plugged into the SDK a new context is extracted from the event's diagnostic Id. 
* * @param diagnosticId Unique identifier of an external call from producer to the queue. */ public Context extractContext(String diagnosticId, Context context) { Context local = context; for (Tracer tracer : tracers) { local = tracer.extractContext(diagnosticId, context); } return local; } private void end(String statusMessage, Throwable throwable, Context context) { for (Tracer tracer : tracers) { tracer.end(statusMessage, throwable, context); } } }
class TracerProvider { private final ClientLogger logger = new ClientLogger(TracerProvider.class); private final List<Tracer> tracers = new ArrayList<>(); public TracerProvider(Iterable<Tracer> tracers) { Objects.requireNonNull(tracers, "'tracers' cannot be null."); tracers.forEach(e -> this.tracers.add(e)); } public boolean isEnabled() { return tracers.size() > 0; } /** * For each tracer plugged into the SDK a new tracing span is created. * * The {@code context} will be checked for containing information about a parent span. If a parent span is found the * new span will be added as a child, otherwise the span will be created and added to the context and any downstream * start calls will use the created span as the parent. * * @param context Additional metadata that is passed through the call stack. * @param processKind the invoking process type. * @return An updated context object. */ /** * Given a context containing the current tracing span the span is marked completed with status info from * {@link Signal}. For each tracer plugged into the SDK the current tracing span is marked as completed. * * @param context Additional metadata that is passed through the call stack. * @param signal The signal indicates the status and contains the metadata we need to end the tracing span. 
*/ public void endSpan(Context context, Signal<Void> signal) { Objects.requireNonNull(context, "'context' cannot be null"); Objects.requireNonNull(signal, "'signal' cannot be null"); if (!context.getData(OPENTELEMETRY_SPAN_KEY).isPresent()) { return; } switch (signal.getType()) { case ON_COMPLETE: end("success", null, context); break; case ON_ERROR: String errorCondition = ""; Throwable throwable = null; if (signal.hasError()) { throwable = signal.getThrowable(); if (throwable instanceof AmqpException) { AmqpException exception = (AmqpException) throwable; errorCondition = exception.getErrorCondition().getErrorCondition(); } } end(errorCondition, throwable, context); break; default: break; } } /** * For each tracer plugged into the SDK a link is created between the parent tracing span and * the current service call. * * @param context Additional metadata that is passed through the call stack. */ public void addSpanLinks(Context context) { Objects.requireNonNull(context, "'context' cannot be null"); tracers.forEach(tracer -> tracer.addLink(context)); } /** * For each tracer plugged into the SDK a new context is extracted from the event's diagnostic Id. * * @param diagnosticId Unique identifier of an external call from producer to the queue. 
*/ public Context extractContext(String diagnosticId, Context context) { Context local = Objects.requireNonNull(context, "'context' cannot be null"); Objects.requireNonNull(diagnosticId, "'diagnosticId' cannot be null"); for (Tracer tracer : tracers) { local = tracer.extractContext(diagnosticId, local); } return local; } private void end(String statusMessage, Throwable throwable, Context context) { for (Tracer tracer : tracers) { tracer.end(statusMessage, throwable, context); } } private String getSpanName(ProcessKind processKind) { String spanName = "Azure.eventhubs."; switch (processKind) { case SEND: spanName += "send"; break; case RECEIVE: spanName += "message"; break; case PROCESS: spanName += "process"; break; default: logger.warning("Unknown processKind type: {}", processKind); break; } return spanName; } }
I'd move this declaration to line 60. This variable doesn't need to be allocated until after those checks are completed.
/**
 * Given a context containing the current tracing span the span is marked completed with status info from
 * {@link Signal}. For each tracer plugged into the SDK the current tracing span is marked as completed.
 *
 * @param context Additional metadata that is passed through the call stack.
 * @param signal The signal indicates the status and contains the metadata we need to end the tracing span.
 *     A {@code null} signal is treated as successful completion.
 */
public void endSpan(Context context, Signal<Void> signal) {
    if (!context.getData(OPENTELEMETRY_SPAN_KEY).isPresent()) {
        return;
    }
    if (signal == null) {
        end("success", null, context);
        // Fix: previously execution fell through here and ended the span a
        // second time with an empty status.
        return;
    }
    // Declared after the early returns so no allocation happens when we bail out.
    String errorCondition = "";
    Throwable throwable = null;
    if (signal.hasError()) {
        throwable = signal.getThrowable();
        if (throwable instanceof AmqpException) {
            AmqpException exception = (AmqpException) throwable;
            errorCondition = exception.getErrorCondition().getErrorCondition();
        }
    }
    end(errorCondition, throwable, context);
}
String errorCondition = "";
/**
 * Marks the current tracing span in {@code context} as completed, using the
 * status carried by {@code signal}. Signals other than completion or error are
 * ignored.
 *
 * @param context Additional metadata that is passed through the call stack.
 * @param signal The signal indicates the status and contains the metadata we need to end the tracing span.
 */
public void endSpan(Context context, Signal<Void> signal) {
    Objects.requireNonNull(context, "'context' cannot be null");
    Objects.requireNonNull(signal, "'signal' cannot be null");
    if (!context.getData(OPENTELEMETRY_SPAN_KEY).isPresent()) {
        return;
    }
    switch (signal.getType()) {
        case ON_COMPLETE:
            end("success", null, context);
            break;
        case ON_ERROR:
            Throwable error = null;
            String condition = "";
            if (signal.hasError()) {
                error = signal.getThrowable();
                if (error instanceof AmqpException) {
                    condition = ((AmqpException) error).getErrorCondition().getErrorCondition();
                }
            }
            end(condition, error, context);
            break;
        default:
            // Other signal types (e.g. onNext, onSubscribe) carry no terminal status.
            break;
    }
}
class TracerProvider { private final Iterable<Tracer> tracers; public TracerProvider(Iterable<Tracer> tracers) { this.tracers = tracers; } /** * For each tracer plugged into the SDK a new tracing span is created. * * The {@code context} will be checked for containing information about a parent span. If a parent span is found the * new span will be added as a child, otherwise the span will be created and added to the context and any downstream * start calls will use the created span as the parent. * * @param context Additional metadata that is passed through the call stack. * @param processKind * @return An updated context object. */ public Context startSpan(Context context, ProcessKind processKind) { Context local = context; String spanName = "Azure.eventhubs." + processKind.getProcessKind(); for (Tracer tracer : tracers) { local = tracer.start(spanName, local, processKind); } return local; } /** * Given a context containing the current tracing span the span is marked completed with status info from {@link Signal} * For each tracer plugged into the SDK the current tracing span is marked as completed. * * @param context Additional metadata that is passed through the call stack. * @param signal The signal indicates the status and contains the metadata we need to end the tracing span. */ /** * For each tracer plugged into the SDK a link is created between the parent tracing span and * the current service call. * * @param context Additional metadata that is passed through the call stack. */ public void addSpanLinks(Context context) { tracers.forEach(tracer -> tracer.addLink(context)); } /** * For each tracer plugged into the SDK a new context is extracted from the event's diagnostic Id. * * @param diagnosticId Unique identifier of an external call from producer to the queue. 
*/ public Context extractContext(String diagnosticId, Context context) { Context local = context; for (Tracer tracer : tracers) { local = tracer.extractContext(diagnosticId, context); } return local; } private void end(String statusMessage, Throwable throwable, Context context) { for (Tracer tracer : tracers) { tracer.end(statusMessage, throwable, context); } } }
class TracerProvider { private final ClientLogger logger = new ClientLogger(TracerProvider.class); private final List<Tracer> tracers = new ArrayList<>(); public TracerProvider(Iterable<Tracer> tracers) { Objects.requireNonNull(tracers, "'tracers' cannot be null."); tracers.forEach(e -> this.tracers.add(e)); } public boolean isEnabled() { return tracers.size() > 0; } /** * For each tracer plugged into the SDK a new tracing span is created. * * The {@code context} will be checked for containing information about a parent span. If a parent span is found the * new span will be added as a child, otherwise the span will be created and added to the context and any downstream * start calls will use the created span as the parent. * * @param context Additional metadata that is passed through the call stack. * @param processKind the invoking process type. * @return An updated context object. */ public Context startSpan(Context context, ProcessKind processKind) { Context local = Objects.requireNonNull(context, "'context' cannot be null"); Objects.requireNonNull(processKind, "'processKind' cannot be null"); String spanName = getSpanName(processKind); for (Tracer tracer : tracers) { local = tracer.start(spanName, local, processKind); } return local; } /** * Given a context containing the current tracing span the span is marked completed with status info from * {@link Signal}. For each tracer plugged into the SDK the current tracing span is marked as completed. * * @param context Additional metadata that is passed through the call stack. * @param signal The signal indicates the status and contains the metadata we need to end the tracing span. */ /** * For each tracer plugged into the SDK a link is created between the parent tracing span and * the current service call. * * @param context Additional metadata that is passed through the call stack. 
*/ public void addSpanLinks(Context context) { Objects.requireNonNull(context, "'context' cannot be null"); tracers.forEach(tracer -> tracer.addLink(context)); } /** * For each tracer plugged into the SDK a new context is extracted from the event's diagnostic Id. * * @param diagnosticId Unique identifier of an external call from producer to the queue. */ public Context extractContext(String diagnosticId, Context context) { Context local = Objects.requireNonNull(context, "'context' cannot be null"); Objects.requireNonNull(diagnosticId, "'diagnosticId' cannot be null"); for (Tracer tracer : tracers) { local = tracer.extractContext(diagnosticId, local); } return local; } private void end(String statusMessage, Throwable throwable, Context context) { for (Tracer tracer : tracers) { tracer.end(statusMessage, throwable, context); } } private String getSpanName(ProcessKind processKind) { String spanName = "Azure.eventhubs."; switch (processKind) { case SEND: spanName += "send"; break; case RECEIVE: spanName += "message"; break; case PROCESS: spanName += "process"; break; default: logger.warning("Unknown processKind type: {}", processKind); break; } return spanName; } }
I don't know if Srikanta's comment was resolved. Isn't the context only resolved for the last tracer?
public Context extractContext(String diagnosticId, Context context) { Context local = context; for (Tracer tracer : tracers) { local = tracer.extractContext(diagnosticId, context); } return local; }
local = tracer.extractContext(diagnosticId, context);
public Context extractContext(String diagnosticId, Context context) { Context local = Objects.requireNonNull(context, "'context' cannot be null"); Objects.requireNonNull(diagnosticId, "'diagnosticId' cannot be null"); for (Tracer tracer : tracers) { local = tracer.extractContext(diagnosticId, local); } return local; }
class TracerProvider { private final Iterable<Tracer> tracers; public TracerProvider(Iterable<Tracer> tracers) { this.tracers = tracers; } /** * For each tracer plugged into the SDK a new tracing span is created. * * The {@code context} will be checked for containing information about a parent span. If a parent span is found the * new span will be added as a child, otherwise the span will be created and added to the context and any downstream * start calls will use the created span as the parent. * * @param context Additional metadata that is passed through the call stack. * @param processKind * @return An updated context object. */ public Context startSpan(Context context, ProcessKind processKind) { Context local = context; String spanName = "Azure.eventhubs." + processKind.getProcessKind(); for (Tracer tracer : tracers) { local = tracer.start(spanName, local, processKind); } return local; } /** * Given a context containing the current tracing span the span is marked completed with status info from {@link Signal} * For each tracer plugged into the SDK the current tracing span is marked as completed. * * @param context Additional metadata that is passed through the call stack. * @param signal The signal indicates the status and contains the metadata we need to end the tracing span. */ public void endSpan(Context context, Signal<Void> signal) { String errorCondition = ""; if (!context.getData(OPENTELEMETRY_SPAN_KEY).isPresent()) { return; } if (signal == null) { end("success", null, context); } Throwable throwable = null; if (signal != null && signal.hasError()) { throwable = signal.getThrowable(); if (throwable instanceof AmqpException) { AmqpException exception = (AmqpException) throwable; errorCondition = exception.getErrorCondition().getErrorCondition(); } } end(errorCondition, throwable, context); } /** * For each tracer plugged into the SDK a link is created between the parent tracing span and * the current service call. 
* * @param context Additional metadata that is passed through the call stack. */ public void addSpanLinks(Context context) { tracers.forEach(tracer -> tracer.addLink(context)); } /** * For each tracer plugged into the SDK a new context is extracted from the event's diagnostic Id. * * @param diagnosticId Unique identifier of an external call from producer to the queue. */ private void end(String statusMessage, Throwable throwable, Context context) { for (Tracer tracer : tracers) { tracer.end(statusMessage, throwable, context); } } }
class TracerProvider { private final ClientLogger logger = new ClientLogger(TracerProvider.class); private final List<Tracer> tracers = new ArrayList<>(); public TracerProvider(Iterable<Tracer> tracers) { Objects.requireNonNull(tracers, "'tracers' cannot be null."); tracers.forEach(e -> this.tracers.add(e)); } public boolean isEnabled() { return tracers.size() > 0; } /** * For each tracer plugged into the SDK a new tracing span is created. * * The {@code context} will be checked for containing information about a parent span. If a parent span is found the * new span will be added as a child, otherwise the span will be created and added to the context and any downstream * start calls will use the created span as the parent. * * @param context Additional metadata that is passed through the call stack. * @param processKind the invoking process type. * @return An updated context object. */ public Context startSpan(Context context, ProcessKind processKind) { Context local = Objects.requireNonNull(context, "'context' cannot be null"); Objects.requireNonNull(processKind, "'processKind' cannot be null"); String spanName = getSpanName(processKind); for (Tracer tracer : tracers) { local = tracer.start(spanName, local, processKind); } return local; } /** * Given a context containing the current tracing span the span is marked completed with status info from * {@link Signal}. For each tracer plugged into the SDK the current tracing span is marked as completed. * * @param context Additional metadata that is passed through the call stack. * @param signal The signal indicates the status and contains the metadata we need to end the tracing span. 
*/ public void endSpan(Context context, Signal<Void> signal) { Objects.requireNonNull(context, "'context' cannot be null"); Objects.requireNonNull(signal, "'signal' cannot be null"); if (!context.getData(OPENTELEMETRY_SPAN_KEY).isPresent()) { return; } switch (signal.getType()) { case ON_COMPLETE: end("success", null, context); break; case ON_ERROR: String errorCondition = ""; Throwable throwable = null; if (signal.hasError()) { throwable = signal.getThrowable(); if (throwable instanceof AmqpException) { AmqpException exception = (AmqpException) throwable; errorCondition = exception.getErrorCondition().getErrorCondition(); } } end(errorCondition, throwable, context); break; default: break; } } /** * For each tracer plugged into the SDK a link is created between the parent tracing span and * the current service call. * * @param context Additional metadata that is passed through the call stack. */ public void addSpanLinks(Context context) { Objects.requireNonNull(context, "'context' cannot be null"); tracers.forEach(tracer -> tracer.addLink(context)); } /** * For each tracer plugged into the SDK a new context is extracted from the event's diagnostic Id. * * @param diagnosticId Unique identifier of an external call from producer to the queue. */ private void end(String statusMessage, Throwable throwable, Context context) { for (Tracer tracer : tracers) { tracer.end(statusMessage, throwable, context); } } private String getSpanName(ProcessKind processKind) { String spanName = "Azure.eventhubs."; switch (processKind) { case SEND: spanName += "send"; break; case RECEIVE: spanName += "message"; break; case PROCESS: spanName += "process"; break; default: logger.warning("Unknown processKind type: {}", processKind); break; } return spanName; } }
You don't need to specify `this.`
public EventHubAsyncProducer createProducer(EventHubProducerOptions options) { Objects.requireNonNull(options); final EventHubProducerOptions clonedOptions = options.clone(); if (clonedOptions.retry() == null) { clonedOptions.retry(connectionOptions.retry()); } final String entityPath; final String linkName; if (ImplUtils.isNullOrEmpty(options.partitionId())) { entityPath = eventHubName; linkName = StringUtil.getRandomString("EC"); } else { entityPath = String.format(Locale.US, SENDER_ENTITY_PATH_FORMAT, eventHubName, options.partitionId()); linkName = StringUtil.getRandomString("PS"); } final Mono<AmqpSendLink> amqpLinkMono = connectionMono .flatMap(connection -> connection.createSession(entityPath)) .flatMap(session -> { logger.verbose("Creating producer for {}", entityPath); final RetryPolicy retryPolicy = RetryUtil.getRetryPolicy(clonedOptions.retry()); return session.createProducer(linkName, entityPath, clonedOptions.retry().tryTimeout(), retryPolicy) .cast(AmqpSendLink.class); }); return new EventHubAsyncProducer(amqpLinkMono, clonedOptions, this.tracerProvider); }
return new EventHubAsyncProducer(amqpLinkMono, clonedOptions, this.tracerProvider);
public EventHubAsyncProducer createProducer(EventHubProducerOptions options) { Objects.requireNonNull(options, "'options' cannot be null."); final EventHubProducerOptions clonedOptions = options.clone(); if (clonedOptions.retry() == null) { clonedOptions.retry(connectionOptions.retry()); } final String entityPath; final String linkName; if (ImplUtils.isNullOrEmpty(options.partitionId())) { entityPath = eventHubName; linkName = StringUtil.getRandomString("EC"); } else { entityPath = String.format(Locale.US, SENDER_ENTITY_PATH_FORMAT, eventHubName, options.partitionId()); linkName = StringUtil.getRandomString("PS"); } final Mono<AmqpSendLink> amqpLinkMono = connectionMono .flatMap(connection -> connection.createSession(entityPath)) .flatMap(session -> { logger.verbose("Creating producer for {}", entityPath); final RetryPolicy retryPolicy = RetryUtil.getRetryPolicy(clonedOptions.retry()); return session.createProducer(linkName, entityPath, clonedOptions.retry().tryTimeout(), retryPolicy) .cast(AmqpSendLink.class); }); return new EventHubAsyncProducer(amqpLinkMono, clonedOptions, tracerProvider); }
class EventHubAsyncClient implements Closeable { /** * The name of the default consumer group in the Event Hubs service. */ public static final String DEFAULT_CONSUMER_GROUP_NAME = "$Default"; private static final String RECEIVER_ENTITY_PATH_FORMAT = "%s/ConsumerGroups/%s/Partitions/%s"; private static final String SENDER_ENTITY_PATH_FORMAT = "%s/Partitions/%s"; private final ClientLogger logger = new ClientLogger(EventHubAsyncClient.class); private final String connectionId; private final Mono<EventHubConnection> connectionMono; private final AtomicBoolean hasConnection = new AtomicBoolean(false); private final ConnectionOptions connectionOptions; private final String eventHubName; private final EventHubProducerOptions defaultProducerOptions; private final EventHubConsumerOptions defaultConsumerOptions; private final TracerProvider tracerProvider; EventHubAsyncClient(ConnectionOptions connectionOptions, ReactorProvider provider, ReactorHandlerProvider handlerProvider, TracerProvider tracerProvider) { Objects.requireNonNull(connectionOptions); Objects.requireNonNull(provider); Objects.requireNonNull(handlerProvider); this.connectionOptions = connectionOptions; this.tracerProvider = tracerProvider; this.eventHubName = connectionOptions.eventHubName(); this.connectionId = StringUtil.getRandomString("MF"); this.connectionMono = Mono.fromCallable(() -> { return (EventHubConnection) new ReactorConnection(connectionId, connectionOptions, provider, handlerProvider, new ResponseMapper()); }).doOnSubscribe(c -> hasConnection.set(true)) .cache(); this.defaultProducerOptions = new EventHubProducerOptions() .retry(connectionOptions.retry()); this.defaultConsumerOptions = new EventHubConsumerOptions() .retry(connectionOptions.retry()) .scheduler(connectionOptions.scheduler()); } /** * Retrieves information about an Event Hub, including the number of partitions present and their identifiers. 
* * @return The set of information for the Event Hub that this client is associated with. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<EventHubProperties> getProperties() { return connectionMono.flatMap(connection -> connection.getManagementNode().flatMap(EventHubManagementNode::getEventHubProperties)); } /** * Retrieves the identifiers for the partitions of an Event Hub. * * @return A Flux of identifiers for the partitions of an Event Hub. */ @ServiceMethod(returns = ReturnType.COLLECTION) public Flux<String> getPartitionIds() { return getProperties().flatMapMany(properties -> Flux.fromArray(properties.partitionIds())); } /** * Retrieves information about a specific partition for an Event Hub, including elements that describe the available * events in the partition event stream. * * @param partitionId The unique identifier of a partition associated with the Event Hub. * @return The set of information for the requested partition under the Event Hub this client is associated with. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<PartitionProperties> getPartitionProperties(String partitionId) { return connectionMono.flatMap( connection -> connection.getManagementNode().flatMap(node -> { return node.getPartitionProperties(partitionId); })); } /** * Creates an Event Hub producer responsible for transmitting {@link EventData} to the Event Hub, grouped together * in batches. Event data is automatically routed to an available partition. * * @return A new {@link EventHubAsyncProducer}. */ public EventHubAsyncProducer createProducer() { return createProducer(defaultProducerOptions); } /** * Creates an Event Hub producer responsible for transmitting {@link EventData} to the Event Hub, grouped together * in batches. If {@link EventHubProducerOptions * events are routed to that specific partition. Otherwise, events are automatically routed to an available * partition. * * @param options The set of options to apply when creating the producer. 
* @return A new {@link EventHubAsyncProducer}. * @throws NullPointerException if {@code options} is {@code null}. */ /** * Creates an Event Hub consumer responsible for reading {@link EventData} from a specific Event Hub partition, as a * member of the specified consumer group, and begins reading events from the {@code eventPosition}. * * The consumer created is non-exclusive, allowing multiple consumers from the same consumer group to be actively * reading events from the partition. These non-exclusive consumers are sometimes referred to as "Non-epoch * Consumers". * * @param consumerGroup The name of the consumer group this consumer is associated with. Events are read in * the context of this group. The name of the consumer group that is created by default is {@link * * @param partitionId The identifier of the Event Hub partition. * @param eventPosition The position within the partition where the consumer should begin reading events. * @return A new {@link EventHubAsyncConsumer} that receives events from the partition at the given position. * @throws NullPointerException If {@code eventPosition}, or {@code options} is {@code null}. * @throws IllegalArgumentException If {@code consumerGroup} or {@code partitionId} is {@code null} or an * empty string. */ public EventHubAsyncConsumer createConsumer(String consumerGroup, String partitionId, EventPosition eventPosition) { return createConsumer(consumerGroup, partitionId, eventPosition, defaultConsumerOptions); } /** * Creates an Event Hub consumer responsible for reading {@link EventData} from a specific Event Hub partition, as a * member of the configured consumer group, and begins reading events from the specified {@code eventPosition}. * * <p> * A consumer may be exclusive, which asserts ownership over the partition for the consumer group to ensure that * only one consumer from that group is reading the from the partition. These exclusive consumers are sometimes * referred to as "Epoch Consumers." 
* * A consumer may also be non-exclusive, allowing multiple consumers from the same consumer group to be actively * reading events from the partition. These non-exclusive consumers are sometimes referred to as "Non-epoch * Consumers." * * Designating a consumer as exclusive may be specified in the {@code options}, by setting {@link * EventHubConsumerOptions * non-exclusive. * </p> * * @param consumerGroup The name of the consumer group this consumer is associated with. Events are read in * the context of this group. The name of the consumer group that is created by default is {@link * * @param partitionId The identifier of the Event Hub partition from which events will be received. * @param eventPosition The position within the partition where the consumer should begin reading events. * @param options The set of options to apply when creating the consumer. * @return An new {@link EventHubAsyncConsumer} that receives events from the partition with all configured {@link * EventHubConsumerOptions}. * @throws NullPointerException If {@code eventPosition}, {@code consumerGroup}, {@code partitionId}, or {@code * options} is {@code null}. * @throws IllegalArgumentException If {@code consumerGroup} or {@code partitionId} is an empty string. 
*/ public EventHubAsyncConsumer createConsumer(String consumerGroup, String partitionId, EventPosition eventPosition, EventHubConsumerOptions options) { Objects.requireNonNull(eventPosition); Objects.requireNonNull(options); Objects.requireNonNull(consumerGroup); Objects.requireNonNull(partitionId); if (ImplUtils.isNullOrEmpty(consumerGroup)) { throw logger.logExceptionAsError(new IllegalArgumentException("'consumerGroup' cannot be an empty string.")); } if (ImplUtils.isNullOrEmpty(partitionId)) { throw logger.logExceptionAsError(new IllegalArgumentException("'partitionId' cannot be an empty string.")); } final EventHubConsumerOptions clonedOptions = options.clone(); if (clonedOptions.scheduler() == null) { clonedOptions.scheduler(connectionOptions.scheduler()); } if (clonedOptions.retry() == null) { clonedOptions.retry(connectionOptions.retry()); } final String linkName = StringUtil.getRandomString("PR"); final String entityPath = String.format(Locale.US, RECEIVER_ENTITY_PATH_FORMAT, eventHubName, consumerGroup, partitionId); final Mono<AmqpReceiveLink> receiveLinkMono = connectionMono.flatMap(connection -> { return connection.createSession(entityPath).cast(EventHubSession.class); }).flatMap(session -> { logger.verbose("Creating consumer for path: {}", entityPath); final RetryPolicy retryPolicy = RetryUtil.getRetryPolicy(clonedOptions.retry()); return session.createConsumer(linkName, entityPath, getExpression(eventPosition), clonedOptions.retry().tryTimeout(), retryPolicy, options.ownerLevel(), options.identifier()) .cast(AmqpReceiveLink.class); }); return new EventHubAsyncConsumer(receiveLinkMono, clonedOptions); } /** * Closes and disposes of connection to service. Any {@link EventHubAsyncConsumer EventHubConsumers} and {@link * EventHubAsyncProducer EventHubProducers} created with this instance will have their connections closed. 
*/ @Override public void close() { if (hasConnection.getAndSet(false)) { try { final AmqpConnection connection = connectionMono.block(connectionOptions.retry().tryTimeout()); if (connection != null) { connection.close(); } } catch (IOException exception) { throw logger.logExceptionAsError(new AmqpException(false, "Unable to close connection to service", exception, new ErrorContext(connectionOptions.host()))); } } } private static String getExpression(EventPosition eventPosition) { final String isInclusiveFlag = eventPosition.isInclusive() ? "=" : ""; if (eventPosition.offset() != null) { return String.format(AmqpConstants.AMQP_ANNOTATION_FORMAT, OFFSET_ANNOTATION_NAME.getValue(), isInclusiveFlag, eventPosition.offset()); } if (eventPosition.sequenceNumber() != null) { return String.format(AmqpConstants.AMQP_ANNOTATION_FORMAT, SEQUENCE_NUMBER_ANNOTATION_NAME.getValue(), isInclusiveFlag, eventPosition.sequenceNumber()); } if (eventPosition.enqueuedDateTime() != null) { String ms; try { ms = Long.toString(eventPosition.enqueuedDateTime().toEpochMilli()); } catch (ArithmeticException ex) { ms = Long.toString(Long.MAX_VALUE); } return String.format(AmqpConstants.AMQP_ANNOTATION_FORMAT, ENQUEUED_TIME_UTC_ANNOTATION_NAME.getValue(), isInclusiveFlag, ms); } throw new IllegalArgumentException("No starting position was set."); } private static class ResponseMapper implements AmqpResponseMapper { @Override public EventHubProperties toEventHubProperties(Map<?, ?> amqpBody) { return new EventHubProperties( (String) amqpBody.get(ManagementChannel.MANAGEMENT_ENTITY_NAME_KEY), ((Date) amqpBody.get(ManagementChannel.MANAGEMENT_RESULT_CREATED_AT)).toInstant(), (String[]) amqpBody.get(ManagementChannel.MANAGEMENT_RESULT_PARTITION_IDS)); } @Override public PartitionProperties toPartitionProperties(Map<?, ?> amqpBody) { return new PartitionProperties( (String) amqpBody.get(ManagementChannel.MANAGEMENT_ENTITY_NAME_KEY), (String) 
amqpBody.get(ManagementChannel.MANAGEMENT_PARTITION_NAME_KEY), (Long) amqpBody.get(ManagementChannel.MANAGEMENT_RESULT_BEGIN_SEQUENCE_NUMBER), (Long) amqpBody.get(ManagementChannel.MANAGEMENT_RESULT_LAST_ENQUEUED_SEQUENCE_NUMBER), (String) amqpBody.get(ManagementChannel.MANAGEMENT_RESULT_LAST_ENQUEUED_OFFSET), ((Date) amqpBody.get(ManagementChannel.MANAGEMENT_RESULT_LAST_ENQUEUED_TIME_UTC)).toInstant(), (Boolean) amqpBody.get(ManagementChannel.MANAGEMENT_RESULT_PARTITION_IS_EMPTY)); } } }
class EventHubAsyncClient implements Closeable { /** * The name of the default consumer group in the Event Hubs service. */ public static final String DEFAULT_CONSUMER_GROUP_NAME = "$Default"; private static final String RECEIVER_ENTITY_PATH_FORMAT = "%s/ConsumerGroups/%s/Partitions/%s"; private static final String SENDER_ENTITY_PATH_FORMAT = "%s/Partitions/%s"; private final ClientLogger logger = new ClientLogger(EventHubAsyncClient.class); private final String connectionId; private final Mono<EventHubConnection> connectionMono; private final AtomicBoolean hasConnection = new AtomicBoolean(false); private final ConnectionOptions connectionOptions; private final String eventHubName; private final EventHubProducerOptions defaultProducerOptions; private final EventHubConsumerOptions defaultConsumerOptions; private final TracerProvider tracerProvider; EventHubAsyncClient(ConnectionOptions connectionOptions, ReactorProvider provider, ReactorHandlerProvider handlerProvider, TracerProvider tracerProvider) { Objects.requireNonNull(connectionOptions, "'connectionOptions' cannot be null."); Objects.requireNonNull(provider, "'provider' cannot be null."); Objects.requireNonNull(handlerProvider, "'handlerProvider' cannot be null."); Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null."); this.connectionOptions = connectionOptions; this.tracerProvider = tracerProvider; this.eventHubName = connectionOptions.eventHubName(); this.connectionId = StringUtil.getRandomString("MF"); this.connectionMono = Mono.fromCallable(() -> { return (EventHubConnection) new ReactorConnection(connectionId, connectionOptions, provider, handlerProvider, new ResponseMapper()); }).doOnSubscribe(c -> hasConnection.set(true)) .cache(); this.defaultProducerOptions = new EventHubProducerOptions() .retry(connectionOptions.retry()); this.defaultConsumerOptions = new EventHubConsumerOptions() .retry(connectionOptions.retry()) .scheduler(connectionOptions.scheduler()); } /** * Retrieves 
information about an Event Hub, including the number of partitions present and their identifiers. * * @return The set of information for the Event Hub that this client is associated with. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<EventHubProperties> getProperties() { return connectionMono.flatMap(connection -> connection.getManagementNode().flatMap(EventHubManagementNode::getEventHubProperties)); } /** * Retrieves the identifiers for the partitions of an Event Hub. * * @return A Flux of identifiers for the partitions of an Event Hub. */ @ServiceMethod(returns = ReturnType.COLLECTION) public Flux<String> getPartitionIds() { return getProperties().flatMapMany(properties -> Flux.fromArray(properties.partitionIds())); } /** * Retrieves information about a specific partition for an Event Hub, including elements that describe the available * events in the partition event stream. * * @param partitionId The unique identifier of a partition associated with the Event Hub. * @return The set of information for the requested partition under the Event Hub this client is associated with. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<PartitionProperties> getPartitionProperties(String partitionId) { return connectionMono.flatMap( connection -> connection.getManagementNode().flatMap(node -> { return node.getPartitionProperties(partitionId); })); } /** * Creates an Event Hub producer responsible for transmitting {@link EventData} to the Event Hub, grouped together * in batches. Event data is automatically routed to an available partition. * * @return A new {@link EventHubAsyncProducer}. */ public EventHubAsyncProducer createProducer() { return createProducer(defaultProducerOptions); } /** * Creates an Event Hub producer responsible for transmitting {@link EventData} to the Event Hub, grouped together * in batches. If {@link EventHubProducerOptions * events are routed to that specific partition. 
Otherwise, events are automatically routed to an available * partition. * * @param options The set of options to apply when creating the producer. * @return A new {@link EventHubAsyncProducer}. * @throws NullPointerException if {@code options} is {@code null}. */ /** * Creates an Event Hub consumer responsible for reading {@link EventData} from a specific Event Hub partition, as a * member of the specified consumer group, and begins reading events from the {@code eventPosition}. * * The consumer created is non-exclusive, allowing multiple consumers from the same consumer group to be actively * reading events from the partition. These non-exclusive consumers are sometimes referred to as "Non-epoch * Consumers". * * @param consumerGroup The name of the consumer group this consumer is associated with. Events are read in * the context of this group. The name of the consumer group that is created by default is {@link * * @param partitionId The identifier of the Event Hub partition. * @param eventPosition The position within the partition where the consumer should begin reading events. * @return A new {@link EventHubAsyncConsumer} that receives events from the partition at the given position. * @throws NullPointerException If {@code eventPosition}, or {@code options} is {@code null}. * @throws IllegalArgumentException If {@code consumerGroup} or {@code partitionId} is {@code null} or an * empty string. */ public EventHubAsyncConsumer createConsumer(String consumerGroup, String partitionId, EventPosition eventPosition) { return createConsumer(consumerGroup, partitionId, eventPosition, defaultConsumerOptions); } /** * Creates an Event Hub consumer responsible for reading {@link EventData} from a specific Event Hub partition, as a * member of the configured consumer group, and begins reading events from the specified {@code eventPosition}. 
* * <p> * A consumer may be exclusive, which asserts ownership over the partition for the consumer group to ensure that * only one consumer from that group is reading the from the partition. These exclusive consumers are sometimes * referred to as "Epoch Consumers." * * A consumer may also be non-exclusive, allowing multiple consumers from the same consumer group to be actively * reading events from the partition. These non-exclusive consumers are sometimes referred to as "Non-epoch * Consumers." * * Designating a consumer as exclusive may be specified in the {@code options}, by setting {@link * EventHubConsumerOptions * non-exclusive. * </p> * * @param consumerGroup The name of the consumer group this consumer is associated with. Events are read in * the context of this group. The name of the consumer group that is created by default is {@link * * @param partitionId The identifier of the Event Hub partition from which events will be received. * @param eventPosition The position within the partition where the consumer should begin reading events. * @param options The set of options to apply when creating the consumer. * @return An new {@link EventHubAsyncConsumer} that receives events from the partition with all configured {@link * EventHubConsumerOptions}. * @throws NullPointerException If {@code eventPosition}, {@code consumerGroup}, {@code partitionId}, or * {@code options} is {@code null}. * @throws IllegalArgumentException If {@code consumerGroup} or {@code partitionId} is an empty string. 
*/ public EventHubAsyncConsumer createConsumer(String consumerGroup, String partitionId, EventPosition eventPosition, EventHubConsumerOptions options) { Objects.requireNonNull(eventPosition, "'eventPosition' cannot be null."); Objects.requireNonNull(options, "'options' cannot be null."); Objects.requireNonNull(consumerGroup, "'consumerGroup' cannot be null."); Objects.requireNonNull(partitionId, "'partitionId' cannot be null."); if (consumerGroup.isEmpty()) { throw logger.logExceptionAsError(new IllegalArgumentException("'consumerGroup' cannot be an empty string.")); } else if (partitionId.isEmpty()) { throw logger.logExceptionAsError(new IllegalArgumentException("'partitionId' cannot be an empty string.")); } final EventHubConsumerOptions clonedOptions = options.clone(); if (clonedOptions.scheduler() == null) { clonedOptions.scheduler(connectionOptions.scheduler()); } if (clonedOptions.retry() == null) { clonedOptions.retry(connectionOptions.retry()); } final String linkName = StringUtil.getRandomString("PR"); final String entityPath = String.format(Locale.US, RECEIVER_ENTITY_PATH_FORMAT, eventHubName, consumerGroup, partitionId); final Mono<AmqpReceiveLink> receiveLinkMono = connectionMono.flatMap(connection -> { return connection.createSession(entityPath).cast(EventHubSession.class); }).flatMap(session -> { logger.verbose("Creating consumer for path: {}", entityPath); final RetryPolicy retryPolicy = RetryUtil.getRetryPolicy(clonedOptions.retry()); return session.createConsumer(linkName, entityPath, getExpression(eventPosition), clonedOptions.retry().tryTimeout(), retryPolicy, options.ownerLevel(), options.identifier()) .cast(AmqpReceiveLink.class); }); return new EventHubAsyncConsumer(receiveLinkMono, clonedOptions); } /** * Closes and disposes of connection to service. Any {@link EventHubAsyncConsumer EventHubConsumers} and {@link * EventHubAsyncProducer EventHubProducers} created with this instance will have their connections closed. 
*/ @Override public void close() { if (hasConnection.getAndSet(false)) { try { final AmqpConnection connection = connectionMono.block(connectionOptions.retry().tryTimeout()); if (connection != null) { connection.close(); } } catch (IOException exception) { throw logger.logExceptionAsError(new AmqpException(false, "Unable to close connection to service", exception, new ErrorContext(connectionOptions.host()))); } } } private static String getExpression(EventPosition eventPosition) { final String isInclusiveFlag = eventPosition.isInclusive() ? "=" : ""; if (eventPosition.offset() != null) { return String.format(AmqpConstants.AMQP_ANNOTATION_FORMAT, OFFSET_ANNOTATION_NAME.getValue(), isInclusiveFlag, eventPosition.offset()); } if (eventPosition.sequenceNumber() != null) { return String.format(AmqpConstants.AMQP_ANNOTATION_FORMAT, SEQUENCE_NUMBER_ANNOTATION_NAME.getValue(), isInclusiveFlag, eventPosition.sequenceNumber()); } if (eventPosition.enqueuedDateTime() != null) { String ms; try { ms = Long.toString(eventPosition.enqueuedDateTime().toEpochMilli()); } catch (ArithmeticException ex) { ms = Long.toString(Long.MAX_VALUE); } return String.format(AmqpConstants.AMQP_ANNOTATION_FORMAT, ENQUEUED_TIME_UTC_ANNOTATION_NAME.getValue(), isInclusiveFlag, ms); } throw new IllegalArgumentException("No starting position was set."); } private static class ResponseMapper implements AmqpResponseMapper { @Override public EventHubProperties toEventHubProperties(Map<?, ?> amqpBody) { return new EventHubProperties( (String) amqpBody.get(ManagementChannel.MANAGEMENT_ENTITY_NAME_KEY), ((Date) amqpBody.get(ManagementChannel.MANAGEMENT_RESULT_CREATED_AT)).toInstant(), (String[]) amqpBody.get(ManagementChannel.MANAGEMENT_RESULT_PARTITION_IDS)); } @Override public PartitionProperties toPartitionProperties(Map<?, ?> amqpBody) { return new PartitionProperties( (String) amqpBody.get(ManagementChannel.MANAGEMENT_ENTITY_NAME_KEY), (String) 
amqpBody.get(ManagementChannel.MANAGEMENT_PARTITION_NAME_KEY), (Long) amqpBody.get(ManagementChannel.MANAGEMENT_RESULT_BEGIN_SEQUENCE_NUMBER), (Long) amqpBody.get(ManagementChannel.MANAGEMENT_RESULT_LAST_ENQUEUED_SEQUENCE_NUMBER), (String) amqpBody.get(ManagementChannel.MANAGEMENT_RESULT_LAST_ENQUEUED_OFFSET), ((Date) amqpBody.get(ManagementChannel.MANAGEMENT_RESULT_LAST_ENQUEUED_TIME_UTC)).toInstant(), (Boolean) amqpBody.get(ManagementChannel.MANAGEMENT_RESULT_PARTITION_IS_EMPTY)); } } }
I try not to specify `this.` unless I'm in a constructor or there is a variable name collision in a method. Otherwise, it is verbose without adding additional information, imho (we don't have any spec around this).
public EventHubAsyncProducer createProducer(EventHubProducerOptions options) { Objects.requireNonNull(options); final EventHubProducerOptions clonedOptions = options.clone(); if (clonedOptions.retry() == null) { clonedOptions.retry(connectionOptions.retry()); } final String entityPath; final String linkName; if (ImplUtils.isNullOrEmpty(options.partitionId())) { entityPath = eventHubName; linkName = StringUtil.getRandomString("EC"); } else { entityPath = String.format(Locale.US, SENDER_ENTITY_PATH_FORMAT, eventHubName, options.partitionId()); linkName = StringUtil.getRandomString("PS"); } final Mono<AmqpSendLink> amqpLinkMono = connectionMono .flatMap(connection -> connection.createSession(entityPath)) .flatMap(session -> { logger.verbose("Creating producer for {}", entityPath); final RetryPolicy retryPolicy = RetryUtil.getRetryPolicy(clonedOptions.retry()); return session.createProducer(linkName, entityPath, clonedOptions.retry().tryTimeout(), retryPolicy) .cast(AmqpSendLink.class); }); return new EventHubAsyncProducer(amqpLinkMono, clonedOptions, this.tracerProvider); }
return new EventHubAsyncProducer(amqpLinkMono, clonedOptions, this.tracerProvider);
public EventHubAsyncProducer createProducer(EventHubProducerOptions options) { Objects.requireNonNull(options, "'options' cannot be null."); final EventHubProducerOptions clonedOptions = options.clone(); if (clonedOptions.retry() == null) { clonedOptions.retry(connectionOptions.retry()); } final String entityPath; final String linkName; if (ImplUtils.isNullOrEmpty(options.partitionId())) { entityPath = eventHubName; linkName = StringUtil.getRandomString("EC"); } else { entityPath = String.format(Locale.US, SENDER_ENTITY_PATH_FORMAT, eventHubName, options.partitionId()); linkName = StringUtil.getRandomString("PS"); } final Mono<AmqpSendLink> amqpLinkMono = connectionMono .flatMap(connection -> connection.createSession(entityPath)) .flatMap(session -> { logger.verbose("Creating producer for {}", entityPath); final RetryPolicy retryPolicy = RetryUtil.getRetryPolicy(clonedOptions.retry()); return session.createProducer(linkName, entityPath, clonedOptions.retry().tryTimeout(), retryPolicy) .cast(AmqpSendLink.class); }); return new EventHubAsyncProducer(amqpLinkMono, clonedOptions, tracerProvider); }
class EventHubAsyncClient implements Closeable { /** * The name of the default consumer group in the Event Hubs service. */ public static final String DEFAULT_CONSUMER_GROUP_NAME = "$Default"; private static final String RECEIVER_ENTITY_PATH_FORMAT = "%s/ConsumerGroups/%s/Partitions/%s"; private static final String SENDER_ENTITY_PATH_FORMAT = "%s/Partitions/%s"; private final ClientLogger logger = new ClientLogger(EventHubAsyncClient.class); private final String connectionId; private final Mono<EventHubConnection> connectionMono; private final AtomicBoolean hasConnection = new AtomicBoolean(false); private final ConnectionOptions connectionOptions; private final String eventHubName; private final EventHubProducerOptions defaultProducerOptions; private final EventHubConsumerOptions defaultConsumerOptions; private final TracerProvider tracerProvider; EventHubAsyncClient(ConnectionOptions connectionOptions, ReactorProvider provider, ReactorHandlerProvider handlerProvider, TracerProvider tracerProvider) { Objects.requireNonNull(connectionOptions); Objects.requireNonNull(provider); Objects.requireNonNull(handlerProvider); this.connectionOptions = connectionOptions; this.tracerProvider = tracerProvider; this.eventHubName = connectionOptions.eventHubName(); this.connectionId = StringUtil.getRandomString("MF"); this.connectionMono = Mono.fromCallable(() -> { return (EventHubConnection) new ReactorConnection(connectionId, connectionOptions, provider, handlerProvider, new ResponseMapper()); }).doOnSubscribe(c -> hasConnection.set(true)) .cache(); this.defaultProducerOptions = new EventHubProducerOptions() .retry(connectionOptions.retry()); this.defaultConsumerOptions = new EventHubConsumerOptions() .retry(connectionOptions.retry()) .scheduler(connectionOptions.scheduler()); } /** * Retrieves information about an Event Hub, including the number of partitions present and their identifiers. 
* * @return The set of information for the Event Hub that this client is associated with. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<EventHubProperties> getProperties() { return connectionMono.flatMap(connection -> connection.getManagementNode().flatMap(EventHubManagementNode::getEventHubProperties)); } /** * Retrieves the identifiers for the partitions of an Event Hub. * * @return A Flux of identifiers for the partitions of an Event Hub. */ @ServiceMethod(returns = ReturnType.COLLECTION) public Flux<String> getPartitionIds() { return getProperties().flatMapMany(properties -> Flux.fromArray(properties.partitionIds())); } /** * Retrieves information about a specific partition for an Event Hub, including elements that describe the available * events in the partition event stream. * * @param partitionId The unique identifier of a partition associated with the Event Hub. * @return The set of information for the requested partition under the Event Hub this client is associated with. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<PartitionProperties> getPartitionProperties(String partitionId) { return connectionMono.flatMap( connection -> connection.getManagementNode().flatMap(node -> { return node.getPartitionProperties(partitionId); })); } /** * Creates an Event Hub producer responsible for transmitting {@link EventData} to the Event Hub, grouped together * in batches. Event data is automatically routed to an available partition. * * @return A new {@link EventHubAsyncProducer}. */ public EventHubAsyncProducer createProducer() { return createProducer(defaultProducerOptions); } /** * Creates an Event Hub producer responsible for transmitting {@link EventData} to the Event Hub, grouped together * in batches. If {@link EventHubProducerOptions * events are routed to that specific partition. Otherwise, events are automatically routed to an available * partition. * * @param options The set of options to apply when creating the producer. 
* @return A new {@link EventHubAsyncProducer}. * @throws NullPointerException if {@code options} is {@code null}. */ /** * Creates an Event Hub consumer responsible for reading {@link EventData} from a specific Event Hub partition, as a * member of the specified consumer group, and begins reading events from the {@code eventPosition}. * * The consumer created is non-exclusive, allowing multiple consumers from the same consumer group to be actively * reading events from the partition. These non-exclusive consumers are sometimes referred to as "Non-epoch * Consumers". * * @param consumerGroup The name of the consumer group this consumer is associated with. Events are read in * the context of this group. The name of the consumer group that is created by default is {@link * * @param partitionId The identifier of the Event Hub partition. * @param eventPosition The position within the partition where the consumer should begin reading events. * @return A new {@link EventHubAsyncConsumer} that receives events from the partition at the given position. * @throws NullPointerException If {@code eventPosition}, or {@code options} is {@code null}. * @throws IllegalArgumentException If {@code consumerGroup} or {@code partitionId} is {@code null} or an * empty string. */ public EventHubAsyncConsumer createConsumer(String consumerGroup, String partitionId, EventPosition eventPosition) { return createConsumer(consumerGroup, partitionId, eventPosition, defaultConsumerOptions); } /** * Creates an Event Hub consumer responsible for reading {@link EventData} from a specific Event Hub partition, as a * member of the configured consumer group, and begins reading events from the specified {@code eventPosition}. * * <p> * A consumer may be exclusive, which asserts ownership over the partition for the consumer group to ensure that * only one consumer from that group is reading the from the partition. These exclusive consumers are sometimes * referred to as "Epoch Consumers." 
* * A consumer may also be non-exclusive, allowing multiple consumers from the same consumer group to be actively * reading events from the partition. These non-exclusive consumers are sometimes referred to as "Non-epoch * Consumers." * * Designating a consumer as exclusive may be specified in the {@code options}, by setting {@link * EventHubConsumerOptions * non-exclusive. * </p> * * @param consumerGroup The name of the consumer group this consumer is associated with. Events are read in * the context of this group. The name of the consumer group that is created by default is {@link * * @param partitionId The identifier of the Event Hub partition from which events will be received. * @param eventPosition The position within the partition where the consumer should begin reading events. * @param options The set of options to apply when creating the consumer. * @return An new {@link EventHubAsyncConsumer} that receives events from the partition with all configured {@link * EventHubConsumerOptions}. * @throws NullPointerException If {@code eventPosition}, {@code consumerGroup}, {@code partitionId}, or {@code * options} is {@code null}. * @throws IllegalArgumentException If {@code consumerGroup} or {@code partitionId} is an empty string. 
*/ public EventHubAsyncConsumer createConsumer(String consumerGroup, String partitionId, EventPosition eventPosition, EventHubConsumerOptions options) { Objects.requireNonNull(eventPosition); Objects.requireNonNull(options); Objects.requireNonNull(consumerGroup); Objects.requireNonNull(partitionId); if (ImplUtils.isNullOrEmpty(consumerGroup)) { throw logger.logExceptionAsError(new IllegalArgumentException("'consumerGroup' cannot be an empty string.")); } if (ImplUtils.isNullOrEmpty(partitionId)) { throw logger.logExceptionAsError(new IllegalArgumentException("'partitionId' cannot be an empty string.")); } final EventHubConsumerOptions clonedOptions = options.clone(); if (clonedOptions.scheduler() == null) { clonedOptions.scheduler(connectionOptions.scheduler()); } if (clonedOptions.retry() == null) { clonedOptions.retry(connectionOptions.retry()); } final String linkName = StringUtil.getRandomString("PR"); final String entityPath = String.format(Locale.US, RECEIVER_ENTITY_PATH_FORMAT, eventHubName, consumerGroup, partitionId); final Mono<AmqpReceiveLink> receiveLinkMono = connectionMono.flatMap(connection -> { return connection.createSession(entityPath).cast(EventHubSession.class); }).flatMap(session -> { logger.verbose("Creating consumer for path: {}", entityPath); final RetryPolicy retryPolicy = RetryUtil.getRetryPolicy(clonedOptions.retry()); return session.createConsumer(linkName, entityPath, getExpression(eventPosition), clonedOptions.retry().tryTimeout(), retryPolicy, options.ownerLevel(), options.identifier()) .cast(AmqpReceiveLink.class); }); return new EventHubAsyncConsumer(receiveLinkMono, clonedOptions); } /** * Closes and disposes of connection to service. Any {@link EventHubAsyncConsumer EventHubConsumers} and {@link * EventHubAsyncProducer EventHubProducers} created with this instance will have their connections closed. 
*/ @Override public void close() { if (hasConnection.getAndSet(false)) { try { final AmqpConnection connection = connectionMono.block(connectionOptions.retry().tryTimeout()); if (connection != null) { connection.close(); } } catch (IOException exception) { throw logger.logExceptionAsError(new AmqpException(false, "Unable to close connection to service", exception, new ErrorContext(connectionOptions.host()))); } } } private static String getExpression(EventPosition eventPosition) { final String isInclusiveFlag = eventPosition.isInclusive() ? "=" : ""; if (eventPosition.offset() != null) { return String.format(AmqpConstants.AMQP_ANNOTATION_FORMAT, OFFSET_ANNOTATION_NAME.getValue(), isInclusiveFlag, eventPosition.offset()); } if (eventPosition.sequenceNumber() != null) { return String.format(AmqpConstants.AMQP_ANNOTATION_FORMAT, SEQUENCE_NUMBER_ANNOTATION_NAME.getValue(), isInclusiveFlag, eventPosition.sequenceNumber()); } if (eventPosition.enqueuedDateTime() != null) { String ms; try { ms = Long.toString(eventPosition.enqueuedDateTime().toEpochMilli()); } catch (ArithmeticException ex) { ms = Long.toString(Long.MAX_VALUE); } return String.format(AmqpConstants.AMQP_ANNOTATION_FORMAT, ENQUEUED_TIME_UTC_ANNOTATION_NAME.getValue(), isInclusiveFlag, ms); } throw new IllegalArgumentException("No starting position was set."); } private static class ResponseMapper implements AmqpResponseMapper { @Override public EventHubProperties toEventHubProperties(Map<?, ?> amqpBody) { return new EventHubProperties( (String) amqpBody.get(ManagementChannel.MANAGEMENT_ENTITY_NAME_KEY), ((Date) amqpBody.get(ManagementChannel.MANAGEMENT_RESULT_CREATED_AT)).toInstant(), (String[]) amqpBody.get(ManagementChannel.MANAGEMENT_RESULT_PARTITION_IDS)); } @Override public PartitionProperties toPartitionProperties(Map<?, ?> amqpBody) { return new PartitionProperties( (String) amqpBody.get(ManagementChannel.MANAGEMENT_ENTITY_NAME_KEY), (String) 
amqpBody.get(ManagementChannel.MANAGEMENT_PARTITION_NAME_KEY), (Long) amqpBody.get(ManagementChannel.MANAGEMENT_RESULT_BEGIN_SEQUENCE_NUMBER), (Long) amqpBody.get(ManagementChannel.MANAGEMENT_RESULT_LAST_ENQUEUED_SEQUENCE_NUMBER), (String) amqpBody.get(ManagementChannel.MANAGEMENT_RESULT_LAST_ENQUEUED_OFFSET), ((Date) amqpBody.get(ManagementChannel.MANAGEMENT_RESULT_LAST_ENQUEUED_TIME_UTC)).toInstant(), (Boolean) amqpBody.get(ManagementChannel.MANAGEMENT_RESULT_PARTITION_IS_EMPTY)); } } }
class EventHubAsyncClient implements Closeable { /** * The name of the default consumer group in the Event Hubs service. */ public static final String DEFAULT_CONSUMER_GROUP_NAME = "$Default"; private static final String RECEIVER_ENTITY_PATH_FORMAT = "%s/ConsumerGroups/%s/Partitions/%s"; private static final String SENDER_ENTITY_PATH_FORMAT = "%s/Partitions/%s"; private final ClientLogger logger = new ClientLogger(EventHubAsyncClient.class); private final String connectionId; private final Mono<EventHubConnection> connectionMono; private final AtomicBoolean hasConnection = new AtomicBoolean(false); private final ConnectionOptions connectionOptions; private final String eventHubName; private final EventHubProducerOptions defaultProducerOptions; private final EventHubConsumerOptions defaultConsumerOptions; private final TracerProvider tracerProvider; EventHubAsyncClient(ConnectionOptions connectionOptions, ReactorProvider provider, ReactorHandlerProvider handlerProvider, TracerProvider tracerProvider) { Objects.requireNonNull(connectionOptions, "'connectionOptions' cannot be null."); Objects.requireNonNull(provider, "'provider' cannot be null."); Objects.requireNonNull(handlerProvider, "'handlerProvider' cannot be null."); Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null."); this.connectionOptions = connectionOptions; this.tracerProvider = tracerProvider; this.eventHubName = connectionOptions.eventHubName(); this.connectionId = StringUtil.getRandomString("MF"); this.connectionMono = Mono.fromCallable(() -> { return (EventHubConnection) new ReactorConnection(connectionId, connectionOptions, provider, handlerProvider, new ResponseMapper()); }).doOnSubscribe(c -> hasConnection.set(true)) .cache(); this.defaultProducerOptions = new EventHubProducerOptions() .retry(connectionOptions.retry()); this.defaultConsumerOptions = new EventHubConsumerOptions() .retry(connectionOptions.retry()) .scheduler(connectionOptions.scheduler()); } /** * Retrieves 
information about an Event Hub, including the number of partitions present and their identifiers. * * @return The set of information for the Event Hub that this client is associated with. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<EventHubProperties> getProperties() { return connectionMono.flatMap(connection -> connection.getManagementNode().flatMap(EventHubManagementNode::getEventHubProperties)); } /** * Retrieves the identifiers for the partitions of an Event Hub. * * @return A Flux of identifiers for the partitions of an Event Hub. */ @ServiceMethod(returns = ReturnType.COLLECTION) public Flux<String> getPartitionIds() { return getProperties().flatMapMany(properties -> Flux.fromArray(properties.partitionIds())); } /** * Retrieves information about a specific partition for an Event Hub, including elements that describe the available * events in the partition event stream. * * @param partitionId The unique identifier of a partition associated with the Event Hub. * @return The set of information for the requested partition under the Event Hub this client is associated with. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<PartitionProperties> getPartitionProperties(String partitionId) { return connectionMono.flatMap( connection -> connection.getManagementNode().flatMap(node -> { return node.getPartitionProperties(partitionId); })); } /** * Creates an Event Hub producer responsible for transmitting {@link EventData} to the Event Hub, grouped together * in batches. Event data is automatically routed to an available partition. * * @return A new {@link EventHubAsyncProducer}. */ public EventHubAsyncProducer createProducer() { return createProducer(defaultProducerOptions); } /** * Creates an Event Hub producer responsible for transmitting {@link EventData} to the Event Hub, grouped together * in batches. If {@link EventHubProducerOptions * events are routed to that specific partition. 
Otherwise, events are automatically routed to an available * partition. * * @param options The set of options to apply when creating the producer. * @return A new {@link EventHubAsyncProducer}. * @throws NullPointerException if {@code options} is {@code null}. */ /** * Creates an Event Hub consumer responsible for reading {@link EventData} from a specific Event Hub partition, as a * member of the specified consumer group, and begins reading events from the {@code eventPosition}. * * The consumer created is non-exclusive, allowing multiple consumers from the same consumer group to be actively * reading events from the partition. These non-exclusive consumers are sometimes referred to as "Non-epoch * Consumers". * * @param consumerGroup The name of the consumer group this consumer is associated with. Events are read in * the context of this group. The name of the consumer group that is created by default is {@link * * @param partitionId The identifier of the Event Hub partition. * @param eventPosition The position within the partition where the consumer should begin reading events. * @return A new {@link EventHubAsyncConsumer} that receives events from the partition at the given position. * @throws NullPointerException If {@code eventPosition}, or {@code options} is {@code null}. * @throws IllegalArgumentException If {@code consumerGroup} or {@code partitionId} is {@code null} or an * empty string. */ public EventHubAsyncConsumer createConsumer(String consumerGroup, String partitionId, EventPosition eventPosition) { return createConsumer(consumerGroup, partitionId, eventPosition, defaultConsumerOptions); } /** * Creates an Event Hub consumer responsible for reading {@link EventData} from a specific Event Hub partition, as a * member of the configured consumer group, and begins reading events from the specified {@code eventPosition}. 
* * <p> * A consumer may be exclusive, which asserts ownership over the partition for the consumer group to ensure that * only one consumer from that group is reading the from the partition. These exclusive consumers are sometimes * referred to as "Epoch Consumers." * * A consumer may also be non-exclusive, allowing multiple consumers from the same consumer group to be actively * reading events from the partition. These non-exclusive consumers are sometimes referred to as "Non-epoch * Consumers." * * Designating a consumer as exclusive may be specified in the {@code options}, by setting {@link * EventHubConsumerOptions * non-exclusive. * </p> * * @param consumerGroup The name of the consumer group this consumer is associated with. Events are read in * the context of this group. The name of the consumer group that is created by default is {@link * * @param partitionId The identifier of the Event Hub partition from which events will be received. * @param eventPosition The position within the partition where the consumer should begin reading events. * @param options The set of options to apply when creating the consumer. * @return An new {@link EventHubAsyncConsumer} that receives events from the partition with all configured {@link * EventHubConsumerOptions}. * @throws NullPointerException If {@code eventPosition}, {@code consumerGroup}, {@code partitionId}, or * {@code options} is {@code null}. * @throws IllegalArgumentException If {@code consumerGroup} or {@code partitionId} is an empty string. 
*/ public EventHubAsyncConsumer createConsumer(String consumerGroup, String partitionId, EventPosition eventPosition, EventHubConsumerOptions options) { Objects.requireNonNull(eventPosition, "'eventPosition' cannot be null."); Objects.requireNonNull(options, "'options' cannot be null."); Objects.requireNonNull(consumerGroup, "'consumerGroup' cannot be null."); Objects.requireNonNull(partitionId, "'partitionId' cannot be null."); if (consumerGroup.isEmpty()) { throw logger.logExceptionAsError(new IllegalArgumentException("'consumerGroup' cannot be an empty string.")); } else if (partitionId.isEmpty()) { throw logger.logExceptionAsError(new IllegalArgumentException("'partitionId' cannot be an empty string.")); } final EventHubConsumerOptions clonedOptions = options.clone(); if (clonedOptions.scheduler() == null) { clonedOptions.scheduler(connectionOptions.scheduler()); } if (clonedOptions.retry() == null) { clonedOptions.retry(connectionOptions.retry()); } final String linkName = StringUtil.getRandomString("PR"); final String entityPath = String.format(Locale.US, RECEIVER_ENTITY_PATH_FORMAT, eventHubName, consumerGroup, partitionId); final Mono<AmqpReceiveLink> receiveLinkMono = connectionMono.flatMap(connection -> { return connection.createSession(entityPath).cast(EventHubSession.class); }).flatMap(session -> { logger.verbose("Creating consumer for path: {}", entityPath); final RetryPolicy retryPolicy = RetryUtil.getRetryPolicy(clonedOptions.retry()); return session.createConsumer(linkName, entityPath, getExpression(eventPosition), clonedOptions.retry().tryTimeout(), retryPolicy, options.ownerLevel(), options.identifier()) .cast(AmqpReceiveLink.class); }); return new EventHubAsyncConsumer(receiveLinkMono, clonedOptions); } /** * Closes and disposes of connection to service. Any {@link EventHubAsyncConsumer EventHubConsumers} and {@link * EventHubAsyncProducer EventHubProducers} created with this instance will have their connections closed. 
*/ @Override public void close() { if (hasConnection.getAndSet(false)) { try { final AmqpConnection connection = connectionMono.block(connectionOptions.retry().tryTimeout()); if (connection != null) { connection.close(); } } catch (IOException exception) { throw logger.logExceptionAsError(new AmqpException(false, "Unable to close connection to service", exception, new ErrorContext(connectionOptions.host()))); } } } private static String getExpression(EventPosition eventPosition) { final String isInclusiveFlag = eventPosition.isInclusive() ? "=" : ""; if (eventPosition.offset() != null) { return String.format(AmqpConstants.AMQP_ANNOTATION_FORMAT, OFFSET_ANNOTATION_NAME.getValue(), isInclusiveFlag, eventPosition.offset()); } if (eventPosition.sequenceNumber() != null) { return String.format(AmqpConstants.AMQP_ANNOTATION_FORMAT, SEQUENCE_NUMBER_ANNOTATION_NAME.getValue(), isInclusiveFlag, eventPosition.sequenceNumber()); } if (eventPosition.enqueuedDateTime() != null) { String ms; try { ms = Long.toString(eventPosition.enqueuedDateTime().toEpochMilli()); } catch (ArithmeticException ex) { ms = Long.toString(Long.MAX_VALUE); } return String.format(AmqpConstants.AMQP_ANNOTATION_FORMAT, ENQUEUED_TIME_UTC_ANNOTATION_NAME.getValue(), isInclusiveFlag, ms); } throw new IllegalArgumentException("No starting position was set."); } private static class ResponseMapper implements AmqpResponseMapper { @Override public EventHubProperties toEventHubProperties(Map<?, ?> amqpBody) { return new EventHubProperties( (String) amqpBody.get(ManagementChannel.MANAGEMENT_ENTITY_NAME_KEY), ((Date) amqpBody.get(ManagementChannel.MANAGEMENT_RESULT_CREATED_AT)).toInstant(), (String[]) amqpBody.get(ManagementChannel.MANAGEMENT_RESULT_PARTITION_IDS)); } @Override public PartitionProperties toPartitionProperties(Map<?, ?> amqpBody) { return new PartitionProperties( (String) amqpBody.get(ManagementChannel.MANAGEMENT_ENTITY_NAME_KEY), (String) 
amqpBody.get(ManagementChannel.MANAGEMENT_PARTITION_NAME_KEY), (Long) amqpBody.get(ManagementChannel.MANAGEMENT_RESULT_BEGIN_SEQUENCE_NUMBER), (Long) amqpBody.get(ManagementChannel.MANAGEMENT_RESULT_LAST_ENQUEUED_SEQUENCE_NUMBER), (String) amqpBody.get(ManagementChannel.MANAGEMENT_RESULT_LAST_ENQUEUED_OFFSET), ((Date) amqpBody.get(ManagementChannel.MANAGEMENT_RESULT_LAST_ENQUEUED_TIME_UTC)).toInstant(), (Boolean) amqpBody.get(ManagementChannel.MANAGEMENT_RESULT_PARTITION_IS_EMPTY)); } } }
For every single event, you are setting the `sendSpanContext` AtomicReference? Isn't it just the first event you want to do this for?
private Mono<Void> sendInternal(Flux<EventData> events, SendOptions options) { final String partitionKey = options.partitionKey(); verifyPartitionKey(partitionKey); return sendLinkMono.flatMap(link -> { final AtomicReference<Context> sendSpanContext = new AtomicReference<>(Context.NONE); return link.getLinkSize() .flatMap(size -> { final int batchSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; final BatchOptions batchOptions = new BatchOptions() .partitionKey(partitionKey) .maximumSizeInBytes(batchSize); return events.map(eventData -> { Context parentContext = eventData.context(); Context entityContext = parentContext.addData(ENTITY_PATH, link.getEntityPath()); sendSpanContext.set(tracerProvider.startSpan(entityContext.addData(HOST_NAME, link.getHostname()), ProcessKind.SEND)); return setSpanContext(eventData, parentContext); }).collect(new EventDataCollector(batchOptions, 1, () -> link.getErrorContext())); }) .flatMap(list -> sendInternal(Flux.fromIterable(list))) .doOnEach(signal -> tracerProvider.endSpan(sendSpanContext.get(), signal)); }); }
sendSpanContext.set(tracerProvider.startSpan(entityContext.addData(HOST_NAME, link.getHostname()), ProcessKind.SEND));
private Mono<Void> sendInternal(Flux<EventData> events, SendOptions options) { final String partitionKey = options.partitionKey(); verifyPartitionKey(partitionKey); if (tracerProvider.isEnabled()) { return sendInternalTracingEnabled(events, partitionKey); } else { return sendInternalTracingDisabled(events, partitionKey); } }
class EventHubAsyncProducer implements Closeable { private static final int MAX_PARTITION_KEY_LENGTH = 128; /** * The default maximum allowable size, in bytes, for a batch to be sent. */ public static final int MAX_MESSAGE_LENGTH_BYTES = 256 * 1024; private static final SendOptions DEFAULT_SEND_OPTIONS = new SendOptions(); private static final BatchOptions DEFAULT_BATCH_OPTIONS = new BatchOptions(); private final ClientLogger logger = new ClientLogger(EventHubAsyncProducer.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final EventHubProducerOptions senderOptions; private final Mono<AmqpSendLink> sendLinkMono; private final boolean isPartitionSender; private final TracerProvider tracerProvider; /** * Creates a new instance of this {@link EventHubAsyncProducer} that sends messages to {@link * EventHubProducerOptions * otherwise, allows the service to load balance the messages amongst available partitions. */ EventHubAsyncProducer(Mono<AmqpSendLink> amqpSendLinkMono, EventHubProducerOptions options, TracerProvider tracerProvider) { this.sendLinkMono = amqpSendLinkMono.cache(); this.senderOptions = options; this.isPartitionSender = !ImplUtils.isNullOrEmpty(options.partitionId()); this.tracerProvider = tracerProvider; } /** * Creates an {@link EventDataBatch} that can fit as many events as the transport allows. * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. */ public Mono<EventDataBatch> createBatch() { return createBatch(DEFAULT_BATCH_OPTIONS); } /** * Creates an {@link EventDataBatch} that can fit as many events as the transport allows. * @param options A set of options used to configure the {@link EventDataBatch}. * * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. 
*/ public Mono<EventDataBatch> createBatch(BatchOptions options) { Objects.requireNonNull(options); final BatchOptions clone = options.clone(); verifyPartitionKey(clone.partitionKey()); return sendLinkMono.flatMap(link -> link.getLinkSize() .flatMap(size -> { final int maximumLinkSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; if (clone.maximumSizeInBytes() > maximumLinkSize) { return Mono.error(new IllegalArgumentException(String.format(Locale.US, "BatchOptions.maximumSizeInBytes (%s bytes) is larger than the link size (%s bytes).", clone.maximumSizeInBytes(), maximumLinkSize))); } final int batchSize = clone.maximumSizeInBytes() > 0 ? clone.maximumSizeInBytes() : maximumLinkSize; return Mono.just(new EventDataBatch(batchSize, clone.partitionKey(), () -> link.getErrorContext())); })); } /** * Sends a single event to the associated Event Hub. If the size of the single event exceeds the maximum size * allowed, an exception will be triggered and the send will fail. * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. * * @return A {@link Mono} that completes when the event is pushed to the service. */ public Mono<Void> send(EventData event) { Objects.requireNonNull(event); return send(Flux.just(event)); } /** * Sends a single event to the associated Event Hub with the send options. If the size of the single event exceeds * the maximum size allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * @param event Event to send to the service. * @param options The set of options to consider when sending this event. * * @return A {@link Mono} that completes when the event is pushed to the service. 
*/ public Mono<Void> send(EventData event, SendOptions options) { Objects.requireNonNull(event); Objects.requireNonNull(options); return send(Flux.just(event), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * @param events Events to send to the service. * * @return A {@link Mono} that completes when all events are pushed to the service. */ public Mono<Void> send(Iterable<EventData> events) { Objects.requireNonNull(events); return send(Flux.fromIterable(events)); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * * @return A {@link Mono} that completes when all events are pushed to the service. */ public Mono<Void> send(Iterable<EventData> events, SendOptions options) { Objects.requireNonNull(events); return send(Flux.fromIterable(events), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * @param events Events to send to the service. * * @return A {@link Mono} that completes when all events are pushed to the service. */ public Mono<Void> send(Flux<EventData> events) { Objects.requireNonNull(events); return send(events, DEFAULT_SEND_OPTIONS); } /** * Sends a set of events to the associated Event Hub using a batched approach. 
If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * * @return A {@link Mono} that completes when all events are pushed to the service. */ public Mono<Void> send(Flux<EventData> events, SendOptions options) { Objects.requireNonNull(events); Objects.requireNonNull(options); return sendInternal(events, options); } /** * Sends the batch to the associated Event Hub. * @param batch The batch to send to the service. * * @return A {@link Mono} that completes when the batch is pushed to the service. * @throws NullPointerException if {@code batch} is {@code null}. * @see EventHubAsyncProducer * @see EventHubAsyncProducer */ public Mono<Void> send(EventDataBatch batch) { Objects.requireNonNull(batch); if (batch.getEvents().isEmpty()) { logger.info("Cannot send an EventBatch that is empty."); return Mono.empty(); } logger.info("Sending batch with partitionKey[{}], size[{}].", batch.getPartitionKey(), batch.getSize()); final List<Message> messages = EventDataUtil.toAmqpMessage(batch.getPartitionKey(), batch.getEvents()); return sendLinkMono.flatMap(link -> messages.size() == 1 ? 
link.send(messages.get(0)) : link.send(messages)); } private EventData setSpanContext(EventData event, Context parentContext) { Optional<Object> eventContextData = event.context().getData(SPAN_CONTEXT); if (eventContextData.isPresent()) { tracerProvider.addSpanLinks((Context) eventContextData.get()); return event; } else { Context eventSpanContext = tracerProvider.startSpan(parentContext, ProcessKind.MESSAGE); if (eventSpanContext != null && eventSpanContext.getData(DIAGNOSTIC_ID_KEY).isPresent()) { event.addProperty(DIAGNOSTIC_ID_KEY, eventSpanContext.getData(DIAGNOSTIC_ID_KEY).get().toString()); tracerProvider.endSpan(eventSpanContext, null); event.addContext(SPAN_CONTEXT, eventSpanContext); } } return event; } private Mono<Void> sendInternal(Flux<EventDataBatch> eventBatches) { return eventBatches .flatMap(this::send) .then() .doOnError(error -> { logger.error("Error sending batch.", error); }); } private void verifyPartitionKey(String partitionKey) { if (ImplUtils.isNullOrEmpty(partitionKey)) { return; } if (isPartitionSender) { throw logger.logExceptionAsError(new IllegalArgumentException(String.format(Locale.US, "BatchOptions.partitionKey() cannot be set when an EventHubProducer is created with" + "EventHubProducerOptions.partitionId() set. This EventHubProducer can only send events to partition '%s'.", senderOptions.partitionId()))); } else if (partitionKey.length() > MAX_PARTITION_KEY_LENGTH) { throw logger.logExceptionAsError(new IllegalArgumentException(String.format(Locale.US, "PartitionKey '%s' exceeds the maximum allowed length: '%s'.", partitionKey, MAX_PARTITION_KEY_LENGTH))); } } /** * Disposes of the {@link EventHubAsyncProducer} by closing the underlying connection to the service. * @throws IOException if the underlying transport could not be closed and its resources could not be * disposed. 
*/ @Override public void close() throws IOException { if (!isDisposed.getAndSet(true)) { final AmqpSendLink block = sendLinkMono.block(senderOptions.retry().tryTimeout()); if (block != null) { block.close(); } } } /** * Collects EventData into EventDataBatch to send to Event Hubs. If {@code maxNumberOfBatches} is {@code null} then * it'll collect as many batches as possible. Otherwise, if there are more events than can fit into {@code * maxNumberOfBatches}, then the collector throws a {@link AmqpException} with {@link * ErrorCondition */ private static class EventDataCollector implements Collector<EventData, List<EventDataBatch>, List<EventDataBatch>> { private final String partitionKey; private final int maxMessageSize; private final Integer maxNumberOfBatches; private final ErrorContextProvider contextProvider; private volatile EventDataBatch currentBatch; EventDataCollector(BatchOptions options, Integer maxNumberOfBatches, ErrorContextProvider contextProvider) { this.maxNumberOfBatches = maxNumberOfBatches; this.maxMessageSize = options.maximumSizeInBytes() > 0 ? options.maximumSizeInBytes() : MAX_MESSAGE_LENGTH_BYTES; this.partitionKey = options.partitionKey(); this.contextProvider = contextProvider; currentBatch = new EventDataBatch(this.maxMessageSize, options.partitionKey(), contextProvider); } @Override public Supplier<List<EventDataBatch>> supplier() { return ArrayList::new; } @Override public BiConsumer<List<EventDataBatch>, EventData> accumulator() { return (list, event) -> { EventDataBatch batch = currentBatch; if (batch.tryAdd(event)) { return; } if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) { final String message = String.format(Locale.US, "EventData does not fit into maximum number of batches. 
'%s'", maxNumberOfBatches); throw new AmqpException(false, ErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, contextProvider.getErrorContext()); } currentBatch = new EventDataBatch(maxMessageSize, partitionKey, contextProvider); currentBatch.tryAdd(event); list.add(batch); }; } @Override public BinaryOperator<List<EventDataBatch>> combiner() { return (existing, another) -> { existing.addAll(another); return existing; }; } @Override public Function<List<EventDataBatch>, List<EventDataBatch>> finisher() { return list -> { EventDataBatch batch = currentBatch; currentBatch = null; if (batch != null) { list.add(batch); } return list; }; } @Override public Set<Characteristics> characteristics() { return Collections.emptySet(); } } }
class EventHubAsyncProducer implements Closeable { private static final int MAX_PARTITION_KEY_LENGTH = 128; /** * The default maximum allowable size, in bytes, for a batch to be sent. */ public static final int MAX_MESSAGE_LENGTH_BYTES = 256 * 1024; private static final SendOptions DEFAULT_SEND_OPTIONS = new SendOptions(); private static final BatchOptions DEFAULT_BATCH_OPTIONS = new BatchOptions(); private final ClientLogger logger = new ClientLogger(EventHubAsyncProducer.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final EventHubProducerOptions senderOptions; private final Mono<AmqpSendLink> sendLinkMono; private final boolean isPartitionSender; private final TracerProvider tracerProvider; /** * Creates a new instance of this {@link EventHubAsyncProducer} that sends messages to {@link * EventHubProducerOptions * otherwise, allows the service to load balance the messages amongst available partitions. */ EventHubAsyncProducer(Mono<AmqpSendLink> amqpSendLinkMono, EventHubProducerOptions options, TracerProvider tracerProvider) { this.sendLinkMono = amqpSendLinkMono.cache(); this.senderOptions = options; this.isPartitionSender = !ImplUtils.isNullOrEmpty(options.partitionId()); this.tracerProvider = tracerProvider; } /** * Creates an {@link EventDataBatch} that can fit as many events as the transport allows. * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. */ public Mono<EventDataBatch> createBatch() { return createBatch(DEFAULT_BATCH_OPTIONS); } /** * Creates an {@link EventDataBatch} that can fit as many events as the transport allows. * @param options A set of options used to configure the {@link EventDataBatch}. * * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. 
*/ public Mono<EventDataBatch> createBatch(BatchOptions options) { Objects.requireNonNull(options, "'options' cannot be null."); final BatchOptions clone = options.clone(); verifyPartitionKey(clone.partitionKey()); return sendLinkMono.flatMap(link -> link.getLinkSize() .flatMap(size -> { final int maximumLinkSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; if (clone.maximumSizeInBytes() > maximumLinkSize) { return Mono.error(new IllegalArgumentException(String.format(Locale.US, "BatchOptions.maximumSizeInBytes (%s bytes) is larger than the link size (%s bytes).", clone.maximumSizeInBytes(), maximumLinkSize))); } final int batchSize = clone.maximumSizeInBytes() > 0 ? clone.maximumSizeInBytes() : maximumLinkSize; return Mono.just(new EventDataBatch(batchSize, clone.partitionKey(), () -> link.getErrorContext())); })); } /** * Sends a single event to the associated Event Hub. If the size of the single event exceeds the maximum size * allowed, an exception will be triggered and the send will fail. * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. * * @return A {@link Mono} that completes when the event is pushed to the service. */ public Mono<Void> send(EventData event) { Objects.requireNonNull(event, "'event' cannot be null."); return send(Flux.just(event)); } /** * Sends a single event to the associated Event Hub with the send options. If the size of the single event exceeds * the maximum size allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * @param event Event to send to the service. * @param options The set of options to consider when sending this event. * * @return A {@link Mono} that completes when the event is pushed to the service. 
*/ public Mono<Void> send(EventData event, SendOptions options) { Objects.requireNonNull(event, "'event' cannot be null."); Objects.requireNonNull(options, "'options' cannot be null."); return send(Flux.just(event), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * @param events Events to send to the service. * * @return A {@link Mono} that completes when all events are pushed to the service. */ public Mono<Void> send(Iterable<EventData> events) { Objects.requireNonNull(events, "'events' cannot be null."); return send(Flux.fromIterable(events)); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * * @return A {@link Mono} that completes when all events are pushed to the service. */ public Mono<Void> send(Iterable<EventData> events, SendOptions options) { Objects.requireNonNull(events, "'options' cannot be null."); return send(Flux.fromIterable(events), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * @param events Events to send to the service. * * @return A {@link Mono} that completes when all events are pushed to the service. 
*/ public Mono<Void> send(Flux<EventData> events) { Objects.requireNonNull(events, "'events' cannot be null."); return send(events, DEFAULT_SEND_OPTIONS); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * * @return A {@link Mono} that completes when all events are pushed to the service. */ public Mono<Void> send(Flux<EventData> events, SendOptions options) { Objects.requireNonNull(events, "'events' cannot be null."); Objects.requireNonNull(options, "'options' cannot be null."); return sendInternal(events, options); } /** * Sends the batch to the associated Event Hub. * @param batch The batch to send to the service. * * @return A {@link Mono} that completes when the batch is pushed to the service. * @throws NullPointerException if {@code batch} is {@code null}. * @see EventHubAsyncProducer * @see EventHubAsyncProducer */ public Mono<Void> send(EventDataBatch batch) { Objects.requireNonNull(batch, "'batch' cannot be null."); if (batch.getEvents().isEmpty()) { logger.info("Cannot send an EventBatch that is empty."); return Mono.empty(); } logger.info("Sending batch with partitionKey[{}], size[{}].", batch.getPartitionKey(), batch.getSize()); final List<Message> messages = EventDataUtil.toAmqpMessage(batch.getPartitionKey(), batch.getEvents()); return sendLinkMono.flatMap(link -> messages.size() == 1 ? link.send(messages.get(0)) : link.send(messages)); } private Mono<Void> sendInternalTracingDisabled(Flux<EventData> events, String partitionKey) { return sendLinkMono.flatMap(link -> { return link.getLinkSize() .flatMap(size -> { final int batchSize = size > 0 ? 
size : MAX_MESSAGE_LENGTH_BYTES; final BatchOptions batchOptions = new BatchOptions() .partitionKey(partitionKey) .maximumSizeInBytes(batchSize); return events.collect(new EventDataCollector(batchOptions, 1, () -> link.getErrorContext())); }) .flatMap(list -> sendInternal(Flux.fromIterable(list))); }); } private Mono<Void> sendInternalTracingEnabled(Flux<EventData> events, String partitionKey) { return sendLinkMono.flatMap(link -> { final AtomicReference<Context> sendSpanContext = new AtomicReference<>(Context.NONE); return link.getLinkSize() .flatMap(size -> { final int batchSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; final BatchOptions batchOptions = new BatchOptions() .partitionKey(partitionKey) .maximumSizeInBytes(batchSize); return events.map(eventData -> { Context parentContext = eventData.context(); Context entityContext = parentContext.addData(ENTITY_PATH, link.getEntityPath()); sendSpanContext.set(tracerProvider.startSpan(entityContext.addData(HOST_NAME, link.getHostname()), ProcessKind.SEND)); return setSpanContext(eventData, parentContext); }).collect(new EventDataCollector(batchOptions, 1, () -> link.getErrorContext())); }) .flatMap(list -> sendInternal(Flux.fromIterable(list))) .doOnEach(signal -> { tracerProvider.endSpan(sendSpanContext.get(), signal); }); }); } private EventData setSpanContext(EventData event, Context parentContext) { Optional<Object> eventContextData = event.context().getData(SPAN_CONTEXT); if (eventContextData.isPresent()) { Object spanContextObject = eventContextData.get(); if (spanContextObject instanceof Context) { tracerProvider.addSpanLinks((Context) eventContextData.get()); } else { logger.warning(String.format(Locale.US, "Event Data context type is not of type Context, but type: %s. Not adding span links.", spanContextObject != null ? 
spanContextObject.getClass() : "null")); } return event; } else { Context eventSpanContext = tracerProvider.startSpan(parentContext, ProcessKind.RECEIVE); if (eventSpanContext != null) { Optional<Object> eventDiagnosticIdOptional = eventSpanContext.getData(DIAGNOSTIC_ID_KEY); if (eventDiagnosticIdOptional.isPresent()) { event.addProperty(DIAGNOSTIC_ID_KEY, eventDiagnosticIdOptional.get().toString()); tracerProvider.endSpan(eventSpanContext, Signal.complete()); event.addContext(SPAN_CONTEXT, eventSpanContext); } } } return event; } private Mono<Void> sendInternal(Flux<EventDataBatch> eventBatches) { return eventBatches .flatMap(this::send) .then() .doOnError(error -> { logger.error("Error sending batch.", error); }); } private void verifyPartitionKey(String partitionKey) { if (ImplUtils.isNullOrEmpty(partitionKey)) { return; } if (isPartitionSender) { throw logger.logExceptionAsError(new IllegalArgumentException(String.format(Locale.US, "BatchOptions.partitionKey() cannot be set when an EventHubProducer is created with" + "EventHubProducerOptions.partitionId() set. This EventHubProducer can only send events to partition '%s'.", senderOptions.partitionId()))); } else if (partitionKey.length() > MAX_PARTITION_KEY_LENGTH) { throw logger.logExceptionAsError(new IllegalArgumentException(String.format(Locale.US, "PartitionKey '%s' exceeds the maximum allowed length: '%s'.", partitionKey, MAX_PARTITION_KEY_LENGTH))); } } /** * Disposes of the {@link EventHubAsyncProducer} by closing the underlying connection to the service. * @throws IOException if the underlying transport could not be closed and its resources could not be * disposed. */ @Override public void close() throws IOException { if (!isDisposed.getAndSet(true)) { final AmqpSendLink block = sendLinkMono.block(senderOptions.retry().tryTimeout()); if (block != null) { block.close(); } } } /** * Collects EventData into EventDataBatch to send to Event Hubs. 
If {@code maxNumberOfBatches} is {@code null} then * it'll collect as many batches as possible. Otherwise, if there are more events than can fit into {@code * maxNumberOfBatches}, then the collector throws a {@link AmqpException} with {@link * ErrorCondition */ private static class EventDataCollector implements Collector<EventData, List<EventDataBatch>, List<EventDataBatch>> { private final String partitionKey; private final int maxMessageSize; private final Integer maxNumberOfBatches; private final ErrorContextProvider contextProvider; private volatile EventDataBatch currentBatch; EventDataCollector(BatchOptions options, Integer maxNumberOfBatches, ErrorContextProvider contextProvider) { this.maxNumberOfBatches = maxNumberOfBatches; this.maxMessageSize = options.maximumSizeInBytes() > 0 ? options.maximumSizeInBytes() : MAX_MESSAGE_LENGTH_BYTES; this.partitionKey = options.partitionKey(); this.contextProvider = contextProvider; currentBatch = new EventDataBatch(this.maxMessageSize, options.partitionKey(), contextProvider); } @Override public Supplier<List<EventDataBatch>> supplier() { return ArrayList::new; } @Override public BiConsumer<List<EventDataBatch>, EventData> accumulator() { return (list, event) -> { EventDataBatch batch = currentBatch; if (batch.tryAdd(event)) { return; } if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) { final String message = String.format(Locale.US, "EventData does not fit into maximum number of batches. 
'%s'", maxNumberOfBatches); throw new AmqpException(false, ErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, contextProvider.getErrorContext()); } currentBatch = new EventDataBatch(maxMessageSize, partitionKey, contextProvider); currentBatch.tryAdd(event); list.add(batch); }; } @Override public BinaryOperator<List<EventDataBatch>> combiner() { return (existing, another) -> { existing.addAll(another); return existing; }; } @Override public Function<List<EventDataBatch>, List<EventDataBatch>> finisher() { return list -> { EventDataBatch batch = currentBatch; currentBatch = null; if (batch != null) { list.add(batch); } return list; }; } @Override public Set<Characteristics> characteristics() { return Collections.emptySet(); } } }
TODOs show up in IDE when you use the format: ```java // TODO (samvaity): not supported in Opencensus yet // builder.addLink((Context)eventContextData.get()); ```
/**
 * Ensures the event carries tracing information. If the event already holds a
 * span context it is linked to the current span; otherwise a MESSAGE span is
 * started, its diagnostic id is attached as an event property, and the span
 * context is stored on the event for downstream linking.
 *
 * @param event Event to stamp.
 * @param parentContext Context the new span is parented to.
 * @return The same {@code event} instance, possibly with an added property/context.
 */
private EventData setSpanContext(EventData event, Context parentContext) {
    Optional<Object> eventContextData = event.context().getData(SPAN_CONTEXT);
    if (eventContextData.isPresent()) {
        // Guard the cast: the SPAN_CONTEXT entry is externally settable, so the
        // stored value is not guaranteed to actually be a Context.
        Object spanContextObject = eventContextData.get();
        if (spanContextObject instanceof Context) {
            tracerProvider.addSpanLinks((Context) spanContextObject);
        } else {
            logger.warning(String.format(Locale.US,
                "Event Data context type is not of type Context, but type: %s. Not adding span links.",
                spanContextObject.getClass()));
        }
        return event;
    }

    Context eventSpanContext = tracerProvider.startSpan(parentContext, ProcessKind.MESSAGE);
    if (eventSpanContext != null && eventSpanContext.getData(DIAGNOSTIC_ID_KEY).isPresent()) {
        event.addProperty(DIAGNOSTIC_ID_KEY, eventSpanContext.getData(DIAGNOSTIC_ID_KEY).get().toString());
        // NOTE(review): a null signal is passed here; consider ending the span with
        // an explicit completion signal -- confirm tracerProvider.endSpan handles null.
        tracerProvider.endSpan(eventSpanContext, null);
        event.addContext(SPAN_CONTEXT, eventSpanContext);
    }
    return event;
}
/**
 * Stamps tracing information onto an event. An event that already carries a span
 * context gets linked to the current span (with a type guard on the stored value);
 * otherwise a new span is started and its diagnostic id is attached to the event.
 *
 * @param event Event to stamp.
 * @param parentContext Context the new span is parented to.
 * @return The same {@code event} instance.
 */
private EventData setSpanContext(EventData event, Context parentContext) {
    final Optional<Object> existing = event.context().getData(SPAN_CONTEXT);

    if (existing.isPresent()) {
        final Object candidate = existing.get();
        if (candidate instanceof Context) {
            tracerProvider.addSpanLinks((Context) candidate);
        } else {
            final String message = String.format(Locale.US,
                "Event Data context type is not of type Context, but type: %s. Not adding span links.",
                candidate != null ? candidate.getClass() : "null");
            logger.warning(message);
        }
        return event;
    }

    final Context spanContext = tracerProvider.startSpan(parentContext, ProcessKind.RECEIVE);
    if (spanContext != null) {
        spanContext.getData(DIAGNOSTIC_ID_KEY).ifPresent(diagnosticId -> {
            event.addProperty(DIAGNOSTIC_ID_KEY, diagnosticId.toString());
            tracerProvider.endSpan(spanContext, Signal.complete());
            event.addContext(SPAN_CONTEXT, spanContext);
        });
    }
    return event;
}
class EventHubAsyncProducer implements Closeable { private static final int MAX_PARTITION_KEY_LENGTH = 128; /** * The default maximum allowable size, in bytes, for a batch to be sent. */ public static final int MAX_MESSAGE_LENGTH_BYTES = 256 * 1024; private static final SendOptions DEFAULT_SEND_OPTIONS = new SendOptions(); private static final BatchOptions DEFAULT_BATCH_OPTIONS = new BatchOptions(); private final ClientLogger logger = new ClientLogger(EventHubAsyncProducer.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final EventHubProducerOptions senderOptions; private final Mono<AmqpSendLink> sendLinkMono; private final boolean isPartitionSender; private final TracerProvider tracerProvider; /** * Creates a new instance of this {@link EventHubAsyncProducer} that sends messages to {@link * EventHubProducerOptions * otherwise, allows the service to load balance the messages amongst available partitions. */ EventHubAsyncProducer(Mono<AmqpSendLink> amqpSendLinkMono, EventHubProducerOptions options, TracerProvider tracerProvider) { this.sendLinkMono = amqpSendLinkMono.cache(); this.senderOptions = options; this.isPartitionSender = !ImplUtils.isNullOrEmpty(options.partitionId()); this.tracerProvider = tracerProvider; } /** * Creates an {@link EventDataBatch} that can fit as many events as the transport allows. * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. */ public Mono<EventDataBatch> createBatch() { return createBatch(DEFAULT_BATCH_OPTIONS); } /** * Creates an {@link EventDataBatch} that can fit as many events as the transport allows. * @param options A set of options used to configure the {@link EventDataBatch}. * * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. 
*/ public Mono<EventDataBatch> createBatch(BatchOptions options) { Objects.requireNonNull(options); final BatchOptions clone = options.clone(); verifyPartitionKey(clone.partitionKey()); return sendLinkMono.flatMap(link -> link.getLinkSize() .flatMap(size -> { final int maximumLinkSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; if (clone.maximumSizeInBytes() > maximumLinkSize) { return Mono.error(new IllegalArgumentException(String.format(Locale.US, "BatchOptions.maximumSizeInBytes (%s bytes) is larger than the link size (%s bytes).", clone.maximumSizeInBytes(), maximumLinkSize))); } final int batchSize = clone.maximumSizeInBytes() > 0 ? clone.maximumSizeInBytes() : maximumLinkSize; return Mono.just(new EventDataBatch(batchSize, clone.partitionKey(), () -> link.getErrorContext())); })); } /** * Sends a single event to the associated Event Hub. If the size of the single event exceeds the maximum size * allowed, an exception will be triggered and the send will fail. * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. * * @return A {@link Mono} that completes when the event is pushed to the service. */ public Mono<Void> send(EventData event) { Objects.requireNonNull(event); return send(Flux.just(event)); } /** * Sends a single event to the associated Event Hub with the send options. If the size of the single event exceeds * the maximum size allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * @param event Event to send to the service. * @param options The set of options to consider when sending this event. * * @return A {@link Mono} that completes when the event is pushed to the service. 
*/ public Mono<Void> send(EventData event, SendOptions options) { Objects.requireNonNull(event); Objects.requireNonNull(options); return send(Flux.just(event), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * @param events Events to send to the service. * * @return A {@link Mono} that completes when all events are pushed to the service. */ public Mono<Void> send(Iterable<EventData> events) { Objects.requireNonNull(events); return send(Flux.fromIterable(events)); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * * @return A {@link Mono} that completes when all events are pushed to the service. */ public Mono<Void> send(Iterable<EventData> events, SendOptions options) { Objects.requireNonNull(events); return send(Flux.fromIterable(events), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * @param events Events to send to the service. * * @return A {@link Mono} that completes when all events are pushed to the service. */ public Mono<Void> send(Flux<EventData> events) { Objects.requireNonNull(events); return send(events, DEFAULT_SEND_OPTIONS); } /** * Sends a set of events to the associated Event Hub using a batched approach. 
If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * * @return A {@link Mono} that completes when all events are pushed to the service. */ public Mono<Void> send(Flux<EventData> events, SendOptions options) { Objects.requireNonNull(events); Objects.requireNonNull(options); return sendInternal(events, options); } /** * Sends the batch to the associated Event Hub. * @param batch The batch to send to the service. * * @return A {@link Mono} that completes when the batch is pushed to the service. * @throws NullPointerException if {@code batch} is {@code null}. * @see EventHubAsyncProducer * @see EventHubAsyncProducer */ public Mono<Void> send(EventDataBatch batch) { Objects.requireNonNull(batch); if (batch.getEvents().isEmpty()) { logger.info("Cannot send an EventBatch that is empty."); return Mono.empty(); } logger.info("Sending batch with partitionKey[{}], size[{}].", batch.getPartitionKey(), batch.getSize()); final List<Message> messages = EventDataUtil.toAmqpMessage(batch.getPartitionKey(), batch.getEvents()); return sendLinkMono.flatMap(link -> messages.size() == 1 ? link.send(messages.get(0)) : link.send(messages)); } private Mono<Void> sendInternal(Flux<EventData> events, SendOptions options) { final String partitionKey = options.partitionKey(); verifyPartitionKey(partitionKey); return sendLinkMono.flatMap(link -> { final AtomicReference<Context> sendSpanContext = new AtomicReference<>(Context.NONE); return link.getLinkSize() .flatMap(size -> { final int batchSize = size > 0 ? 
size : MAX_MESSAGE_LENGTH_BYTES; final BatchOptions batchOptions = new BatchOptions() .partitionKey(partitionKey) .maximumSizeInBytes(batchSize); return events.map(eventData -> { Context parentContext = eventData.context(); Context entityContext = parentContext.addData(ENTITY_PATH, link.getEntityPath()); sendSpanContext.set(tracerProvider.startSpan(entityContext.addData(HOST_NAME, link.getHostname()), ProcessKind.SEND)); return setSpanContext(eventData, parentContext); }).collect(new EventDataCollector(batchOptions, 1, () -> link.getErrorContext())); }) .flatMap(list -> sendInternal(Flux.fromIterable(list))) .doOnEach(signal -> tracerProvider.endSpan(sendSpanContext.get(), signal)); }); } private Mono<Void> sendInternal(Flux<EventDataBatch> eventBatches) { return eventBatches .flatMap(this::send) .then() .doOnError(error -> { logger.error("Error sending batch.", error); }); } private void verifyPartitionKey(String partitionKey) { if (ImplUtils.isNullOrEmpty(partitionKey)) { return; } if (isPartitionSender) { throw logger.logExceptionAsError(new IllegalArgumentException(String.format(Locale.US, "BatchOptions.partitionKey() cannot be set when an EventHubProducer is created with" + "EventHubProducerOptions.partitionId() set. This EventHubProducer can only send events to partition '%s'.", senderOptions.partitionId()))); } else if (partitionKey.length() > MAX_PARTITION_KEY_LENGTH) { throw logger.logExceptionAsError(new IllegalArgumentException(String.format(Locale.US, "PartitionKey '%s' exceeds the maximum allowed length: '%s'.", partitionKey, MAX_PARTITION_KEY_LENGTH))); } } /** * Disposes of the {@link EventHubAsyncProducer} by closing the underlying connection to the service. * @throws IOException if the underlying transport could not be closed and its resources could not be * disposed. 
*/ @Override public void close() throws IOException { if (!isDisposed.getAndSet(true)) { final AmqpSendLink block = sendLinkMono.block(senderOptions.retry().tryTimeout()); if (block != null) { block.close(); } } } /** * Collects EventData into EventDataBatch to send to Event Hubs. If {@code maxNumberOfBatches} is {@code null} then * it'll collect as many batches as possible. Otherwise, if there are more events than can fit into {@code * maxNumberOfBatches}, then the collector throws a {@link AmqpException} with {@link * ErrorCondition */ private static class EventDataCollector implements Collector<EventData, List<EventDataBatch>, List<EventDataBatch>> { private final String partitionKey; private final int maxMessageSize; private final Integer maxNumberOfBatches; private final ErrorContextProvider contextProvider; private volatile EventDataBatch currentBatch; EventDataCollector(BatchOptions options, Integer maxNumberOfBatches, ErrorContextProvider contextProvider) { this.maxNumberOfBatches = maxNumberOfBatches; this.maxMessageSize = options.maximumSizeInBytes() > 0 ? options.maximumSizeInBytes() : MAX_MESSAGE_LENGTH_BYTES; this.partitionKey = options.partitionKey(); this.contextProvider = contextProvider; currentBatch = new EventDataBatch(this.maxMessageSize, options.partitionKey(), contextProvider); } @Override public Supplier<List<EventDataBatch>> supplier() { return ArrayList::new; } @Override public BiConsumer<List<EventDataBatch>, EventData> accumulator() { return (list, event) -> { EventDataBatch batch = currentBatch; if (batch.tryAdd(event)) { return; } if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) { final String message = String.format(Locale.US, "EventData does not fit into maximum number of batches. 
'%s'", maxNumberOfBatches); throw new AmqpException(false, ErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, contextProvider.getErrorContext()); } currentBatch = new EventDataBatch(maxMessageSize, partitionKey, contextProvider); currentBatch.tryAdd(event); list.add(batch); }; } @Override public BinaryOperator<List<EventDataBatch>> combiner() { return (existing, another) -> { existing.addAll(another); return existing; }; } @Override public Function<List<EventDataBatch>, List<EventDataBatch>> finisher() { return list -> { EventDataBatch batch = currentBatch; currentBatch = null; if (batch != null) { list.add(batch); } return list; }; } @Override public Set<Characteristics> characteristics() { return Collections.emptySet(); } } }
class EventHubAsyncProducer implements Closeable { private static final int MAX_PARTITION_KEY_LENGTH = 128; /** * The default maximum allowable size, in bytes, for a batch to be sent. */ public static final int MAX_MESSAGE_LENGTH_BYTES = 256 * 1024; private static final SendOptions DEFAULT_SEND_OPTIONS = new SendOptions(); private static final BatchOptions DEFAULT_BATCH_OPTIONS = new BatchOptions(); private final ClientLogger logger = new ClientLogger(EventHubAsyncProducer.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final EventHubProducerOptions senderOptions; private final Mono<AmqpSendLink> sendLinkMono; private final boolean isPartitionSender; private final TracerProvider tracerProvider; /** * Creates a new instance of this {@link EventHubAsyncProducer} that sends messages to {@link * EventHubProducerOptions * otherwise, allows the service to load balance the messages amongst available partitions. */ EventHubAsyncProducer(Mono<AmqpSendLink> amqpSendLinkMono, EventHubProducerOptions options, TracerProvider tracerProvider) { this.sendLinkMono = amqpSendLinkMono.cache(); this.senderOptions = options; this.isPartitionSender = !ImplUtils.isNullOrEmpty(options.partitionId()); this.tracerProvider = tracerProvider; } /** * Creates an {@link EventDataBatch} that can fit as many events as the transport allows. * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. */ public Mono<EventDataBatch> createBatch() { return createBatch(DEFAULT_BATCH_OPTIONS); } /** * Creates an {@link EventDataBatch} that can fit as many events as the transport allows. * @param options A set of options used to configure the {@link EventDataBatch}. * * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. 
*/ public Mono<EventDataBatch> createBatch(BatchOptions options) { Objects.requireNonNull(options, "'options' cannot be null."); final BatchOptions clone = options.clone(); verifyPartitionKey(clone.partitionKey()); return sendLinkMono.flatMap(link -> link.getLinkSize() .flatMap(size -> { final int maximumLinkSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; if (clone.maximumSizeInBytes() > maximumLinkSize) { return Mono.error(new IllegalArgumentException(String.format(Locale.US, "BatchOptions.maximumSizeInBytes (%s bytes) is larger than the link size (%s bytes).", clone.maximumSizeInBytes(), maximumLinkSize))); } final int batchSize = clone.maximumSizeInBytes() > 0 ? clone.maximumSizeInBytes() : maximumLinkSize; return Mono.just(new EventDataBatch(batchSize, clone.partitionKey(), () -> link.getErrorContext())); })); } /** * Sends a single event to the associated Event Hub. If the size of the single event exceeds the maximum size * allowed, an exception will be triggered and the send will fail. * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. * * @return A {@link Mono} that completes when the event is pushed to the service. */ public Mono<Void> send(EventData event) { Objects.requireNonNull(event, "'event' cannot be null."); return send(Flux.just(event)); } /** * Sends a single event to the associated Event Hub with the send options. If the size of the single event exceeds * the maximum size allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * @param event Event to send to the service. * @param options The set of options to consider when sending this event. * * @return A {@link Mono} that completes when the event is pushed to the service. 
*/ public Mono<Void> send(EventData event, SendOptions options) { Objects.requireNonNull(event, "'event' cannot be null."); Objects.requireNonNull(options, "'options' cannot be null."); return send(Flux.just(event), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * @param events Events to send to the service. * * @return A {@link Mono} that completes when all events are pushed to the service. */ public Mono<Void> send(Iterable<EventData> events) { Objects.requireNonNull(events, "'events' cannot be null."); return send(Flux.fromIterable(events)); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * * @return A {@link Mono} that completes when all events are pushed to the service. */ public Mono<Void> send(Iterable<EventData> events, SendOptions options) { Objects.requireNonNull(events, "'options' cannot be null."); return send(Flux.fromIterable(events), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * @param events Events to send to the service. * * @return A {@link Mono} that completes when all events are pushed to the service. 
*/ public Mono<Void> send(Flux<EventData> events) { Objects.requireNonNull(events, "'events' cannot be null."); return send(events, DEFAULT_SEND_OPTIONS); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * * @return A {@link Mono} that completes when all events are pushed to the service. */ public Mono<Void> send(Flux<EventData> events, SendOptions options) { Objects.requireNonNull(events, "'events' cannot be null."); Objects.requireNonNull(options, "'options' cannot be null."); return sendInternal(events, options); } /** * Sends the batch to the associated Event Hub. * @param batch The batch to send to the service. * * @return A {@link Mono} that completes when the batch is pushed to the service. * @throws NullPointerException if {@code batch} is {@code null}. * @see EventHubAsyncProducer * @see EventHubAsyncProducer */ public Mono<Void> send(EventDataBatch batch) { Objects.requireNonNull(batch, "'batch' cannot be null."); if (batch.getEvents().isEmpty()) { logger.info("Cannot send an EventBatch that is empty."); return Mono.empty(); } logger.info("Sending batch with partitionKey[{}], size[{}].", batch.getPartitionKey(), batch.getSize()); final List<Message> messages = EventDataUtil.toAmqpMessage(batch.getPartitionKey(), batch.getEvents()); return sendLinkMono.flatMap(link -> messages.size() == 1 ? 
link.send(messages.get(0)) : link.send(messages)); } private Mono<Void> sendInternal(Flux<EventData> events, SendOptions options) { final String partitionKey = options.partitionKey(); verifyPartitionKey(partitionKey); if (tracerProvider.isEnabled()) { return sendInternalTracingEnabled(events, partitionKey); } else { return sendInternalTracingDisabled(events, partitionKey); } } private Mono<Void> sendInternalTracingDisabled(Flux<EventData> events, String partitionKey) { return sendLinkMono.flatMap(link -> { return link.getLinkSize() .flatMap(size -> { final int batchSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; final BatchOptions batchOptions = new BatchOptions() .partitionKey(partitionKey) .maximumSizeInBytes(batchSize); return events.collect(new EventDataCollector(batchOptions, 1, () -> link.getErrorContext())); }) .flatMap(list -> sendInternal(Flux.fromIterable(list))); }); } private Mono<Void> sendInternalTracingEnabled(Flux<EventData> events, String partitionKey) { return sendLinkMono.flatMap(link -> { final AtomicReference<Context> sendSpanContext = new AtomicReference<>(Context.NONE); return link.getLinkSize() .flatMap(size -> { final int batchSize = size > 0 ? 
size : MAX_MESSAGE_LENGTH_BYTES; final BatchOptions batchOptions = new BatchOptions() .partitionKey(partitionKey) .maximumSizeInBytes(batchSize); return events.map(eventData -> { Context parentContext = eventData.context(); Context entityContext = parentContext.addData(ENTITY_PATH, link.getEntityPath()); sendSpanContext.set(tracerProvider.startSpan(entityContext.addData(HOST_NAME, link.getHostname()), ProcessKind.SEND)); return setSpanContext(eventData, parentContext); }).collect(new EventDataCollector(batchOptions, 1, () -> link.getErrorContext())); }) .flatMap(list -> sendInternal(Flux.fromIterable(list))) .doOnEach(signal -> { tracerProvider.endSpan(sendSpanContext.get(), signal); }); }); } private Mono<Void> sendInternal(Flux<EventDataBatch> eventBatches) { return eventBatches .flatMap(this::send) .then() .doOnError(error -> { logger.error("Error sending batch.", error); }); } private void verifyPartitionKey(String partitionKey) { if (ImplUtils.isNullOrEmpty(partitionKey)) { return; } if (isPartitionSender) { throw logger.logExceptionAsError(new IllegalArgumentException(String.format(Locale.US, "BatchOptions.partitionKey() cannot be set when an EventHubProducer is created with" + "EventHubProducerOptions.partitionId() set. This EventHubProducer can only send events to partition '%s'.", senderOptions.partitionId()))); } else if (partitionKey.length() > MAX_PARTITION_KEY_LENGTH) { throw logger.logExceptionAsError(new IllegalArgumentException(String.format(Locale.US, "PartitionKey '%s' exceeds the maximum allowed length: '%s'.", partitionKey, MAX_PARTITION_KEY_LENGTH))); } } /** * Disposes of the {@link EventHubAsyncProducer} by closing the underlying connection to the service. * @throws IOException if the underlying transport could not be closed and its resources could not be * disposed. 
*/ @Override public void close() throws IOException { if (!isDisposed.getAndSet(true)) { final AmqpSendLink block = sendLinkMono.block(senderOptions.retry().tryTimeout()); if (block != null) { block.close(); } } } /** * Collects EventData into EventDataBatch to send to Event Hubs. If {@code maxNumberOfBatches} is {@code null} then * it'll collect as many batches as possible. Otherwise, if there are more events than can fit into {@code * maxNumberOfBatches}, then the collector throws a {@link AmqpException} with {@link * ErrorCondition */ private static class EventDataCollector implements Collector<EventData, List<EventDataBatch>, List<EventDataBatch>> { private final String partitionKey; private final int maxMessageSize; private final Integer maxNumberOfBatches; private final ErrorContextProvider contextProvider; private volatile EventDataBatch currentBatch; EventDataCollector(BatchOptions options, Integer maxNumberOfBatches, ErrorContextProvider contextProvider) { this.maxNumberOfBatches = maxNumberOfBatches; this.maxMessageSize = options.maximumSizeInBytes() > 0 ? options.maximumSizeInBytes() : MAX_MESSAGE_LENGTH_BYTES; this.partitionKey = options.partitionKey(); this.contextProvider = contextProvider; currentBatch = new EventDataBatch(this.maxMessageSize, options.partitionKey(), contextProvider); } @Override public Supplier<List<EventDataBatch>> supplier() { return ArrayList::new; } @Override public BiConsumer<List<EventDataBatch>, EventData> accumulator() { return (list, event) -> { EventDataBatch batch = currentBatch; if (batch.tryAdd(event)) { return; } if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) { final String message = String.format(Locale.US, "EventData does not fit into maximum number of batches. 
'%s'", maxNumberOfBatches); throw new AmqpException(false, ErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, contextProvider.getErrorContext()); } currentBatch = new EventDataBatch(maxMessageSize, partitionKey, contextProvider); currentBatch.tryAdd(event); list.add(batch); }; } @Override public BinaryOperator<List<EventDataBatch>> combiner() { return (existing, another) -> { existing.addAll(another); return existing; }; } @Override public Function<List<EventDataBatch>, List<EventDataBatch>> finisher() { return list -> { EventDataBatch batch = currentBatch; currentBatch = null; if (batch != null) { list.add(batch); } return list; }; } @Override public Set<Characteristics> characteristics() { return Collections.emptySet(); } } }
I'd add a check that `eventContextData.get()` is also `instanceof Context`, to be defensive. Having a `ClassCastException` crash the program isn't fun, whereas logging or explicit error handling would surface the problem gracefully.
/**
 * Links or creates a tracing span for the given event.
 * <p>
 * If the event already carries a span context, it is linked to the current span. Otherwise a new
 * MESSAGE-kind span is started and its diagnostic id is stamped onto the event so that downstream
 * consumers can correlate with the producer's trace.
 *
 * @param event The event being sent.
 * @param parentContext Tracing context to use as the parent for a newly started span.
 * @return The same {@code event}, possibly with diagnostic-id property and span context attached.
 */
private EventData setSpanContext(EventData event, Context parentContext) {
    Optional<Object> eventContextData = event.context().getData(SPAN_CONTEXT);
    if (eventContextData.isPresent()) {
        // Defensive: the stored value may not be a Context. A blind cast would throw
        // ClassCastException and fail the whole send; log and continue instead.
        Object spanContextObject = eventContextData.get();
        if (spanContextObject instanceof Context) {
            tracerProvider.addSpanLinks((Context) spanContextObject);
        } else {
            logger.warning("Event Data context type is not of type Context, but type: {}. Not adding span links.",
                spanContextObject.getClass());
        }
        return event;
    }

    Context eventSpanContext = tracerProvider.startSpan(parentContext, ProcessKind.MESSAGE);
    if (eventSpanContext != null) {
        // Fetch the diagnostic id once rather than calling getData twice.
        Optional<Object> diagnosticId = eventSpanContext.getData(DIAGNOSTIC_ID_KEY);
        if (diagnosticId.isPresent()) {
            event.addProperty(DIAGNOSTIC_ID_KEY, diagnosticId.get().toString());
            tracerProvider.endSpan(eventSpanContext, null);
            event.addContext(SPAN_CONTEXT, eventSpanContext);
        }
    }
    return event;
}
tracerProvider.addSpanLinks((Context) eventContextData.get());
/**
 * Links or creates a tracing span for the given event.
 * <p>
 * If the event already carries a span context, it is linked to the current span. Otherwise a new
 * RECEIVE-kind span is started and its diagnostic id is stamped onto the event so that downstream
 * consumers can correlate with the producer's trace.
 *
 * @param event The event being sent.
 * @param parentContext Tracing context to use as the parent for a newly started span.
 * @return The same {@code event}, possibly with diagnostic-id property and span context attached.
 */
private EventData setSpanContext(EventData event, Context parentContext) {
    Optional<Object> eventContextData = event.context().getData(SPAN_CONTEXT);
    if (eventContextData.isPresent()) {
        Object spanContextObject = eventContextData.get();
        if (spanContextObject instanceof Context) {
            tracerProvider.addSpanLinks((Context) spanContextObject);
        } else {
            // Optional.get() never returns null, so no null branch is needed. Use the
            // logger's parameterized form, consistent with logging elsewhere in this class.
            logger.warning("Event Data context type is not of type Context, but type: {}. Not adding span links.",
                spanContextObject.getClass());
        }
        return event;
    } else {
        Context eventSpanContext = tracerProvider.startSpan(parentContext, ProcessKind.RECEIVE);
        if (eventSpanContext != null) {
            // Fetch the diagnostic id once rather than calling getData twice.
            Optional<Object> eventDiagnosticIdOptional = eventSpanContext.getData(DIAGNOSTIC_ID_KEY);
            if (eventDiagnosticIdOptional.isPresent()) {
                event.addProperty(DIAGNOSTIC_ID_KEY, eventDiagnosticIdOptional.get().toString());
                tracerProvider.endSpan(eventSpanContext, Signal.complete());
                event.addContext(SPAN_CONTEXT, eventSpanContext);
            }
        }
    }
    return event;
}
class EventHubAsyncProducer implements Closeable { private static final int MAX_PARTITION_KEY_LENGTH = 128; /** * The default maximum allowable size, in bytes, for a batch to be sent. */ public static final int MAX_MESSAGE_LENGTH_BYTES = 256 * 1024; private static final SendOptions DEFAULT_SEND_OPTIONS = new SendOptions(); private static final BatchOptions DEFAULT_BATCH_OPTIONS = new BatchOptions(); private final ClientLogger logger = new ClientLogger(EventHubAsyncProducer.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final EventHubProducerOptions senderOptions; private final Mono<AmqpSendLink> sendLinkMono; private final boolean isPartitionSender; private final TracerProvider tracerProvider; /** * Creates a new instance of this {@link EventHubAsyncProducer} that sends messages to {@link * EventHubProducerOptions * otherwise, allows the service to load balance the messages amongst available partitions. */ EventHubAsyncProducer(Mono<AmqpSendLink> amqpSendLinkMono, EventHubProducerOptions options, TracerProvider tracerProvider) { this.sendLinkMono = amqpSendLinkMono.cache(); this.senderOptions = options; this.isPartitionSender = !ImplUtils.isNullOrEmpty(options.partitionId()); this.tracerProvider = tracerProvider; } /** * Creates an {@link EventDataBatch} that can fit as many events as the transport allows. * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. */ public Mono<EventDataBatch> createBatch() { return createBatch(DEFAULT_BATCH_OPTIONS); } /** * Creates an {@link EventDataBatch} that can fit as many events as the transport allows. * @param options A set of options used to configure the {@link EventDataBatch}. * * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. 
*/ public Mono<EventDataBatch> createBatch(BatchOptions options) { Objects.requireNonNull(options); final BatchOptions clone = options.clone(); verifyPartitionKey(clone.partitionKey()); return sendLinkMono.flatMap(link -> link.getLinkSize() .flatMap(size -> { final int maximumLinkSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; if (clone.maximumSizeInBytes() > maximumLinkSize) { return Mono.error(new IllegalArgumentException(String.format(Locale.US, "BatchOptions.maximumSizeInBytes (%s bytes) is larger than the link size (%s bytes).", clone.maximumSizeInBytes(), maximumLinkSize))); } final int batchSize = clone.maximumSizeInBytes() > 0 ? clone.maximumSizeInBytes() : maximumLinkSize; return Mono.just(new EventDataBatch(batchSize, clone.partitionKey(), () -> link.getErrorContext())); })); } /** * Sends a single event to the associated Event Hub. If the size of the single event exceeds the maximum size * allowed, an exception will be triggered and the send will fail. * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. * * @return A {@link Mono} that completes when the event is pushed to the service. */ public Mono<Void> send(EventData event) { Objects.requireNonNull(event); return send(Flux.just(event)); } /** * Sends a single event to the associated Event Hub with the send options. If the size of the single event exceeds * the maximum size allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * @param event Event to send to the service. * @param options The set of options to consider when sending this event. * * @return A {@link Mono} that completes when the event is pushed to the service. 
*/ public Mono<Void> send(EventData event, SendOptions options) { Objects.requireNonNull(event); Objects.requireNonNull(options); return send(Flux.just(event), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * @param events Events to send to the service. * * @return A {@link Mono} that completes when all events are pushed to the service. */ public Mono<Void> send(Iterable<EventData> events) { Objects.requireNonNull(events); return send(Flux.fromIterable(events)); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * * @return A {@link Mono} that completes when all events are pushed to the service. */ public Mono<Void> send(Iterable<EventData> events, SendOptions options) { Objects.requireNonNull(events); return send(Flux.fromIterable(events), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * @param events Events to send to the service. * * @return A {@link Mono} that completes when all events are pushed to the service. */ public Mono<Void> send(Flux<EventData> events) { Objects.requireNonNull(events); return send(events, DEFAULT_SEND_OPTIONS); } /** * Sends a set of events to the associated Event Hub using a batched approach. 
If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * * @return A {@link Mono} that completes when all events are pushed to the service. */ public Mono<Void> send(Flux<EventData> events, SendOptions options) { Objects.requireNonNull(events); Objects.requireNonNull(options); return sendInternal(events, options); } /** * Sends the batch to the associated Event Hub. * @param batch The batch to send to the service. * * @return A {@link Mono} that completes when the batch is pushed to the service. * @throws NullPointerException if {@code batch} is {@code null}. * @see EventHubAsyncProducer * @see EventHubAsyncProducer */ public Mono<Void> send(EventDataBatch batch) { Objects.requireNonNull(batch); if (batch.getEvents().isEmpty()) { logger.info("Cannot send an EventBatch that is empty."); return Mono.empty(); } logger.info("Sending batch with partitionKey[{}], size[{}].", batch.getPartitionKey(), batch.getSize()); final List<Message> messages = EventDataUtil.toAmqpMessage(batch.getPartitionKey(), batch.getEvents()); return sendLinkMono.flatMap(link -> messages.size() == 1 ? link.send(messages.get(0)) : link.send(messages)); } private Mono<Void> sendInternal(Flux<EventData> events, SendOptions options) { final String partitionKey = options.partitionKey(); verifyPartitionKey(partitionKey); return sendLinkMono.flatMap(link -> { final AtomicReference<Context> sendSpanContext = new AtomicReference<>(Context.NONE); return link.getLinkSize() .flatMap(size -> { final int batchSize = size > 0 ? 
size : MAX_MESSAGE_LENGTH_BYTES; final BatchOptions batchOptions = new BatchOptions() .partitionKey(partitionKey) .maximumSizeInBytes(batchSize); return events.map(eventData -> { Context parentContext = eventData.context(); Context entityContext = parentContext.addData(ENTITY_PATH, link.getEntityPath()); sendSpanContext.set(tracerProvider.startSpan(entityContext.addData(HOST_NAME, link.getHostname()), ProcessKind.SEND)); return setSpanContext(eventData, parentContext); }).collect(new EventDataCollector(batchOptions, 1, () -> link.getErrorContext())); }) .flatMap(list -> sendInternal(Flux.fromIterable(list))) .doOnEach(signal -> tracerProvider.endSpan(sendSpanContext.get(), signal)); }); } private Mono<Void> sendInternal(Flux<EventDataBatch> eventBatches) { return eventBatches .flatMap(this::send) .then() .doOnError(error -> { logger.error("Error sending batch.", error); }); } private void verifyPartitionKey(String partitionKey) { if (ImplUtils.isNullOrEmpty(partitionKey)) { return; } if (isPartitionSender) { throw logger.logExceptionAsError(new IllegalArgumentException(String.format(Locale.US, "BatchOptions.partitionKey() cannot be set when an EventHubProducer is created with" + "EventHubProducerOptions.partitionId() set. This EventHubProducer can only send events to partition '%s'.", senderOptions.partitionId()))); } else if (partitionKey.length() > MAX_PARTITION_KEY_LENGTH) { throw logger.logExceptionAsError(new IllegalArgumentException(String.format(Locale.US, "PartitionKey '%s' exceeds the maximum allowed length: '%s'.", partitionKey, MAX_PARTITION_KEY_LENGTH))); } } /** * Disposes of the {@link EventHubAsyncProducer} by closing the underlying connection to the service. * @throws IOException if the underlying transport could not be closed and its resources could not be * disposed. 
*/ @Override public void close() throws IOException { if (!isDisposed.getAndSet(true)) { final AmqpSendLink block = sendLinkMono.block(senderOptions.retry().tryTimeout()); if (block != null) { block.close(); } } } /** * Collects EventData into EventDataBatch to send to Event Hubs. If {@code maxNumberOfBatches} is {@code null} then * it'll collect as many batches as possible. Otherwise, if there are more events than can fit into {@code * maxNumberOfBatches}, then the collector throws a {@link AmqpException} with {@link * ErrorCondition */ private static class EventDataCollector implements Collector<EventData, List<EventDataBatch>, List<EventDataBatch>> { private final String partitionKey; private final int maxMessageSize; private final Integer maxNumberOfBatches; private final ErrorContextProvider contextProvider; private volatile EventDataBatch currentBatch; EventDataCollector(BatchOptions options, Integer maxNumberOfBatches, ErrorContextProvider contextProvider) { this.maxNumberOfBatches = maxNumberOfBatches; this.maxMessageSize = options.maximumSizeInBytes() > 0 ? options.maximumSizeInBytes() : MAX_MESSAGE_LENGTH_BYTES; this.partitionKey = options.partitionKey(); this.contextProvider = contextProvider; currentBatch = new EventDataBatch(this.maxMessageSize, options.partitionKey(), contextProvider); } @Override public Supplier<List<EventDataBatch>> supplier() { return ArrayList::new; } @Override public BiConsumer<List<EventDataBatch>, EventData> accumulator() { return (list, event) -> { EventDataBatch batch = currentBatch; if (batch.tryAdd(event)) { return; } if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) { final String message = String.format(Locale.US, "EventData does not fit into maximum number of batches. 
'%s'", maxNumberOfBatches); throw new AmqpException(false, ErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, contextProvider.getErrorContext()); } currentBatch = new EventDataBatch(maxMessageSize, partitionKey, contextProvider); currentBatch.tryAdd(event); list.add(batch); }; } @Override public BinaryOperator<List<EventDataBatch>> combiner() { return (existing, another) -> { existing.addAll(another); return existing; }; } @Override public Function<List<EventDataBatch>, List<EventDataBatch>> finisher() { return list -> { EventDataBatch batch = currentBatch; currentBatch = null; if (batch != null) { list.add(batch); } return list; }; } @Override public Set<Characteristics> characteristics() { return Collections.emptySet(); } } }
class EventHubAsyncProducer implements Closeable { private static final int MAX_PARTITION_KEY_LENGTH = 128; /** * The default maximum allowable size, in bytes, for a batch to be sent. */ public static final int MAX_MESSAGE_LENGTH_BYTES = 256 * 1024; private static final SendOptions DEFAULT_SEND_OPTIONS = new SendOptions(); private static final BatchOptions DEFAULT_BATCH_OPTIONS = new BatchOptions(); private final ClientLogger logger = new ClientLogger(EventHubAsyncProducer.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final EventHubProducerOptions senderOptions; private final Mono<AmqpSendLink> sendLinkMono; private final boolean isPartitionSender; private final TracerProvider tracerProvider; /** * Creates a new instance of this {@link EventHubAsyncProducer} that sends messages to {@link * EventHubProducerOptions * otherwise, allows the service to load balance the messages amongst available partitions. */ EventHubAsyncProducer(Mono<AmqpSendLink> amqpSendLinkMono, EventHubProducerOptions options, TracerProvider tracerProvider) { this.sendLinkMono = amqpSendLinkMono.cache(); this.senderOptions = options; this.isPartitionSender = !ImplUtils.isNullOrEmpty(options.partitionId()); this.tracerProvider = tracerProvider; } /** * Creates an {@link EventDataBatch} that can fit as many events as the transport allows. * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. */ public Mono<EventDataBatch> createBatch() { return createBatch(DEFAULT_BATCH_OPTIONS); } /** * Creates an {@link EventDataBatch} that can fit as many events as the transport allows. * @param options A set of options used to configure the {@link EventDataBatch}. * * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. 
*/ public Mono<EventDataBatch> createBatch(BatchOptions options) { Objects.requireNonNull(options, "'options' cannot be null."); final BatchOptions clone = options.clone(); verifyPartitionKey(clone.partitionKey()); return sendLinkMono.flatMap(link -> link.getLinkSize() .flatMap(size -> { final int maximumLinkSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; if (clone.maximumSizeInBytes() > maximumLinkSize) { return Mono.error(new IllegalArgumentException(String.format(Locale.US, "BatchOptions.maximumSizeInBytes (%s bytes) is larger than the link size (%s bytes).", clone.maximumSizeInBytes(), maximumLinkSize))); } final int batchSize = clone.maximumSizeInBytes() > 0 ? clone.maximumSizeInBytes() : maximumLinkSize; return Mono.just(new EventDataBatch(batchSize, clone.partitionKey(), () -> link.getErrorContext())); })); } /** * Sends a single event to the associated Event Hub. If the size of the single event exceeds the maximum size * allowed, an exception will be triggered and the send will fail. * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. * * @return A {@link Mono} that completes when the event is pushed to the service. */ public Mono<Void> send(EventData event) { Objects.requireNonNull(event, "'event' cannot be null."); return send(Flux.just(event)); } /** * Sends a single event to the associated Event Hub with the send options. If the size of the single event exceeds * the maximum size allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * @param event Event to send to the service. * @param options The set of options to consider when sending this event. * * @return A {@link Mono} that completes when the event is pushed to the service. 
*/ public Mono<Void> send(EventData event, SendOptions options) { Objects.requireNonNull(event, "'event' cannot be null."); Objects.requireNonNull(options, "'options' cannot be null."); return send(Flux.just(event), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * @param events Events to send to the service. * * @return A {@link Mono} that completes when all events are pushed to the service. */ public Mono<Void> send(Iterable<EventData> events) { Objects.requireNonNull(events, "'events' cannot be null."); return send(Flux.fromIterable(events)); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * * @return A {@link Mono} that completes when all events are pushed to the service. */ public Mono<Void> send(Iterable<EventData> events, SendOptions options) { Objects.requireNonNull(events, "'options' cannot be null."); return send(Flux.fromIterable(events), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * @param events Events to send to the service. * * @return A {@link Mono} that completes when all events are pushed to the service. 
*/ public Mono<Void> send(Flux<EventData> events) { Objects.requireNonNull(events, "'events' cannot be null."); return send(events, DEFAULT_SEND_OPTIONS); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * * @return A {@link Mono} that completes when all events are pushed to the service. */ public Mono<Void> send(Flux<EventData> events, SendOptions options) { Objects.requireNonNull(events, "'events' cannot be null."); Objects.requireNonNull(options, "'options' cannot be null."); return sendInternal(events, options); } /** * Sends the batch to the associated Event Hub. * @param batch The batch to send to the service. * * @return A {@link Mono} that completes when the batch is pushed to the service. * @throws NullPointerException if {@code batch} is {@code null}. * @see EventHubAsyncProducer * @see EventHubAsyncProducer */ public Mono<Void> send(EventDataBatch batch) { Objects.requireNonNull(batch, "'batch' cannot be null."); if (batch.getEvents().isEmpty()) { logger.info("Cannot send an EventBatch that is empty."); return Mono.empty(); } logger.info("Sending batch with partitionKey[{}], size[{}].", batch.getPartitionKey(), batch.getSize()); final List<Message> messages = EventDataUtil.toAmqpMessage(batch.getPartitionKey(), batch.getEvents()); return sendLinkMono.flatMap(link -> messages.size() == 1 ? 
link.send(messages.get(0)) : link.send(messages)); } private Mono<Void> sendInternal(Flux<EventData> events, SendOptions options) { final String partitionKey = options.partitionKey(); verifyPartitionKey(partitionKey); if (tracerProvider.isEnabled()) { return sendInternalTracingEnabled(events, partitionKey); } else { return sendInternalTracingDisabled(events, partitionKey); } } private Mono<Void> sendInternalTracingDisabled(Flux<EventData> events, String partitionKey) { return sendLinkMono.flatMap(link -> { return link.getLinkSize() .flatMap(size -> { final int batchSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; final BatchOptions batchOptions = new BatchOptions() .partitionKey(partitionKey) .maximumSizeInBytes(batchSize); return events.collect(new EventDataCollector(batchOptions, 1, () -> link.getErrorContext())); }) .flatMap(list -> sendInternal(Flux.fromIterable(list))); }); } private Mono<Void> sendInternalTracingEnabled(Flux<EventData> events, String partitionKey) { return sendLinkMono.flatMap(link -> { final AtomicReference<Context> sendSpanContext = new AtomicReference<>(Context.NONE); return link.getLinkSize() .flatMap(size -> { final int batchSize = size > 0 ? 
size : MAX_MESSAGE_LENGTH_BYTES; final BatchOptions batchOptions = new BatchOptions() .partitionKey(partitionKey) .maximumSizeInBytes(batchSize); return events.map(eventData -> { Context parentContext = eventData.context(); Context entityContext = parentContext.addData(ENTITY_PATH, link.getEntityPath()); sendSpanContext.set(tracerProvider.startSpan(entityContext.addData(HOST_NAME, link.getHostname()), ProcessKind.SEND)); return setSpanContext(eventData, parentContext); }).collect(new EventDataCollector(batchOptions, 1, () -> link.getErrorContext())); }) .flatMap(list -> sendInternal(Flux.fromIterable(list))) .doOnEach(signal -> { tracerProvider.endSpan(sendSpanContext.get(), signal); }); }); } private Mono<Void> sendInternal(Flux<EventDataBatch> eventBatches) { return eventBatches .flatMap(this::send) .then() .doOnError(error -> { logger.error("Error sending batch.", error); }); } private void verifyPartitionKey(String partitionKey) { if (ImplUtils.isNullOrEmpty(partitionKey)) { return; } if (isPartitionSender) { throw logger.logExceptionAsError(new IllegalArgumentException(String.format(Locale.US, "BatchOptions.partitionKey() cannot be set when an EventHubProducer is created with" + "EventHubProducerOptions.partitionId() set. This EventHubProducer can only send events to partition '%s'.", senderOptions.partitionId()))); } else if (partitionKey.length() > MAX_PARTITION_KEY_LENGTH) { throw logger.logExceptionAsError(new IllegalArgumentException(String.format(Locale.US, "PartitionKey '%s' exceeds the maximum allowed length: '%s'.", partitionKey, MAX_PARTITION_KEY_LENGTH))); } } /** * Disposes of the {@link EventHubAsyncProducer} by closing the underlying connection to the service. * @throws IOException if the underlying transport could not be closed and its resources could not be * disposed. 
*/ @Override public void close() throws IOException { if (!isDisposed.getAndSet(true)) { final AmqpSendLink block = sendLinkMono.block(senderOptions.retry().tryTimeout()); if (block != null) { block.close(); } } } /** * Collects EventData into EventDataBatch to send to Event Hubs. If {@code maxNumberOfBatches} is {@code null} then * it'll collect as many batches as possible. Otherwise, if there are more events than can fit into {@code * maxNumberOfBatches}, then the collector throws a {@link AmqpException} with {@link * ErrorCondition */ private static class EventDataCollector implements Collector<EventData, List<EventDataBatch>, List<EventDataBatch>> { private final String partitionKey; private final int maxMessageSize; private final Integer maxNumberOfBatches; private final ErrorContextProvider contextProvider; private volatile EventDataBatch currentBatch; EventDataCollector(BatchOptions options, Integer maxNumberOfBatches, ErrorContextProvider contextProvider) { this.maxNumberOfBatches = maxNumberOfBatches; this.maxMessageSize = options.maximumSizeInBytes() > 0 ? options.maximumSizeInBytes() : MAX_MESSAGE_LENGTH_BYTES; this.partitionKey = options.partitionKey(); this.contextProvider = contextProvider; currentBatch = new EventDataBatch(this.maxMessageSize, options.partitionKey(), contextProvider); } @Override public Supplier<List<EventDataBatch>> supplier() { return ArrayList::new; } @Override public BiConsumer<List<EventDataBatch>, EventData> accumulator() { return (list, event) -> { EventDataBatch batch = currentBatch; if (batch.tryAdd(event)) { return; } if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) { final String message = String.format(Locale.US, "EventData does not fit into maximum number of batches. 
'%s'", maxNumberOfBatches); throw new AmqpException(false, ErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, contextProvider.getErrorContext()); } currentBatch = new EventDataBatch(maxMessageSize, partitionKey, contextProvider); currentBatch.tryAdd(event); list.add(batch); }; } @Override public BinaryOperator<List<EventDataBatch>> combiner() { return (existing, another) -> { existing.addAll(another); return existing; }; } @Override public Function<List<EventDataBatch>, List<EventDataBatch>> finisher() { return list -> { EventDataBatch batch = currentBatch; currentBatch = null; if (batch != null) { list.add(batch); } return list; }; } @Override public Set<Characteristics> characteristics() { return Collections.emptySet(); } } }
Since the value `eventSpanContext.getData(DIAGNOSTIC_ID_KEY)` is read multiple times, store it in a local variable. That helps readability and guards against the unlikely case where the value changes between reads.
/**
 * Stamps tracing information onto {@code event}: if the event already carries a span context,
 * links it to the current span; otherwise starts a new MESSAGE span and records its diagnostic
 * id on the event.
 *
 * @param event The event to decorate with tracing data.
 * @param parentContext The context used as parent when a new span has to be started.
 * @return The same {@code event} instance, possibly with tracing property/context added.
 */
private EventData setSpanContext(EventData event, Context parentContext) {
    Optional<Object> eventContextData = event.context().getData(SPAN_CONTEXT);
    if (eventContextData.isPresent()) {
        Object spanContextObject = eventContextData.get();
        // Defensive instanceof check: a bad entry under SPAN_CONTEXT must not crash the
        // pipeline with a ClassCastException; warn through the logger instead.
        if (spanContextObject instanceof Context) {
            tracerProvider.addSpanLinks((Context) spanContextObject);
        } else {
            logger.warning(String.format(Locale.US,
                "Event Data context type is not of type Context, but type: %s. Not adding span links.",
                spanContextObject != null ? spanContextObject.getClass() : "null"));
        }
        return event;
    } else {
        Context eventSpanContext = tracerProvider.startSpan(parentContext, ProcessKind.MESSAGE);
        if (eventSpanContext != null) {
            // Read the diagnostic id once into a local so the presence check and the value
            // come from the same snapshot (and for readability).
            Optional<Object> eventDiagnosticIdOptional = eventSpanContext.getData(DIAGNOSTIC_ID_KEY);
            if (eventDiagnosticIdOptional.isPresent()) {
                event.addProperty(DIAGNOSTIC_ID_KEY, eventDiagnosticIdOptional.get().toString());
                tracerProvider.endSpan(eventSpanContext, null);
                event.addContext(SPAN_CONTEXT, eventSpanContext);
            }
        }
    }
    return event;
}
if (eventSpanContext != null && eventSpanContext.getData(DIAGNOSTIC_ID_KEY).isPresent()) {
/**
 * Propagates tracing information for {@code event}: an event that already holds a span context
 * gets linked to the current span (when the stored value is valid); otherwise a RECEIVE span is
 * opened and its diagnostic id is recorded on the event.
 *
 * @param event The event to decorate with tracing data.
 * @param parentContext The context used as parent when a new span has to be started.
 * @return The same {@code event} instance, possibly with tracing property/context added.
 */
private EventData setSpanContext(EventData event, Context parentContext) {
    final Optional<Object> existingSpanData = event.context().getData(SPAN_CONTEXT);

    if (existingSpanData.isPresent()) {
        final Object candidate = existingSpanData.get();
        if (!(candidate instanceof Context)) {
            // Not a Context: warn instead of risking a ClassCastException.
            logger.warning(String.format(Locale.US,
                "Event Data context type is not of type Context, but type: %s. Not adding span links.",
                candidate != null ? candidate.getClass() : "null"));
            return event;
        }
        tracerProvider.addSpanLinks((Context) candidate);
        return event;
    }

    final Context receiveSpan = tracerProvider.startSpan(parentContext, ProcessKind.RECEIVE);
    if (receiveSpan == null) {
        return event;
    }

    // Single read of the diagnostic id; only annotate the event when one was produced.
    final Optional<Object> diagnosticId = receiveSpan.getData(DIAGNOSTIC_ID_KEY);
    if (diagnosticId.isPresent()) {
        event.addProperty(DIAGNOSTIC_ID_KEY, diagnosticId.get().toString());
        tracerProvider.endSpan(receiveSpan, Signal.complete());
        event.addContext(SPAN_CONTEXT, receiveSpan);
    }
    return event;
}
// ---------------------------------------------------------------------------
// Snapshot (pre-revision): EventHubAsyncProducer before the review changes.
// In this version sendInternal always sets up the tracing span machinery
// (AtomicReference + tracerProvider.startSpan per event), with no
// tracerProvider.isEnabled() branch, and the Objects.requireNonNull calls
// carry no messages.
// NOTE(review): content below is a flattened one-line-per-field export and is
// intentionally kept byte-identical; only these header comments were added.
// ---------------------------------------------------------------------------
class EventHubAsyncProducer implements Closeable { private static final int MAX_PARTITION_KEY_LENGTH = 128; /** * The default maximum allowable size, in bytes, for a batch to be sent. */ public static final int MAX_MESSAGE_LENGTH_BYTES = 256 * 1024; private static final SendOptions DEFAULT_SEND_OPTIONS = new SendOptions(); private static final BatchOptions DEFAULT_BATCH_OPTIONS = new BatchOptions(); private final ClientLogger logger = new ClientLogger(EventHubAsyncProducer.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final EventHubProducerOptions senderOptions; private final Mono<AmqpSendLink> sendLinkMono; private final boolean isPartitionSender; private final TracerProvider tracerProvider; /** * Creates a new instance of this {@link EventHubAsyncProducer} that sends messages to {@link * EventHubProducerOptions * otherwise, allows the service to load balance the messages amongst available partitions. */ EventHubAsyncProducer(Mono<AmqpSendLink> amqpSendLinkMono, EventHubProducerOptions options, TracerProvider tracerProvider) { this.sendLinkMono = amqpSendLinkMono.cache(); this.senderOptions = options; this.isPartitionSender = !ImplUtils.isNullOrEmpty(options.partitionId()); this.tracerProvider = tracerProvider; } /** * Creates an {@link EventDataBatch} that can fit as many events as the transport allows. * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. */ public Mono<EventDataBatch> createBatch() { return createBatch(DEFAULT_BATCH_OPTIONS); } /** * Creates an {@link EventDataBatch} that can fit as many events as the transport allows. * @param options A set of options used to configure the {@link EventDataBatch}. * * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. 
*/ public Mono<EventDataBatch> createBatch(BatchOptions options) { Objects.requireNonNull(options); final BatchOptions clone = options.clone(); verifyPartitionKey(clone.partitionKey()); return sendLinkMono.flatMap(link -> link.getLinkSize() .flatMap(size -> { final int maximumLinkSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; if (clone.maximumSizeInBytes() > maximumLinkSize) { return Mono.error(new IllegalArgumentException(String.format(Locale.US, "BatchOptions.maximumSizeInBytes (%s bytes) is larger than the link size (%s bytes).", clone.maximumSizeInBytes(), maximumLinkSize))); } final int batchSize = clone.maximumSizeInBytes() > 0 ? clone.maximumSizeInBytes() : maximumLinkSize; return Mono.just(new EventDataBatch(batchSize, clone.partitionKey(), () -> link.getErrorContext())); })); } /** * Sends a single event to the associated Event Hub. If the size of the single event exceeds the maximum size * allowed, an exception will be triggered and the send will fail. * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. * * @return A {@link Mono} that completes when the event is pushed to the service. */ public Mono<Void> send(EventData event) { Objects.requireNonNull(event); return send(Flux.just(event)); } /** * Sends a single event to the associated Event Hub with the send options. If the size of the single event exceeds * the maximum size allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * @param event Event to send to the service. * @param options The set of options to consider when sending this event. * * @return A {@link Mono} that completes when the event is pushed to the service. 
*/ public Mono<Void> send(EventData event, SendOptions options) { Objects.requireNonNull(event); Objects.requireNonNull(options); return send(Flux.just(event), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * @param events Events to send to the service. * * @return A {@link Mono} that completes when all events are pushed to the service. */ public Mono<Void> send(Iterable<EventData> events) { Objects.requireNonNull(events); return send(Flux.fromIterable(events)); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * * @return A {@link Mono} that completes when all events are pushed to the service. */ public Mono<Void> send(Iterable<EventData> events, SendOptions options) { Objects.requireNonNull(events); return send(Flux.fromIterable(events), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * @param events Events to send to the service. * * @return A {@link Mono} that completes when all events are pushed to the service. */ public Mono<Void> send(Flux<EventData> events) { Objects.requireNonNull(events); return send(events, DEFAULT_SEND_OPTIONS); } /** * Sends a set of events to the associated Event Hub using a batched approach. 
If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * * @return A {@link Mono} that completes when all events are pushed to the service. */ public Mono<Void> send(Flux<EventData> events, SendOptions options) { Objects.requireNonNull(events); Objects.requireNonNull(options); return sendInternal(events, options); } /** * Sends the batch to the associated Event Hub. * @param batch The batch to send to the service. * * @return A {@link Mono} that completes when the batch is pushed to the service. * @throws NullPointerException if {@code batch} is {@code null}. * @see EventHubAsyncProducer * @see EventHubAsyncProducer */ public Mono<Void> send(EventDataBatch batch) { Objects.requireNonNull(batch); if (batch.getEvents().isEmpty()) { logger.info("Cannot send an EventBatch that is empty."); return Mono.empty(); } logger.info("Sending batch with partitionKey[{}], size[{}].", batch.getPartitionKey(), batch.getSize()); final List<Message> messages = EventDataUtil.toAmqpMessage(batch.getPartitionKey(), batch.getEvents()); return sendLinkMono.flatMap(link -> messages.size() == 1 ? link.send(messages.get(0)) : link.send(messages)); } private Mono<Void> sendInternal(Flux<EventData> events, SendOptions options) { final String partitionKey = options.partitionKey(); verifyPartitionKey(partitionKey); return sendLinkMono.flatMap(link -> { final AtomicReference<Context> sendSpanContext = new AtomicReference<>(Context.NONE); return link.getLinkSize() .flatMap(size -> { final int batchSize = size > 0 ? 
size : MAX_MESSAGE_LENGTH_BYTES; final BatchOptions batchOptions = new BatchOptions() .partitionKey(partitionKey) .maximumSizeInBytes(batchSize); return events.map(eventData -> { Context parentContext = eventData.context(); Context entityContext = parentContext.addData(ENTITY_PATH, link.getEntityPath()); sendSpanContext.set(tracerProvider.startSpan(entityContext.addData(HOST_NAME, link.getHostname()), ProcessKind.SEND)); return setSpanContext(eventData, parentContext); }).collect(new EventDataCollector(batchOptions, 1, () -> link.getErrorContext())); }) .flatMap(list -> sendInternal(Flux.fromIterable(list))) .doOnEach(signal -> tracerProvider.endSpan(sendSpanContext.get(), signal)); }); } private Mono<Void> sendInternal(Flux<EventDataBatch> eventBatches) { return eventBatches .flatMap(this::send) .then() .doOnError(error -> { logger.error("Error sending batch.", error); }); } private void verifyPartitionKey(String partitionKey) { if (ImplUtils.isNullOrEmpty(partitionKey)) { return; } if (isPartitionSender) { throw logger.logExceptionAsError(new IllegalArgumentException(String.format(Locale.US, "BatchOptions.partitionKey() cannot be set when an EventHubProducer is created with" + "EventHubProducerOptions.partitionId() set. This EventHubProducer can only send events to partition '%s'.", senderOptions.partitionId()))); } else if (partitionKey.length() > MAX_PARTITION_KEY_LENGTH) { throw logger.logExceptionAsError(new IllegalArgumentException(String.format(Locale.US, "PartitionKey '%s' exceeds the maximum allowed length: '%s'.", partitionKey, MAX_PARTITION_KEY_LENGTH))); } } /** * Disposes of the {@link EventHubAsyncProducer} by closing the underlying connection to the service. * @throws IOException if the underlying transport could not be closed and its resources could not be * disposed. 
*/ @Override public void close() throws IOException { if (!isDisposed.getAndSet(true)) { final AmqpSendLink block = sendLinkMono.block(senderOptions.retry().tryTimeout()); if (block != null) { block.close(); } } } /** * Collects EventData into EventDataBatch to send to Event Hubs. If {@code maxNumberOfBatches} is {@code null} then * it'll collect as many batches as possible. Otherwise, if there are more events than can fit into {@code * maxNumberOfBatches}, then the collector throws a {@link AmqpException} with {@link * ErrorCondition */ private static class EventDataCollector implements Collector<EventData, List<EventDataBatch>, List<EventDataBatch>> { private final String partitionKey; private final int maxMessageSize; private final Integer maxNumberOfBatches; private final ErrorContextProvider contextProvider; private volatile EventDataBatch currentBatch; EventDataCollector(BatchOptions options, Integer maxNumberOfBatches, ErrorContextProvider contextProvider) { this.maxNumberOfBatches = maxNumberOfBatches; this.maxMessageSize = options.maximumSizeInBytes() > 0 ? options.maximumSizeInBytes() : MAX_MESSAGE_LENGTH_BYTES; this.partitionKey = options.partitionKey(); this.contextProvider = contextProvider; currentBatch = new EventDataBatch(this.maxMessageSize, options.partitionKey(), contextProvider); } @Override public Supplier<List<EventDataBatch>> supplier() { return ArrayList::new; } @Override public BiConsumer<List<EventDataBatch>, EventData> accumulator() { return (list, event) -> { EventDataBatch batch = currentBatch; if (batch.tryAdd(event)) { return; } if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) { final String message = String.format(Locale.US, "EventData does not fit into maximum number of batches. 
'%s'", maxNumberOfBatches); throw new AmqpException(false, ErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, contextProvider.getErrorContext()); } currentBatch = new EventDataBatch(maxMessageSize, partitionKey, contextProvider); currentBatch.tryAdd(event); list.add(batch); }; } @Override public BinaryOperator<List<EventDataBatch>> combiner() { return (existing, another) -> { existing.addAll(another); return existing; }; } @Override public Function<List<EventDataBatch>, List<EventDataBatch>> finisher() { return list -> { EventDataBatch batch = currentBatch; currentBatch = null; if (batch != null) { list.add(batch); } return list; }; } @Override public Set<Characteristics> characteristics() { return Collections.emptySet(); } } }
// ---------------------------------------------------------------------------
// Snapshot (post-revision): EventHubAsyncProducer after the review changes.
// sendInternal now branches on tracerProvider.isEnabled() into
// sendInternalTracingEnabled / sendInternalTracingDisabled, and the
// Objects.requireNonNull calls carry messages.
// NOTE(review): in send(Iterable<EventData>, SendOptions) the null-check reads
// Objects.requireNonNull(events, "'options' cannot be null.") — the message
// names the wrong parameter, and 'options' itself is never null-checked.
// Worth fixing in a follow-up.
// NOTE(review): content below is a flattened one-line-per-field export and is
// intentionally kept byte-identical; only these header comments were added.
// ---------------------------------------------------------------------------
class EventHubAsyncProducer implements Closeable { private static final int MAX_PARTITION_KEY_LENGTH = 128; /** * The default maximum allowable size, in bytes, for a batch to be sent. */ public static final int MAX_MESSAGE_LENGTH_BYTES = 256 * 1024; private static final SendOptions DEFAULT_SEND_OPTIONS = new SendOptions(); private static final BatchOptions DEFAULT_BATCH_OPTIONS = new BatchOptions(); private final ClientLogger logger = new ClientLogger(EventHubAsyncProducer.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final EventHubProducerOptions senderOptions; private final Mono<AmqpSendLink> sendLinkMono; private final boolean isPartitionSender; private final TracerProvider tracerProvider; /** * Creates a new instance of this {@link EventHubAsyncProducer} that sends messages to {@link * EventHubProducerOptions * otherwise, allows the service to load balance the messages amongst available partitions. */ EventHubAsyncProducer(Mono<AmqpSendLink> amqpSendLinkMono, EventHubProducerOptions options, TracerProvider tracerProvider) { this.sendLinkMono = amqpSendLinkMono.cache(); this.senderOptions = options; this.isPartitionSender = !ImplUtils.isNullOrEmpty(options.partitionId()); this.tracerProvider = tracerProvider; } /** * Creates an {@link EventDataBatch} that can fit as many events as the transport allows. * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. */ public Mono<EventDataBatch> createBatch() { return createBatch(DEFAULT_BATCH_OPTIONS); } /** * Creates an {@link EventDataBatch} that can fit as many events as the transport allows. * @param options A set of options used to configure the {@link EventDataBatch}. * * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. 
*/ public Mono<EventDataBatch> createBatch(BatchOptions options) { Objects.requireNonNull(options, "'options' cannot be null."); final BatchOptions clone = options.clone(); verifyPartitionKey(clone.partitionKey()); return sendLinkMono.flatMap(link -> link.getLinkSize() .flatMap(size -> { final int maximumLinkSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; if (clone.maximumSizeInBytes() > maximumLinkSize) { return Mono.error(new IllegalArgumentException(String.format(Locale.US, "BatchOptions.maximumSizeInBytes (%s bytes) is larger than the link size (%s bytes).", clone.maximumSizeInBytes(), maximumLinkSize))); } final int batchSize = clone.maximumSizeInBytes() > 0 ? clone.maximumSizeInBytes() : maximumLinkSize; return Mono.just(new EventDataBatch(batchSize, clone.partitionKey(), () -> link.getErrorContext())); })); } /** * Sends a single event to the associated Event Hub. If the size of the single event exceeds the maximum size * allowed, an exception will be triggered and the send will fail. * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. * * @return A {@link Mono} that completes when the event is pushed to the service. */ public Mono<Void> send(EventData event) { Objects.requireNonNull(event, "'event' cannot be null."); return send(Flux.just(event)); } /** * Sends a single event to the associated Event Hub with the send options. If the size of the single event exceeds * the maximum size allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * @param event Event to send to the service. * @param options The set of options to consider when sending this event. * * @return A {@link Mono} that completes when the event is pushed to the service. 
*/ public Mono<Void> send(EventData event, SendOptions options) { Objects.requireNonNull(event, "'event' cannot be null."); Objects.requireNonNull(options, "'options' cannot be null."); return send(Flux.just(event), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * @param events Events to send to the service. * * @return A {@link Mono} that completes when all events are pushed to the service. */ public Mono<Void> send(Iterable<EventData> events) { Objects.requireNonNull(events, "'events' cannot be null."); return send(Flux.fromIterable(events)); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * * @return A {@link Mono} that completes when all events are pushed to the service. */ public Mono<Void> send(Iterable<EventData> events, SendOptions options) { Objects.requireNonNull(events, "'options' cannot be null."); return send(Flux.fromIterable(events), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * @param events Events to send to the service. * * @return A {@link Mono} that completes when all events are pushed to the service. 
*/ public Mono<Void> send(Flux<EventData> events) { Objects.requireNonNull(events, "'events' cannot be null."); return send(events, DEFAULT_SEND_OPTIONS); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * * @return A {@link Mono} that completes when all events are pushed to the service. */ public Mono<Void> send(Flux<EventData> events, SendOptions options) { Objects.requireNonNull(events, "'events' cannot be null."); Objects.requireNonNull(options, "'options' cannot be null."); return sendInternal(events, options); } /** * Sends the batch to the associated Event Hub. * @param batch The batch to send to the service. * * @return A {@link Mono} that completes when the batch is pushed to the service. * @throws NullPointerException if {@code batch} is {@code null}. * @see EventHubAsyncProducer * @see EventHubAsyncProducer */ public Mono<Void> send(EventDataBatch batch) { Objects.requireNonNull(batch, "'batch' cannot be null."); if (batch.getEvents().isEmpty()) { logger.info("Cannot send an EventBatch that is empty."); return Mono.empty(); } logger.info("Sending batch with partitionKey[{}], size[{}].", batch.getPartitionKey(), batch.getSize()); final List<Message> messages = EventDataUtil.toAmqpMessage(batch.getPartitionKey(), batch.getEvents()); return sendLinkMono.flatMap(link -> messages.size() == 1 ? 
link.send(messages.get(0)) : link.send(messages)); } private Mono<Void> sendInternal(Flux<EventData> events, SendOptions options) { final String partitionKey = options.partitionKey(); verifyPartitionKey(partitionKey); if (tracerProvider.isEnabled()) { return sendInternalTracingEnabled(events, partitionKey); } else { return sendInternalTracingDisabled(events, partitionKey); } } private Mono<Void> sendInternalTracingDisabled(Flux<EventData> events, String partitionKey) { return sendLinkMono.flatMap(link -> { return link.getLinkSize() .flatMap(size -> { final int batchSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; final BatchOptions batchOptions = new BatchOptions() .partitionKey(partitionKey) .maximumSizeInBytes(batchSize); return events.collect(new EventDataCollector(batchOptions, 1, () -> link.getErrorContext())); }) .flatMap(list -> sendInternal(Flux.fromIterable(list))); }); } private Mono<Void> sendInternalTracingEnabled(Flux<EventData> events, String partitionKey) { return sendLinkMono.flatMap(link -> { final AtomicReference<Context> sendSpanContext = new AtomicReference<>(Context.NONE); return link.getLinkSize() .flatMap(size -> { final int batchSize = size > 0 ? 
size : MAX_MESSAGE_LENGTH_BYTES; final BatchOptions batchOptions = new BatchOptions() .partitionKey(partitionKey) .maximumSizeInBytes(batchSize); return events.map(eventData -> { Context parentContext = eventData.context(); Context entityContext = parentContext.addData(ENTITY_PATH, link.getEntityPath()); sendSpanContext.set(tracerProvider.startSpan(entityContext.addData(HOST_NAME, link.getHostname()), ProcessKind.SEND)); return setSpanContext(eventData, parentContext); }).collect(new EventDataCollector(batchOptions, 1, () -> link.getErrorContext())); }) .flatMap(list -> sendInternal(Flux.fromIterable(list))) .doOnEach(signal -> { tracerProvider.endSpan(sendSpanContext.get(), signal); }); }); } private Mono<Void> sendInternal(Flux<EventDataBatch> eventBatches) { return eventBatches .flatMap(this::send) .then() .doOnError(error -> { logger.error("Error sending batch.", error); }); } private void verifyPartitionKey(String partitionKey) { if (ImplUtils.isNullOrEmpty(partitionKey)) { return; } if (isPartitionSender) { throw logger.logExceptionAsError(new IllegalArgumentException(String.format(Locale.US, "BatchOptions.partitionKey() cannot be set when an EventHubProducer is created with" + "EventHubProducerOptions.partitionId() set. This EventHubProducer can only send events to partition '%s'.", senderOptions.partitionId()))); } else if (partitionKey.length() > MAX_PARTITION_KEY_LENGTH) { throw logger.logExceptionAsError(new IllegalArgumentException(String.format(Locale.US, "PartitionKey '%s' exceeds the maximum allowed length: '%s'.", partitionKey, MAX_PARTITION_KEY_LENGTH))); } } /** * Disposes of the {@link EventHubAsyncProducer} by closing the underlying connection to the service. * @throws IOException if the underlying transport could not be closed and its resources could not be * disposed. 
*/ @Override public void close() throws IOException { if (!isDisposed.getAndSet(true)) { final AmqpSendLink block = sendLinkMono.block(senderOptions.retry().tryTimeout()); if (block != null) { block.close(); } } } /** * Collects EventData into EventDataBatch to send to Event Hubs. If {@code maxNumberOfBatches} is {@code null} then * it'll collect as many batches as possible. Otherwise, if there are more events than can fit into {@code * maxNumberOfBatches}, then the collector throws a {@link AmqpException} with {@link * ErrorCondition */ private static class EventDataCollector implements Collector<EventData, List<EventDataBatch>, List<EventDataBatch>> { private final String partitionKey; private final int maxMessageSize; private final Integer maxNumberOfBatches; private final ErrorContextProvider contextProvider; private volatile EventDataBatch currentBatch; EventDataCollector(BatchOptions options, Integer maxNumberOfBatches, ErrorContextProvider contextProvider) { this.maxNumberOfBatches = maxNumberOfBatches; this.maxMessageSize = options.maximumSizeInBytes() > 0 ? options.maximumSizeInBytes() : MAX_MESSAGE_LENGTH_BYTES; this.partitionKey = options.partitionKey(); this.contextProvider = contextProvider; currentBatch = new EventDataBatch(this.maxMessageSize, options.partitionKey(), contextProvider); } @Override public Supplier<List<EventDataBatch>> supplier() { return ArrayList::new; } @Override public BiConsumer<List<EventDataBatch>, EventData> accumulator() { return (list, event) -> { EventDataBatch batch = currentBatch; if (batch.tryAdd(event)) { return; } if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) { final String message = String.format(Locale.US, "EventData does not fit into maximum number of batches. 
'%s'", maxNumberOfBatches); throw new AmqpException(false, ErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, contextProvider.getErrorContext()); } currentBatch = new EventDataBatch(maxMessageSize, partitionKey, contextProvider); currentBatch.tryAdd(event); list.add(batch); }; } @Override public BinaryOperator<List<EventDataBatch>> combiner() { return (existing, another) -> { existing.addAll(another); return existing; }; } @Override public Function<List<EventDataBatch>, List<EventDataBatch>> finisher() { return list -> { EventDataBatch batch = currentBatch; currentBatch = null; if (batch != null) { list.add(batch); } return list; }; } @Override public Set<Characteristics> characteristics() { return Collections.emptySet(); } } }
Move this before line 83. Then you don't need the return statements in your cases and can replace them with break statements instead. The default: case can simply break as well.
/**
 * Ends the tracing span stored in the given context, deriving the span's final status from the
 * reactive signal that terminated the operation.
 *
 * @param context Additional metadata that is passed through the call stack; must contain the
 *     span under {@code OPENTELEMETRY_SPAN_KEY} for anything to happen.
 * @param signal The terminal signal; {@code ON_COMPLETE} ends the span with "success",
 *     {@code ON_ERROR} ends it with the AMQP error condition (if any). Other signal types are
 *     ignored.
 * @throws NullPointerException if {@code context} or {@code signal} is {@code null}.
 */
public void endSpan(Context context, Signal<Void> signal) {
    Objects.requireNonNull(context, "'context' cannot be null");
    Objects.requireNonNull(signal, "'signal' cannot be null");

    // Nothing to end if no span was ever started on this context.
    if (!context.getData(OPENTELEMETRY_SPAN_KEY).isPresent()) {
        return;
    }

    // Each case is self-contained and ends with break; no early returns needed.
    switch (signal.getType()) {
        case ON_COMPLETE:
            end("success", null, context);
            break;
        case ON_ERROR:
            String errorCondition = "";
            Throwable throwable = null;
            if (signal.hasError()) {
                throwable = signal.getThrowable();
                // Surface the AMQP error condition string when available.
                if (throwable instanceof AmqpException) {
                    errorCondition = ((AmqpException) throwable).getErrorCondition().getErrorCondition();
                }
            }
            end(errorCondition, throwable, context);
            break;
        default:
            // Non-terminal signals (e.g. ON_NEXT) do not end the span.
            break;
    }
}
end(errorCondition, throwable, context);
/**
 * Completes the tracing span held in {@code context}, mapping the terminal signal onto the
 * span's status: completion becomes "success", an error carries its AMQP condition, and any
 * other signal leaves the span untouched.
 *
 * @param context Call-stack metadata expected to hold the span under {@code OPENTELEMETRY_SPAN_KEY}.
 * @param signal The terminal signal used to derive the span status.
 * @throws NullPointerException if {@code context} or {@code signal} is {@code null}.
 */
public void endSpan(Context context, Signal<Void> signal) {
    Objects.requireNonNull(context, "'context' cannot be null");
    Objects.requireNonNull(signal, "'signal' cannot be null");

    // Skip entirely when no span has been opened on this context.
    if (!context.getData(OPENTELEMETRY_SPAN_KEY).isPresent()) {
        return;
    }

    switch (signal.getType()) {
        case ON_COMPLETE:
            end("success", null, context);
            break;
        case ON_ERROR:
            Throwable error = signal.hasError() ? signal.getThrowable() : null;
            String condition = error instanceof AmqpException
                ? ((AmqpException) error).getErrorCondition().getErrorCondition()
                : "";
            end(condition, error, context);
            break;
        default:
            break;
    }
}
/**
 * Dispatches tracing operations to every {@link Tracer} implementation plugged into the SDK.
 */
class TracerProvider {
    private final List<Tracer> tracers;

    /**
     * Creates a provider that fans tracing operations out to the given tracers.
     *
     * @param tracers The tracers to notify on each operation; must not be {@code null}.
     */
    public TracerProvider(List<Tracer> tracers) {
        // Message added for consistency with the other requireNonNull calls in this class.
        this.tracers = Objects.requireNonNull(tracers, "'tracers' cannot be null");
    }

    /**
     * @return {@code true} when at least one tracer is registered, so callers can skip
     *     tracing work entirely when disabled.
     */
    public boolean isEnabled() {
        return !tracers.isEmpty();
    }

    /**
     * For each tracer plugged into the SDK a new tracing span is created.
     *
     * The {@code context} will be checked for containing information about a parent span. If a parent span is found
     * the new span will be added as a child, otherwise the span will be created and added to the context and any
     * downstream start calls will use the created span as the parent.
     *
     * @param context Additional metadata that is passed through the call stack.
     * @param processKind The invoking process type.
     * @return An updated context object.
     */
    public Context startSpan(Context context, ProcessKind processKind) {
        Context local = Objects.requireNonNull(context, "'context' cannot be null");
        Objects.requireNonNull(processKind, "'processKind' cannot be null");

        String spanName = "Azure.eventhubs." + processKind.getProcessKind();
        // Each tracer may return an enriched context; thread it through the chain.
        for (Tracer tracer : tracers) {
            local = tracer.start(spanName, local, processKind);
        }
        return local;
    }

    /**
     * Given a context containing the current tracing span, the span is marked completed with status info from
     * {@link Signal}. For each tracer plugged into the SDK the current tracing span is marked as completed.
     *
     * @param context Additional metadata that is passed through the call stack.
     * @param signal The signal indicates the status and contains the metadata we need to end the tracing span.
     */

    /**
     * For each tracer plugged into the SDK a link is created between the parent tracing span and
     * the current service call.
     *
     * @param context Additional metadata that is passed through the call stack.
     */
    public void addSpanLinks(Context context) {
        Objects.requireNonNull(context, "'context' cannot be null");
        tracers.forEach(tracer -> tracer.addLink(context));
    }

    /**
     * For each tracer plugged into the SDK a new context is extracted from the event's diagnostic Id.
     *
     * @param diagnosticId Unique identifier of an external call from producer to the queue.
     * @param context Additional metadata that is passed through the call stack.
     * @return An updated context carrying the extracted span information.
     */
    public Context extractContext(String diagnosticId, Context context) {
        Context local = Objects.requireNonNull(context, "'context' cannot be null");
        Objects.requireNonNull(diagnosticId, "'diagnosticId' cannot be null");

        for (Tracer tracer : tracers) {
            local = tracer.extractContext(diagnosticId, local);
        }
        return local;
    }

    // Notifies every tracer that the current span finished with the given status.
    private void end(String statusMessage, Throwable throwable, Context context) {
        for (Tracer tracer : tracers) {
            tracer.end(statusMessage, throwable, context);
        }
    }
}
/**
 * Fans tracing operations out to every {@link Tracer} implementation plugged into the SDK.
 */
class TracerProvider {
    private final ClientLogger logger = new ClientLogger(TracerProvider.class);
    private final List<Tracer> tracers = new ArrayList<>();

    /**
     * Creates a provider that notifies each of the given tracers on every operation.
     *
     * @param tracers The tracers to register; must not be {@code null}.
     */
    public TracerProvider(Iterable<Tracer> tracers) {
        Objects.requireNonNull(tracers, "'tracers' cannot be null.");
        tracers.forEach(this.tracers::add);
    }

    /**
     * @return {@code true} when at least one tracer is registered.
     */
    public boolean isEnabled() {
        return !tracers.isEmpty();
    }

    /**
     * For each tracer plugged into the SDK a new tracing span is created.
     *
     * The {@code context} will be checked for containing information about a parent span. If a parent span is found
     * the new span will be added as a child, otherwise the span will be created and added to the context and any
     * downstream start calls will use the created span as the parent.
     *
     * @param context Additional metadata that is passed through the call stack.
     * @param processKind The invoking process type.
     * @return An updated context object.
     */
    public Context startSpan(Context context, ProcessKind processKind) {
        Context updated = Objects.requireNonNull(context, "'context' cannot be null");
        Objects.requireNonNull(processKind, "'processKind' cannot be null");

        String spanName = getSpanName(processKind);
        // Thread the (possibly enriched) context through every registered tracer.
        for (Tracer registered : tracers) {
            updated = registered.start(spanName, updated, processKind);
        }
        return updated;
    }

    /**
     * Given a context containing the current tracing span, the span is marked completed with status info from
     * {@link Signal}. For each tracer plugged into the SDK the current tracing span is marked as completed.
     *
     * @param context Additional metadata that is passed through the call stack.
     * @param signal The signal indicates the status and contains the metadata we need to end the tracing span.
     */

    /**
     * For each tracer plugged into the SDK a link is created between the parent tracing span and
     * the current service call.
     *
     * @param context Additional metadata that is passed through the call stack.
     */
    public void addSpanLinks(Context context) {
        Objects.requireNonNull(context, "'context' cannot be null");
        tracers.forEach(registered -> registered.addLink(context));
    }

    /**
     * For each tracer plugged into the SDK a new context is extracted from the event's diagnostic Id.
     *
     * @param diagnosticId Unique identifier of an external call from producer to the queue.
     * @param context Additional metadata that is passed through the call stack.
     * @return An updated context carrying the extracted span information.
     */
    public Context extractContext(String diagnosticId, Context context) {
        Context updated = Objects.requireNonNull(context, "'context' cannot be null");
        Objects.requireNonNull(diagnosticId, "'diagnosticId' cannot be null");

        for (Tracer registered : tracers) {
            updated = registered.extractContext(diagnosticId, updated);
        }
        return updated;
    }

    // Notifies every registered tracer that the current span finished with the given status.
    private void end(String statusMessage, Throwable throwable, Context context) {
        tracers.forEach(registered -> registered.end(statusMessage, throwable, context));
    }

    // Builds the span name for the given process kind; unknown kinds are logged and produce
    // the bare prefix, matching the previous behavior.
    private String getSpanName(ProcessKind processKind) {
        String suffix = "";
        switch (processKind) {
            case SEND:
                suffix = "send";
                break;
            case RECEIVE:
                suffix = "message";
                break;
            case PROCESS:
                suffix = "process";
                break;
            default:
                logger.warning("Unknown processKind type: {}", processKind);
                break;
        }
        return "Azure.eventhubs." + suffix;
    }
}
In your verify, you explicitly type out "amqp:not-found". If we decide to change this AMQP error condition string in the future, it'll break this test. It's better to do something like: ```java final ErrorCondition condition = ErrorCondition.NOT_FOUND; final Exception exception = new AmqpException(true, condition, "", null); // Act ... // Assert verify(tracer1, times(1)).end(condition.getErrorCondition(), exception, sendContext);
public void endSpanAmqpException() { final Tracer tracer1 = mock(Tracer.class); List<Tracer> tracers = Arrays.asList(tracer1); final TracerProvider tracerProvider = new TracerProvider(tracers); final Exception exception = new AmqpException(true, ErrorCondition.NOT_FOUND, "", null); Context sendContext = new Context(OPENTELEMETRY_SPAN_KEY, "value"); tracerProvider.endSpan(sendContext, Signal.error(exception)); verify(tracer1, times(1)).end("amqp:not-found", exception, sendContext); }
final Exception exception = new AmqpException(true, ErrorCondition.NOT_FOUND, "", null);
public void endSpanAmqpException() { final ErrorCondition errorCondition = ErrorCondition.NOT_FOUND; final Exception exception = new AmqpException(true, errorCondition, "", null); Context sendContext = new Context(OPENTELEMETRY_SPAN_KEY, "value"); tracerProvider.endSpan(sendContext, Signal.error(exception)); for (Tracer t : tracers) { verify(t, times(1)) .end(errorCondition.getErrorCondition(), exception, sendContext); } }
class TracerProviderTest { @Test public void startSpan() { final Tracer tracer1 = mock(Tracer.class); List<Tracer> tracers = Arrays.asList(tracer1); final TracerProvider tracerProvider = new TracerProvider(tracers); Context updatedContext = tracerProvider.startSpan(Context.NONE, ProcessKind.SEND); verify(tracer1, times(1)).start(eq("Azure.eventhubs.send"), any(), eq(ProcessKind.SEND)); } @Test public void isEnabled() { List<Tracer> tracers = Collections.emptyList(); final TracerProvider tracerProvider = new TracerProvider(tracers); Assert.assertEquals(false, tracerProvider.isEnabled()); } @Test public void startSpanReturnsUpdatedContext() { final Tracer tracer1 = mock(Tracer.class); List<Tracer> tracers = Arrays.asList(tracer1); final TracerProvider tracerProvider = new TracerProvider(tracers); when(tracer1.start("Azure.eventhubs.send", Context.NONE, ProcessKind.SEND)).thenAnswer( invocation -> { Context passed = invocation.getArgument(1, Context.class); return passed.addData(OPENTELEMETRY_SPAN_KEY, "value"); } ); Context updatedContext = tracerProvider.startSpan(Context.NONE, ProcessKind.SEND); Assert.assertEquals(Context.class, updatedContext.getClass()); Assert.assertEquals(updatedContext.getData(OPENTELEMETRY_SPAN_KEY).get(), "value"); } @Test public void endSpanSuccess() { final Tracer tracer1 = mock(Tracer.class); List<Tracer> tracers = Arrays.asList(tracer1); final TracerProvider tracerProvider = new TracerProvider(tracers); tracerProvider.endSpan(new Context(OPENTELEMETRY_SPAN_KEY, "value"), Signal.complete()); verify(tracer1, times(1)).end(eq("success"), isNull(), any(Context.class)); } @Test public void endSpanNoKey() { final Tracer tracer1 = mock(Tracer.class); List<Tracer> tracers = Arrays.asList(tracer1); final TracerProvider tracerProvider = new TracerProvider(tracers); tracerProvider.endSpan(Context.NONE, Signal.complete()); verify(tracer1, never()).end("", null, Context.NONE); } @Test public void endSpanError() { final Tracer tracer1 = 
mock(Tracer.class); List<Tracer> tracers = Arrays.asList(tracer1); final TracerProvider tracerProvider = new TracerProvider(tracers); Throwable testThrow = new Throwable("testError"); Context sendContext = new Context(OPENTELEMETRY_SPAN_KEY, "value"); tracerProvider.endSpan(sendContext, Signal.error(testThrow)); verify(tracer1, times(1)).end("", testThrow, sendContext); } @Test }
class TracerProviderTest { private static final String METHOD_NAME = "Azure.eventhubs.send"; @Mock private Tracer tracer; @Mock private Tracer tracer2; private List<Tracer> tracers; private TracerProvider tracerProvider; @Before public void setup() { MockitoAnnotations.initMocks(this); tracers = Arrays.asList(tracer, tracer2); tracerProvider = new TracerProvider(tracers); } @After public void teardown() { Mockito.framework().clearInlineMocks(); } @Test public void startSpan() { tracerProvider.startSpan(Context.NONE, ProcessKind.SEND); for (Tracer t : tracers) { verify(t, times(1)) .start(eq(METHOD_NAME), any(), eq(ProcessKind.SEND)); } } @Test public void notEnabledWhenNoTracers() { final TracerProvider provider = new TracerProvider(Collections.emptyList()); Assert.assertFalse(provider.isEnabled()); } @Test public void startSpanReturnsUpdatedContext() { final String parentKey = "parent-key"; final String parentValue = "parent-value"; final String childKey = "child-key"; final String childValue = "child-value"; final Context startingContext = Context.NONE; when(tracer.start(METHOD_NAME, startingContext, ProcessKind.SEND)).thenAnswer( invocation -> { Context passed = invocation.getArgument(1, Context.class); return passed.addData(parentKey, parentValue); } ); when(tracer2.start(eq(METHOD_NAME), any(), eq(ProcessKind.SEND))).thenAnswer( invocation -> { Context passed = invocation.getArgument(1, Context.class); return passed.addData(childKey, childValue); } ); final Context updatedContext = tracerProvider.startSpan(startingContext, ProcessKind.SEND); final Optional<Object> parentData = updatedContext.getData(parentKey); Assert.assertTrue(parentData.isPresent()); Assert.assertEquals(parentValue, parentData.get()); final Optional<Object> childData = updatedContext.getData(childKey); Assert.assertTrue(childData.isPresent()); Assert.assertEquals(childValue, childData.get()); } @Test public void endSpanSuccess() { tracerProvider.endSpan(new Context(OPENTELEMETRY_SPAN_KEY, 
"value"), Signal.complete()); for (Tracer t : tracers) { verify(t, times(1)).end(eq("success"), isNull(), any(Context.class)); } } @Test public void endSpanNoKey() { tracerProvider.endSpan(Context.NONE, Signal.complete()); for (Tracer t : tracers) { verify(t, never()).end("", null, Context.NONE); } } @Test public void endSpanError() { Throwable testThrow = new Throwable("testError"); Context sendContext = new Context(OPENTELEMETRY_SPAN_KEY, "value"); tracerProvider.endSpan(sendContext, Signal.error(testThrow)); for (Tracer t : tracers) { verify(t, times(1)).end("", testThrow, sendContext); } } @Test public void endSpanOnSubscribe() { Throwable testThrow = new Throwable("testError"); Context sendContext = new Context(OPENTELEMETRY_SPAN_KEY, "value"); tracerProvider.endSpan(sendContext, Signal.error(testThrow)); for (Tracer t : tracers) { verify(t, times(1)).end("", testThrow, sendContext); } } @Test @Test(expected = NullPointerException.class) public void addSpanLinksNoContext() { tracerProvider.addSpanLinks(null); } /** * Verify that we add spans for all the tracers. */ @Test(expected = NullPointerException.class) public void addSpanLinks() { tracerProvider.addSpanLinks(null); } }
These variables don't need to be created here. They are only used in the ON_ERROR case.
public void endSpan(Context context, Signal<Void> signal) { Objects.requireNonNull(context, "'context' cannot be null"); Objects.requireNonNull(signal, "'signal' cannot be null"); if (!context.getData(OPENTELEMETRY_SPAN_KEY).isPresent()) { return; } String errorCondition; Throwable throwable; switch (signal.getType()) { case ON_COMPLETE: end("success", null, context); case ON_ERROR: errorCondition = ""; throwable = null; if (signal.hasError()) { throwable = signal.getThrowable(); if (throwable instanceof AmqpException) { AmqpException exception = (AmqpException) throwable; errorCondition = exception.getErrorCondition().getErrorCondition(); } } end(errorCondition, throwable, context); default: break; } }
String errorCondition;
public void endSpan(Context context, Signal<Void> signal) { Objects.requireNonNull(context, "'context' cannot be null"); Objects.requireNonNull(signal, "'signal' cannot be null"); if (!context.getData(OPENTELEMETRY_SPAN_KEY).isPresent()) { return; } switch (signal.getType()) { case ON_COMPLETE: end("success", null, context); break; case ON_ERROR: String errorCondition = ""; Throwable throwable = null; if (signal.hasError()) { throwable = signal.getThrowable(); if (throwable instanceof AmqpException) { AmqpException exception = (AmqpException) throwable; errorCondition = exception.getErrorCondition().getErrorCondition(); } } end(errorCondition, throwable, context); break; default: break; } }
class TracerProvider { private final List<Tracer> tracers = new ArrayList<>(); public TracerProvider(Iterable<Tracer> tracers) { Objects.requireNonNull(tracers, "'tracers' cannot be null."); tracers.forEach(e -> this.tracers.add(e)); } public boolean isEnabled() { return tracers.size() > 0; } /** * For each tracer plugged into the SDK a new tracing span is created. * * The {@code context} will be checked for containing information about a parent span. If a parent span is found the * new span will be added as a child, otherwise the span will be created and added to the context and any downstream * start calls will use the created span as the parent. * * @param {@link Context context} Additional metadata that is passed through the call stack. * @param {@link ProcessKind processKind} the invoking process type. * @return An updated context object. */ public Context startSpan(Context context, ProcessKind processKind) { Context local = Objects.requireNonNull(context, "'context' cannot be null"); Objects.requireNonNull(processKind, "'processKind' cannot be null"); String spanName = "Azure.eventhubs." + processKind.getProcessKind(); for (Tracer tracer : tracers) { local = tracer.start(spanName, local, processKind); } return local; } /** * Given a context containing the current tracing span the span is marked completed with status info from {@link Signal} * For each tracer plugged into the SDK the current tracing span is marked as completed. * * @param context Additional metadata that is passed through the call stack. * @param signal The signal indicates the status and contains the metadata we need to end the tracing span. */ /** * For each tracer plugged into the SDK a link is created between the parent tracing span and * the current service call. * * @param context Additional metadata that is passed through the call stack. 
*/ public void addSpanLinks(Context context) { Objects.requireNonNull(context, "'context' cannot be null"); tracers.forEach(tracer -> tracer.addLink(context)); } /** * For each tracer plugged into the SDK a new context is extracted from the event's diagnostic Id. * * @param diagnosticId Unique identifier of an external call from producer to the queue. */ public Context extractContext(String diagnosticId, Context context) { Context local = Objects.requireNonNull(context, "'context' cannot be null"); Objects.requireNonNull(diagnosticId, "'diagnosticId' cannot be null"); for (Tracer tracer : tracers) { local = tracer.extractContext(diagnosticId, local); } return local; } private void end(String statusMessage, Throwable throwable, Context context) { for (Tracer tracer : tracers) { tracer.end(statusMessage, throwable, context); } } }
class TracerProvider { private final ClientLogger logger = new ClientLogger(TracerProvider.class); private final List<Tracer> tracers = new ArrayList<>(); public TracerProvider(Iterable<Tracer> tracers) { Objects.requireNonNull(tracers, "'tracers' cannot be null."); tracers.forEach(e -> this.tracers.add(e)); } public boolean isEnabled() { return tracers.size() > 0; } /** * For each tracer plugged into the SDK a new tracing span is created. * * The {@code context} will be checked for containing information about a parent span. If a parent span is found the * new span will be added as a child, otherwise the span will be created and added to the context and any downstream * start calls will use the created span as the parent. * * @param context Additional metadata that is passed through the call stack. * @param processKind the invoking process type. * @return An updated context object. */ public Context startSpan(Context context, ProcessKind processKind) { Context local = Objects.requireNonNull(context, "'context' cannot be null"); Objects.requireNonNull(processKind, "'processKind' cannot be null"); String spanName = getSpanName(processKind); for (Tracer tracer : tracers) { local = tracer.start(spanName, local, processKind); } return local; } /** * Given a context containing the current tracing span the span is marked completed with status info from * {@link Signal}. For each tracer plugged into the SDK the current tracing span is marked as completed. * * @param context Additional metadata that is passed through the call stack. * @param signal The signal indicates the status and contains the metadata we need to end the tracing span. */ /** * For each tracer plugged into the SDK a link is created between the parent tracing span and * the current service call. * * @param context Additional metadata that is passed through the call stack. 
*/ public void addSpanLinks(Context context) { Objects.requireNonNull(context, "'context' cannot be null"); tracers.forEach(tracer -> tracer.addLink(context)); } /** * For each tracer plugged into the SDK a new context is extracted from the event's diagnostic Id. * * @param diagnosticId Unique identifier of an external call from producer to the queue. */ public Context extractContext(String diagnosticId, Context context) { Context local = Objects.requireNonNull(context, "'context' cannot be null"); Objects.requireNonNull(diagnosticId, "'diagnosticId' cannot be null"); for (Tracer tracer : tracers) { local = tracer.extractContext(diagnosticId, local); } return local; } private void end(String statusMessage, Throwable throwable, Context context) { for (Tracer tracer : tracers) { tracer.end(statusMessage, throwable, context); } } private String getSpanName(ProcessKind processKind) { String spanName = "Azure.eventhubs."; switch (processKind) { case SEND: spanName += "send"; break; case RECEIVE: spanName += "message"; break; case PROCESS: spanName += "process"; break; default: logger.warning("Unknown processKind type: {}", processKind); break; } return spanName; } }
we should consider making these as defined constants.
private String getPartitionLeasePrefix() { return this.settings.getContainerNamePrefix() + ".."; }
return this.settings.getContainerNamePrefix() + "..";
private String getPartitionLeasePrefix() { return this.settings.getContainerNamePrefix() + LEASE_STORE_MANAGER_LEASE_SUFFIX; }
class LeaseStoreManagerImpl implements LeaseStoreManager, LeaseStoreManager.LeaseStoreManagerBuilderDefinition { private final Logger logger = LoggerFactory.getLogger(LeaseStoreManagerImpl.class); private LeaseStoreManagerSettings settings; private ChangeFeedContextClient leaseDocumentClient; private RequestOptionsFactory requestOptionsFactory; private ServiceItemLeaseUpdater leaseUpdater; private LeaseStore leaseStore; public static LeaseStoreManagerBuilderDefinition Builder() { return new LeaseStoreManagerImpl(); } public LeaseStoreManagerImpl() { this.settings = new LeaseStoreManagerSettings(); } @Override public LeaseStoreManagerBuilderDefinition leaseContextClient(ChangeFeedContextClient leaseContextClient) { if (leaseContextClient == null) { throw new IllegalArgumentException("leaseContextClient"); } this.leaseDocumentClient = leaseContextClient; return this; } @Override public LeaseStoreManagerBuilderDefinition leasePrefix(String leasePrefix) { if (leasePrefix == null) { throw new IllegalArgumentException("leasePrefix"); } this.settings.withContainerNamePrefix(leasePrefix); return this; } @Override public LeaseStoreManagerBuilderDefinition leaseCollectionLink(CosmosContainer leaseCollectionLink) { if (leaseCollectionLink == null) { throw new IllegalArgumentException("leaseCollectionLink"); } this.settings.withLeaseCollectionLink(leaseCollectionLink); return this; } @Override public LeaseStoreManagerBuilderDefinition requestOptionsFactory(RequestOptionsFactory requestOptionsFactory) { if (requestOptionsFactory == null) { throw new IllegalArgumentException("requestOptionsFactory"); } this.requestOptionsFactory = requestOptionsFactory; return this; } @Override public LeaseStoreManagerBuilderDefinition hostName(String hostName) { if (hostName == null) { throw new IllegalArgumentException("hostName"); } this.settings.withHostName(hostName); return this; } @Override public Mono<LeaseStoreManager> build() { if (this.settings == null) { throw new 
IllegalArgumentException("properties"); } if (this.settings.getContainerNamePrefix() == null) { throw new IllegalArgumentException("properties.containerNamePrefix"); } if (this.settings.getLeaseCollectionLink() == null) { throw new IllegalArgumentException("properties.leaseCollectionLink"); } if (this.settings.getHostName() == null || this.settings.getHostName().isEmpty()) { throw new IllegalArgumentException("properties.hostName"); } if (this.leaseDocumentClient == null) { throw new IllegalArgumentException("leaseDocumentClient"); } if (this.requestOptionsFactory == null) { throw new IllegalArgumentException("requestOptionsFactory"); } if (this.leaseUpdater == null) { this.leaseUpdater = new DocumentServiceLeaseUpdaterImpl(leaseDocumentClient); } this.leaseStore = new DocumentServiceLeaseStore( this.leaseDocumentClient, this.settings.getContainerNamePrefix(), this.settings.getLeaseCollectionLink(), this.requestOptionsFactory); if (this.settings.getLeaseCollectionLink() == null) { throw new IllegalArgumentException("leaseCollectionLink was not specified"); } if (this.requestOptionsFactory == null) { throw new IllegalArgumentException("requestOptionsFactory was not specified"); } return Mono.just(this); } @Override public Flux<Lease> getAllLeases() { return this.listDocuments(this.getPartitionLeasePrefix()) .map(documentServiceLease -> documentServiceLease); } @Override public Flux<Lease> getOwnedLeases() { return this.getAllLeases() .filter(lease -> lease.getOwner() != null && lease.getOwner().equalsIgnoreCase(this.settings.getHostName())); } @Override public Mono<Lease> createLeaseIfNotExist(String leaseToken, String continuationToken) { if (leaseToken == null) { throw new IllegalArgumentException("leaseToken"); } String leaseDocId = this.getDocumentId(leaseToken); ServiceItemLease documentServiceLease = new ServiceItemLease() .withId(leaseDocId) .withLeaseToken(leaseToken) .withContinuationToken(continuationToken); return 
this.leaseDocumentClient.createItem(this.settings.getLeaseCollectionLink(), documentServiceLease, null, false) .onErrorResume( ex -> { if (ex instanceof CosmosClientException) { CosmosClientException e = (CosmosClientException) ex; if (e.statusCode() == ChangeFeedHelper.HTTP_STATUS_CODE_CONFLICT) { logger.info("Some other host created lease for {}.", leaseToken); return Mono.empty(); } } return Mono.error(ex); }) .map(documentResourceResponse -> { if (documentResourceResponse == null) { return null; } CosmosItemProperties document = documentResourceResponse.properties(); logger.info("Created lease for partition {}.", leaseToken); return documentServiceLease .withId(document.id()) .withEtag(document.etag()) .withTs(document.getString(Constants.Properties.LAST_MODIFIED)); }); } @Override public Mono<Void> delete(Lease lease) { if (lease == null || lease.getId() == null) { throw new IllegalArgumentException("lease"); } CosmosItem itemForLease = this.createItemForLease(lease.getId()); return this.leaseDocumentClient .deleteItem(itemForLease, this.requestOptionsFactory.createRequestOptions(lease)) .onErrorResume( ex -> { if (ex instanceof CosmosClientException) { CosmosClientException e = (CosmosClientException) ex; if (e.statusCode() == ChangeFeedHelper.HTTP_STATUS_CODE_NOT_FOUND) { return Mono.empty(); } } return Mono.error(ex); }) .map( documentResourceResponse -> true) .then(); } @Override public Mono<Lease> acquire(Lease lease) { if (lease == null) { throw new IllegalArgumentException("lease"); } String oldOwner = lease.getOwner(); return this.leaseUpdater.updateLease( lease, this.createItemForLease(lease.getId()), this.requestOptionsFactory.createRequestOptions(lease), serverLease -> { if (serverLease.getOwner() != null && !serverLease.getOwner().equalsIgnoreCase(oldOwner)) { logger.info("Partition {} lease was taken over by owner '{}'", lease.getLeaseToken(), serverLease.getOwner()); throw new LeaseLostException(lease); } 
serverLease.setOwner(this.settings.getHostName()); serverLease.setProperties(lease.getProperties()); return serverLease; }); } @Override public Mono<Void> release(Lease lease) { if (lease == null) { throw new IllegalArgumentException("lease"); } CosmosItem itemForLease = this.createItemForLease(lease.getId()); return this.leaseDocumentClient.readItem(itemForLease, this.requestOptionsFactory.createRequestOptions(lease)) .onErrorResume( ex -> { if (ex instanceof CosmosClientException) { CosmosClientException e = (CosmosClientException) ex; if (e.statusCode() == ChangeFeedHelper.HTTP_STATUS_CODE_NOT_FOUND) { logger.info("Partition {} failed to renew lease. The lease is gone already.", lease.getLeaseToken()); throw new LeaseLostException(lease); } } return Mono.error(ex); }) .map( documentResourceResponse -> ServiceItemLease.fromDocument(documentResourceResponse.properties())) .flatMap( refreshedLease -> this.leaseUpdater.updateLease( refreshedLease, this.createItemForLease(refreshedLease.getId()), this.requestOptionsFactory.createRequestOptions(lease), serverLease -> { if (serverLease.getOwner() != null) { if (!serverLease.getOwner().equalsIgnoreCase(lease.getOwner())) { logger.info("Partition {} no need to release lease. The lease was already taken by another host '{}'.", lease.getLeaseToken(), serverLease.getOwner()); throw new LeaseLostException(lease); } } serverLease.setOwner(null); return serverLease; }) ).then(); } @Override public Mono<Lease> renew(Lease lease) { if (lease == null) { throw new IllegalArgumentException("lease"); } CosmosItem itemForLease = this.createItemForLease(lease.getId()); return this.leaseDocumentClient.readItem(itemForLease, this.requestOptionsFactory.createRequestOptions(lease)) .onErrorResume( ex -> { if (ex instanceof CosmosClientException) { CosmosClientException e = (CosmosClientException) ex; if (e.statusCode() == ChangeFeedHelper.HTTP_STATUS_CODE_NOT_FOUND) { logger.info("Partition {} failed to renew lease. 
The lease is gone already.", lease.getLeaseToken()); throw new LeaseLostException(lease); } } return Mono.error(ex); }) .map( documentResourceResponse -> ServiceItemLease.fromDocument(documentResourceResponse.properties())) .flatMap( refreshedLease -> this.leaseUpdater.updateLease( refreshedLease, this.createItemForLease(refreshedLease.getId()), this.requestOptionsFactory.createRequestOptions(lease), serverLease -> { if (!serverLease.getOwner().equalsIgnoreCase(lease.getOwner())) { logger.info("Partition {} lease was taken over by owner '{}'", lease.getLeaseToken(), serverLease.getOwner()); throw new LeaseLostException(lease); } return serverLease; }) ); } @Override public Mono<Lease> updateProperties(Lease lease) { if (lease == null) { throw new IllegalArgumentException("lease"); } if (!lease.getOwner().equalsIgnoreCase(this.settings.getHostName())) { logger.info("Partition '{}' lease was taken over by owner '{}' before lease item update", lease.getLeaseToken(), lease.getOwner()); throw new LeaseLostException(lease); } return this.leaseUpdater.updateLease( lease, this.createItemForLease(lease.getId()), this.requestOptionsFactory.createRequestOptions(lease), serverLease -> { if (!serverLease.getOwner().equalsIgnoreCase(lease.getOwner())) { logger.info("Partition '{}' lease was taken over by owner '{}'", lease.getLeaseToken(), serverLease.getOwner()); throw new LeaseLostException(lease); } serverLease.setProperties(lease.getProperties()); return serverLease; }); } @Override public Mono<Lease> checkpoint(Lease lease, String continuationToken) { if (lease == null) { throw new IllegalArgumentException("lease"); } if (continuationToken == null || continuationToken.isEmpty()) { throw new IllegalArgumentException("continuationToken must be a non-empty string"); } return this.leaseUpdater.updateLease( lease, this.createItemForLease(lease.getId()), this.requestOptionsFactory.createRequestOptions(lease), serverLease -> { if (serverLease.getOwner() != null && 
!serverLease.getOwner().equalsIgnoreCase(lease.getOwner())) { logger.info("Partition {} lease was taken over by owner '{}'", lease.getLeaseToken(), serverLease.getOwner()); throw new LeaseLostException(lease); } serverLease.setContinuationToken(continuationToken); return serverLease; }); } @Override public Mono<Boolean> isInitialized() { return this.leaseStore.isInitialized(); } @Override public Mono<Boolean> markInitialized() { return this.leaseStore.markInitialized(); } @Override public Mono<Boolean> acquireInitializationLock(Duration lockExpirationTime) { return this.leaseStore.acquireInitializationLock(lockExpirationTime); } @Override public Mono<Boolean> releaseInitializationLock() { return this.leaseStore.releaseInitializationLock(); } private Mono<ServiceItemLease> tryGetLease(Lease lease) { CosmosItem itemForLease = this.createItemForLease(lease.getId()); return this.leaseDocumentClient.readItem(itemForLease, this.requestOptionsFactory.createRequestOptions(lease)) .onErrorResume( ex -> { if (ex instanceof CosmosClientException) { CosmosClientException e = (CosmosClientException) ex; if (e.statusCode() == ChangeFeedHelper.HTTP_STATUS_CODE_NOT_FOUND) { return Mono.empty(); } } return Mono.error(ex); }) .map( documentResourceResponse -> { if (documentResourceResponse == null) return null; return ServiceItemLease.fromDocument(documentResourceResponse.properties()); }); } private Flux<ServiceItemLease> listDocuments(String prefix) { if (prefix == null || prefix.isEmpty()) { throw new IllegalArgumentException("prefix"); } SqlParameter param = new SqlParameter(); param.name("@PartitionLeasePrefix"); param.value(prefix); SqlQuerySpec querySpec = new SqlQuerySpec( "SELECT * FROM c WHERE STARTSWITH(c.id, @PartitionLeasePrefix)", new SqlParameterList(param)); Flux<FeedResponse<CosmosItemProperties>> query = this.leaseDocumentClient.queryItems( this.settings.getLeaseCollectionLink(), querySpec, this.requestOptionsFactory.createFeedOptions()); return query.flatMap( 
documentFeedResponse -> Flux.fromIterable(documentFeedResponse.results())) .map(ServiceItemLease::fromDocument); } private String getDocumentId(String leaseToken) { return this.getPartitionLeasePrefix() + leaseToken; } private CosmosItem createItemForLease(String leaseId) { return this.leaseDocumentClient.getContainerClient().getItem(leaseId, "/id"); } }
class LeaseStoreManagerImpl implements LeaseStoreManager, LeaseStoreManager.LeaseStoreManagerBuilderDefinition { private final String LEASE_STORE_MANAGER_LEASE_SUFFIX = ".."; private final Logger logger = LoggerFactory.getLogger(LeaseStoreManagerImpl.class); private LeaseStoreManagerSettings settings; private ChangeFeedContextClient leaseDocumentClient; private RequestOptionsFactory requestOptionsFactory; private ServiceItemLeaseUpdater leaseUpdater; private LeaseStore leaseStore; public static LeaseStoreManagerBuilderDefinition Builder() { return new LeaseStoreManagerImpl(); } public LeaseStoreManagerImpl() { this.settings = new LeaseStoreManagerSettings(); } @Override public LeaseStoreManagerBuilderDefinition leaseContextClient(ChangeFeedContextClient leaseContextClient) { if (leaseContextClient == null) { throw new IllegalArgumentException("leaseContextClient"); } this.leaseDocumentClient = leaseContextClient; return this; } @Override public LeaseStoreManagerBuilderDefinition leasePrefix(String leasePrefix) { if (leasePrefix == null) { throw new IllegalArgumentException("leasePrefix"); } this.settings.withContainerNamePrefix(leasePrefix); return this; } @Override public LeaseStoreManagerBuilderDefinition leaseCollectionLink(CosmosContainer leaseCollectionLink) { if (leaseCollectionLink == null) { throw new IllegalArgumentException("leaseCollectionLink"); } this.settings.withLeaseCollectionLink(leaseCollectionLink); return this; } @Override public LeaseStoreManagerBuilderDefinition requestOptionsFactory(RequestOptionsFactory requestOptionsFactory) { if (requestOptionsFactory == null) { throw new IllegalArgumentException("requestOptionsFactory"); } this.requestOptionsFactory = requestOptionsFactory; return this; } @Override public LeaseStoreManagerBuilderDefinition hostName(String hostName) { if (hostName == null) { throw new IllegalArgumentException("hostName"); } this.settings.withHostName(hostName); return this; } @Override public Mono<LeaseStoreManager> 
build() { if (this.settings == null) { throw new IllegalArgumentException("properties"); } if (this.settings.getContainerNamePrefix() == null) { throw new IllegalArgumentException("properties.containerNamePrefix"); } if (this.settings.getLeaseCollectionLink() == null) { throw new IllegalArgumentException("properties.leaseCollectionLink"); } if (this.settings.getHostName() == null || this.settings.getHostName().isEmpty()) { throw new IllegalArgumentException("properties.hostName"); } if (this.leaseDocumentClient == null) { throw new IllegalArgumentException("leaseDocumentClient"); } if (this.requestOptionsFactory == null) { throw new IllegalArgumentException("requestOptionsFactory"); } if (this.leaseUpdater == null) { this.leaseUpdater = new DocumentServiceLeaseUpdaterImpl(leaseDocumentClient); } this.leaseStore = new DocumentServiceLeaseStore( this.leaseDocumentClient, this.settings.getContainerNamePrefix(), this.settings.getLeaseCollectionLink(), this.requestOptionsFactory); if (this.settings.getLeaseCollectionLink() == null) { throw new IllegalArgumentException("leaseCollectionLink was not specified"); } if (this.requestOptionsFactory == null) { throw new IllegalArgumentException("requestOptionsFactory was not specified"); } return Mono.just(this); } @Override public Flux<Lease> getAllLeases() { return this.listDocuments(this.getPartitionLeasePrefix()) .map(documentServiceLease -> documentServiceLease); } @Override public Flux<Lease> getOwnedLeases() { return this.getAllLeases() .filter(lease -> lease.getOwner() != null && lease.getOwner().equalsIgnoreCase(this.settings.getHostName())); } @Override public Mono<Lease> createLeaseIfNotExist(String leaseToken, String continuationToken) { if (leaseToken == null) { throw new IllegalArgumentException("leaseToken"); } String leaseDocId = this.getDocumentId(leaseToken); ServiceItemLease documentServiceLease = new ServiceItemLease() .withId(leaseDocId) .withLeaseToken(leaseToken) .withContinuationToken(continuationToken); 
return this.leaseDocumentClient.createItem(this.settings.getLeaseCollectionLink(), documentServiceLease, null, false) .onErrorResume( ex -> { if (ex instanceof CosmosClientException) { CosmosClientException e = (CosmosClientException) ex; if (e.statusCode() == ChangeFeedHelper.HTTP_STATUS_CODE_CONFLICT) { logger.info("Some other host created lease for {}.", leaseToken); return Mono.empty(); } } return Mono.error(ex); }) .map(documentResourceResponse -> { if (documentResourceResponse == null) { return null; } CosmosItemProperties document = documentResourceResponse.properties(); logger.info("Created lease for partition {}.", leaseToken); return documentServiceLease .withId(document.id()) .withEtag(document.etag()) .withTs(document.getString(Constants.Properties.LAST_MODIFIED)); }); } @Override public Mono<Void> delete(Lease lease) { if (lease == null || lease.getId() == null) { throw new IllegalArgumentException("lease"); } CosmosItem itemForLease = this.createItemForLease(lease.getId()); return this.leaseDocumentClient .deleteItem(itemForLease, this.requestOptionsFactory.createRequestOptions(lease)) .onErrorResume( ex -> { if (ex instanceof CosmosClientException) { CosmosClientException e = (CosmosClientException) ex; if (e.statusCode() == ChangeFeedHelper.HTTP_STATUS_CODE_NOT_FOUND) { return Mono.empty(); } } return Mono.error(ex); }) .map( documentResourceResponse -> true) .then(); } @Override public Mono<Lease> acquire(Lease lease) { if (lease == null) { throw new IllegalArgumentException("lease"); } String oldOwner = lease.getOwner(); return this.leaseUpdater.updateLease( lease, this.createItemForLease(lease.getId()), this.requestOptionsFactory.createRequestOptions(lease), serverLease -> { if (serverLease.getOwner() != null && !serverLease.getOwner().equalsIgnoreCase(oldOwner)) { logger.info("Partition {} lease was taken over by owner '{}'", lease.getLeaseToken(), serverLease.getOwner()); throw new LeaseLostException(lease); } 
serverLease.setOwner(this.settings.getHostName()); serverLease.setProperties(lease.getProperties()); return serverLease; }); } @Override public Mono<Void> release(Lease lease) { if (lease == null) { throw new IllegalArgumentException("lease"); } CosmosItem itemForLease = this.createItemForLease(lease.getId()); return this.leaseDocumentClient.readItem(itemForLease, this.requestOptionsFactory.createRequestOptions(lease)) .onErrorResume( ex -> { if (ex instanceof CosmosClientException) { CosmosClientException e = (CosmosClientException) ex; if (e.statusCode() == ChangeFeedHelper.HTTP_STATUS_CODE_NOT_FOUND) { logger.info("Partition {} failed to renew lease. The lease is gone already.", lease.getLeaseToken()); throw new LeaseLostException(lease); } } return Mono.error(ex); }) .map( documentResourceResponse -> ServiceItemLease.fromDocument(documentResourceResponse.properties())) .flatMap( refreshedLease -> this.leaseUpdater.updateLease( refreshedLease, this.createItemForLease(refreshedLease.getId()), this.requestOptionsFactory.createRequestOptions(lease), serverLease -> { if (serverLease.getOwner() != null) { if (!serverLease.getOwner().equalsIgnoreCase(lease.getOwner())) { logger.info("Partition {} no need to release lease. The lease was already taken by another host '{}'.", lease.getLeaseToken(), serverLease.getOwner()); throw new LeaseLostException(lease); } } serverLease.setOwner(null); return serverLease; }) ).then(); } @Override public Mono<Lease> renew(Lease lease) { if (lease == null) { throw new IllegalArgumentException("lease"); } CosmosItem itemForLease = this.createItemForLease(lease.getId()); return this.leaseDocumentClient.readItem(itemForLease, this.requestOptionsFactory.createRequestOptions(lease)) .onErrorResume( ex -> { if (ex instanceof CosmosClientException) { CosmosClientException e = (CosmosClientException) ex; if (e.statusCode() == ChangeFeedHelper.HTTP_STATUS_CODE_NOT_FOUND) { logger.info("Partition {} failed to renew lease. 
The lease is gone already.", lease.getLeaseToken()); throw new LeaseLostException(lease); } } return Mono.error(ex); }) .map( documentResourceResponse -> ServiceItemLease.fromDocument(documentResourceResponse.properties())) .flatMap( refreshedLease -> this.leaseUpdater.updateLease( refreshedLease, this.createItemForLease(refreshedLease.getId()), this.requestOptionsFactory.createRequestOptions(lease), serverLease -> { if (!serverLease.getOwner().equalsIgnoreCase(lease.getOwner())) { logger.info("Partition {} lease was taken over by owner '{}'", lease.getLeaseToken(), serverLease.getOwner()); throw new LeaseLostException(lease); } return serverLease; }) ); } @Override public Mono<Lease> updateProperties(Lease lease) { if (lease == null) { throw new IllegalArgumentException("lease"); } if (!lease.getOwner().equalsIgnoreCase(this.settings.getHostName())) { logger.info("Partition '{}' lease was taken over by owner '{}' before lease item update", lease.getLeaseToken(), lease.getOwner()); throw new LeaseLostException(lease); } return this.leaseUpdater.updateLease( lease, this.createItemForLease(lease.getId()), this.requestOptionsFactory.createRequestOptions(lease), serverLease -> { if (!serverLease.getOwner().equalsIgnoreCase(lease.getOwner())) { logger.info("Partition '{}' lease was taken over by owner '{}'", lease.getLeaseToken(), serverLease.getOwner()); throw new LeaseLostException(lease); } serverLease.setProperties(lease.getProperties()); return serverLease; }); } @Override public Mono<Lease> checkpoint(Lease lease, String continuationToken) { if (lease == null) { throw new IllegalArgumentException("lease"); } if (continuationToken == null || continuationToken.isEmpty()) { throw new IllegalArgumentException("continuationToken must be a non-empty string"); } return this.leaseUpdater.updateLease( lease, this.createItemForLease(lease.getId()), this.requestOptionsFactory.createRequestOptions(lease), serverLease -> { if (serverLease.getOwner() != null && 
!serverLease.getOwner().equalsIgnoreCase(lease.getOwner())) { logger.info("Partition {} lease was taken over by owner '{}'", lease.getLeaseToken(), serverLease.getOwner()); throw new LeaseLostException(lease); } serverLease.setContinuationToken(continuationToken); return serverLease; }); } @Override public Mono<Boolean> isInitialized() { return this.leaseStore.isInitialized(); } @Override public Mono<Boolean> markInitialized() { return this.leaseStore.markInitialized(); } @Override public Mono<Boolean> acquireInitializationLock(Duration lockExpirationTime) { return this.leaseStore.acquireInitializationLock(lockExpirationTime); } @Override public Mono<Boolean> releaseInitializationLock() { return this.leaseStore.releaseInitializationLock(); } private Mono<ServiceItemLease> tryGetLease(Lease lease) { CosmosItem itemForLease = this.createItemForLease(lease.getId()); return this.leaseDocumentClient.readItem(itemForLease, this.requestOptionsFactory.createRequestOptions(lease)) .onErrorResume( ex -> { if (ex instanceof CosmosClientException) { CosmosClientException e = (CosmosClientException) ex; if (e.statusCode() == ChangeFeedHelper.HTTP_STATUS_CODE_NOT_FOUND) { return Mono.empty(); } } return Mono.error(ex); }) .map( documentResourceResponse -> { if (documentResourceResponse == null) return null; return ServiceItemLease.fromDocument(documentResourceResponse.properties()); }); } private Flux<ServiceItemLease> listDocuments(String prefix) { if (prefix == null || prefix.isEmpty()) { throw new IllegalArgumentException("prefix"); } SqlParameter param = new SqlParameter(); param.name("@PartitionLeasePrefix"); param.value(prefix); SqlQuerySpec querySpec = new SqlQuerySpec( "SELECT * FROM c WHERE STARTSWITH(c.id, @PartitionLeasePrefix)", new SqlParameterList(param)); Flux<FeedResponse<CosmosItemProperties>> query = this.leaseDocumentClient.queryItems( this.settings.getLeaseCollectionLink(), querySpec, this.requestOptionsFactory.createFeedOptions()); return query.flatMap( 
documentFeedResponse -> Flux.fromIterable(documentFeedResponse.results())) .map(ServiceItemLease::fromDocument); } private String getDocumentId(String leaseToken) { return this.getPartitionLeasePrefix() + leaseToken; } private CosmosItem createItemForLease(String leaseId) { return this.leaseDocumentClient.getContainerClient().getItem(leaseId, "/id"); } }
Changed the hard-coded ".." lease-id suffix literal to a named constant (LEASE_STORE_MANAGER_LEASE_SUFFIX).
private String getPartitionLeasePrefix() { return this.settings.getContainerNamePrefix() + ".."; }
return this.settings.getContainerNamePrefix() + "..";
private String getPartitionLeasePrefix() { return this.settings.getContainerNamePrefix() + LEASE_STORE_MANAGER_LEASE_SUFFIX; }
class LeaseStoreManagerImpl implements LeaseStoreManager, LeaseStoreManager.LeaseStoreManagerBuilderDefinition { private final Logger logger = LoggerFactory.getLogger(LeaseStoreManagerImpl.class); private LeaseStoreManagerSettings settings; private ChangeFeedContextClient leaseDocumentClient; private RequestOptionsFactory requestOptionsFactory; private ServiceItemLeaseUpdater leaseUpdater; private LeaseStore leaseStore; public static LeaseStoreManagerBuilderDefinition Builder() { return new LeaseStoreManagerImpl(); } public LeaseStoreManagerImpl() { this.settings = new LeaseStoreManagerSettings(); } @Override public LeaseStoreManagerBuilderDefinition leaseContextClient(ChangeFeedContextClient leaseContextClient) { if (leaseContextClient == null) { throw new IllegalArgumentException("leaseContextClient"); } this.leaseDocumentClient = leaseContextClient; return this; } @Override public LeaseStoreManagerBuilderDefinition leasePrefix(String leasePrefix) { if (leasePrefix == null) { throw new IllegalArgumentException("leasePrefix"); } this.settings.withContainerNamePrefix(leasePrefix); return this; } @Override public LeaseStoreManagerBuilderDefinition leaseCollectionLink(CosmosContainer leaseCollectionLink) { if (leaseCollectionLink == null) { throw new IllegalArgumentException("leaseCollectionLink"); } this.settings.withLeaseCollectionLink(leaseCollectionLink); return this; } @Override public LeaseStoreManagerBuilderDefinition requestOptionsFactory(RequestOptionsFactory requestOptionsFactory) { if (requestOptionsFactory == null) { throw new IllegalArgumentException("requestOptionsFactory"); } this.requestOptionsFactory = requestOptionsFactory; return this; } @Override public LeaseStoreManagerBuilderDefinition hostName(String hostName) { if (hostName == null) { throw new IllegalArgumentException("hostName"); } this.settings.withHostName(hostName); return this; } @Override public Mono<LeaseStoreManager> build() { if (this.settings == null) { throw new 
IllegalArgumentException("properties"); } if (this.settings.getContainerNamePrefix() == null) { throw new IllegalArgumentException("properties.containerNamePrefix"); } if (this.settings.getLeaseCollectionLink() == null) { throw new IllegalArgumentException("properties.leaseCollectionLink"); } if (this.settings.getHostName() == null || this.settings.getHostName().isEmpty()) { throw new IllegalArgumentException("properties.hostName"); } if (this.leaseDocumentClient == null) { throw new IllegalArgumentException("leaseDocumentClient"); } if (this.requestOptionsFactory == null) { throw new IllegalArgumentException("requestOptionsFactory"); } if (this.leaseUpdater == null) { this.leaseUpdater = new DocumentServiceLeaseUpdaterImpl(leaseDocumentClient); } this.leaseStore = new DocumentServiceLeaseStore( this.leaseDocumentClient, this.settings.getContainerNamePrefix(), this.settings.getLeaseCollectionLink(), this.requestOptionsFactory); if (this.settings.getLeaseCollectionLink() == null) { throw new IllegalArgumentException("leaseCollectionLink was not specified"); } if (this.requestOptionsFactory == null) { throw new IllegalArgumentException("requestOptionsFactory was not specified"); } return Mono.just(this); } @Override public Flux<Lease> getAllLeases() { return this.listDocuments(this.getPartitionLeasePrefix()) .map(documentServiceLease -> documentServiceLease); } @Override public Flux<Lease> getOwnedLeases() { return this.getAllLeases() .filter(lease -> lease.getOwner() != null && lease.getOwner().equalsIgnoreCase(this.settings.getHostName())); } @Override public Mono<Lease> createLeaseIfNotExist(String leaseToken, String continuationToken) { if (leaseToken == null) { throw new IllegalArgumentException("leaseToken"); } String leaseDocId = this.getDocumentId(leaseToken); ServiceItemLease documentServiceLease = new ServiceItemLease() .withId(leaseDocId) .withLeaseToken(leaseToken) .withContinuationToken(continuationToken); return 
this.leaseDocumentClient.createItem(this.settings.getLeaseCollectionLink(), documentServiceLease, null, false) .onErrorResume( ex -> { if (ex instanceof CosmosClientException) { CosmosClientException e = (CosmosClientException) ex; if (e.statusCode() == ChangeFeedHelper.HTTP_STATUS_CODE_CONFLICT) { logger.info("Some other host created lease for {}.", leaseToken); return Mono.empty(); } } return Mono.error(ex); }) .map(documentResourceResponse -> { if (documentResourceResponse == null) { return null; } CosmosItemProperties document = documentResourceResponse.properties(); logger.info("Created lease for partition {}.", leaseToken); return documentServiceLease .withId(document.id()) .withEtag(document.etag()) .withTs(document.getString(Constants.Properties.LAST_MODIFIED)); }); } @Override public Mono<Void> delete(Lease lease) { if (lease == null || lease.getId() == null) { throw new IllegalArgumentException("lease"); } CosmosItem itemForLease = this.createItemForLease(lease.getId()); return this.leaseDocumentClient .deleteItem(itemForLease, this.requestOptionsFactory.createRequestOptions(lease)) .onErrorResume( ex -> { if (ex instanceof CosmosClientException) { CosmosClientException e = (CosmosClientException) ex; if (e.statusCode() == ChangeFeedHelper.HTTP_STATUS_CODE_NOT_FOUND) { return Mono.empty(); } } return Mono.error(ex); }) .map( documentResourceResponse -> true) .then(); } @Override public Mono<Lease> acquire(Lease lease) { if (lease == null) { throw new IllegalArgumentException("lease"); } String oldOwner = lease.getOwner(); return this.leaseUpdater.updateLease( lease, this.createItemForLease(lease.getId()), this.requestOptionsFactory.createRequestOptions(lease), serverLease -> { if (serverLease.getOwner() != null && !serverLease.getOwner().equalsIgnoreCase(oldOwner)) { logger.info("Partition {} lease was taken over by owner '{}'", lease.getLeaseToken(), serverLease.getOwner()); throw new LeaseLostException(lease); } 
serverLease.setOwner(this.settings.getHostName()); serverLease.setProperties(lease.getProperties()); return serverLease; }); } @Override public Mono<Void> release(Lease lease) { if (lease == null) { throw new IllegalArgumentException("lease"); } CosmosItem itemForLease = this.createItemForLease(lease.getId()); return this.leaseDocumentClient.readItem(itemForLease, this.requestOptionsFactory.createRequestOptions(lease)) .onErrorResume( ex -> { if (ex instanceof CosmosClientException) { CosmosClientException e = (CosmosClientException) ex; if (e.statusCode() == ChangeFeedHelper.HTTP_STATUS_CODE_NOT_FOUND) { logger.info("Partition {} failed to renew lease. The lease is gone already.", lease.getLeaseToken()); throw new LeaseLostException(lease); } } return Mono.error(ex); }) .map( documentResourceResponse -> ServiceItemLease.fromDocument(documentResourceResponse.properties())) .flatMap( refreshedLease -> this.leaseUpdater.updateLease( refreshedLease, this.createItemForLease(refreshedLease.getId()), this.requestOptionsFactory.createRequestOptions(lease), serverLease -> { if (serverLease.getOwner() != null) { if (!serverLease.getOwner().equalsIgnoreCase(lease.getOwner())) { logger.info("Partition {} no need to release lease. The lease was already taken by another host '{}'.", lease.getLeaseToken(), serverLease.getOwner()); throw new LeaseLostException(lease); } } serverLease.setOwner(null); return serverLease; }) ).then(); } @Override public Mono<Lease> renew(Lease lease) { if (lease == null) { throw new IllegalArgumentException("lease"); } CosmosItem itemForLease = this.createItemForLease(lease.getId()); return this.leaseDocumentClient.readItem(itemForLease, this.requestOptionsFactory.createRequestOptions(lease)) .onErrorResume( ex -> { if (ex instanceof CosmosClientException) { CosmosClientException e = (CosmosClientException) ex; if (e.statusCode() == ChangeFeedHelper.HTTP_STATUS_CODE_NOT_FOUND) { logger.info("Partition {} failed to renew lease. 
The lease is gone already.", lease.getLeaseToken()); throw new LeaseLostException(lease); } } return Mono.error(ex); }) .map( documentResourceResponse -> ServiceItemLease.fromDocument(documentResourceResponse.properties())) .flatMap( refreshedLease -> this.leaseUpdater.updateLease( refreshedLease, this.createItemForLease(refreshedLease.getId()), this.requestOptionsFactory.createRequestOptions(lease), serverLease -> { if (!serverLease.getOwner().equalsIgnoreCase(lease.getOwner())) { logger.info("Partition {} lease was taken over by owner '{}'", lease.getLeaseToken(), serverLease.getOwner()); throw new LeaseLostException(lease); } return serverLease; }) ); } @Override public Mono<Lease> updateProperties(Lease lease) { if (lease == null) { throw new IllegalArgumentException("lease"); } if (!lease.getOwner().equalsIgnoreCase(this.settings.getHostName())) { logger.info("Partition '{}' lease was taken over by owner '{}' before lease item update", lease.getLeaseToken(), lease.getOwner()); throw new LeaseLostException(lease); } return this.leaseUpdater.updateLease( lease, this.createItemForLease(lease.getId()), this.requestOptionsFactory.createRequestOptions(lease), serverLease -> { if (!serverLease.getOwner().equalsIgnoreCase(lease.getOwner())) { logger.info("Partition '{}' lease was taken over by owner '{}'", lease.getLeaseToken(), serverLease.getOwner()); throw new LeaseLostException(lease); } serverLease.setProperties(lease.getProperties()); return serverLease; }); } @Override public Mono<Lease> checkpoint(Lease lease, String continuationToken) { if (lease == null) { throw new IllegalArgumentException("lease"); } if (continuationToken == null || continuationToken.isEmpty()) { throw new IllegalArgumentException("continuationToken must be a non-empty string"); } return this.leaseUpdater.updateLease( lease, this.createItemForLease(lease.getId()), this.requestOptionsFactory.createRequestOptions(lease), serverLease -> { if (serverLease.getOwner() != null && 
!serverLease.getOwner().equalsIgnoreCase(lease.getOwner())) { logger.info("Partition {} lease was taken over by owner '{}'", lease.getLeaseToken(), serverLease.getOwner()); throw new LeaseLostException(lease); } serverLease.setContinuationToken(continuationToken); return serverLease; }); } @Override public Mono<Boolean> isInitialized() { return this.leaseStore.isInitialized(); } @Override public Mono<Boolean> markInitialized() { return this.leaseStore.markInitialized(); } @Override public Mono<Boolean> acquireInitializationLock(Duration lockExpirationTime) { return this.leaseStore.acquireInitializationLock(lockExpirationTime); } @Override public Mono<Boolean> releaseInitializationLock() { return this.leaseStore.releaseInitializationLock(); } private Mono<ServiceItemLease> tryGetLease(Lease lease) { CosmosItem itemForLease = this.createItemForLease(lease.getId()); return this.leaseDocumentClient.readItem(itemForLease, this.requestOptionsFactory.createRequestOptions(lease)) .onErrorResume( ex -> { if (ex instanceof CosmosClientException) { CosmosClientException e = (CosmosClientException) ex; if (e.statusCode() == ChangeFeedHelper.HTTP_STATUS_CODE_NOT_FOUND) { return Mono.empty(); } } return Mono.error(ex); }) .map( documentResourceResponse -> { if (documentResourceResponse == null) return null; return ServiceItemLease.fromDocument(documentResourceResponse.properties()); }); } private Flux<ServiceItemLease> listDocuments(String prefix) { if (prefix == null || prefix.isEmpty()) { throw new IllegalArgumentException("prefix"); } SqlParameter param = new SqlParameter(); param.name("@PartitionLeasePrefix"); param.value(prefix); SqlQuerySpec querySpec = new SqlQuerySpec( "SELECT * FROM c WHERE STARTSWITH(c.id, @PartitionLeasePrefix)", new SqlParameterList(param)); Flux<FeedResponse<CosmosItemProperties>> query = this.leaseDocumentClient.queryItems( this.settings.getLeaseCollectionLink(), querySpec, this.requestOptionsFactory.createFeedOptions()); return query.flatMap( 
documentFeedResponse -> Flux.fromIterable(documentFeedResponse.results())) .map(ServiceItemLease::fromDocument); } private String getDocumentId(String leaseToken) { return this.getPartitionLeasePrefix() + leaseToken; } private CosmosItem createItemForLease(String leaseId) { return this.leaseDocumentClient.getContainerClient().getItem(leaseId, "/id"); } }
class LeaseStoreManagerImpl implements LeaseStoreManager, LeaseStoreManager.LeaseStoreManagerBuilderDefinition { private final String LEASE_STORE_MANAGER_LEASE_SUFFIX = ".."; private final Logger logger = LoggerFactory.getLogger(LeaseStoreManagerImpl.class); private LeaseStoreManagerSettings settings; private ChangeFeedContextClient leaseDocumentClient; private RequestOptionsFactory requestOptionsFactory; private ServiceItemLeaseUpdater leaseUpdater; private LeaseStore leaseStore; public static LeaseStoreManagerBuilderDefinition Builder() { return new LeaseStoreManagerImpl(); } public LeaseStoreManagerImpl() { this.settings = new LeaseStoreManagerSettings(); } @Override public LeaseStoreManagerBuilderDefinition leaseContextClient(ChangeFeedContextClient leaseContextClient) { if (leaseContextClient == null) { throw new IllegalArgumentException("leaseContextClient"); } this.leaseDocumentClient = leaseContextClient; return this; } @Override public LeaseStoreManagerBuilderDefinition leasePrefix(String leasePrefix) { if (leasePrefix == null) { throw new IllegalArgumentException("leasePrefix"); } this.settings.withContainerNamePrefix(leasePrefix); return this; } @Override public LeaseStoreManagerBuilderDefinition leaseCollectionLink(CosmosContainer leaseCollectionLink) { if (leaseCollectionLink == null) { throw new IllegalArgumentException("leaseCollectionLink"); } this.settings.withLeaseCollectionLink(leaseCollectionLink); return this; } @Override public LeaseStoreManagerBuilderDefinition requestOptionsFactory(RequestOptionsFactory requestOptionsFactory) { if (requestOptionsFactory == null) { throw new IllegalArgumentException("requestOptionsFactory"); } this.requestOptionsFactory = requestOptionsFactory; return this; } @Override public LeaseStoreManagerBuilderDefinition hostName(String hostName) { if (hostName == null) { throw new IllegalArgumentException("hostName"); } this.settings.withHostName(hostName); return this; } @Override public Mono<LeaseStoreManager> 
build() { if (this.settings == null) { throw new IllegalArgumentException("properties"); } if (this.settings.getContainerNamePrefix() == null) { throw new IllegalArgumentException("properties.containerNamePrefix"); } if (this.settings.getLeaseCollectionLink() == null) { throw new IllegalArgumentException("properties.leaseCollectionLink"); } if (this.settings.getHostName() == null || this.settings.getHostName().isEmpty()) { throw new IllegalArgumentException("properties.hostName"); } if (this.leaseDocumentClient == null) { throw new IllegalArgumentException("leaseDocumentClient"); } if (this.requestOptionsFactory == null) { throw new IllegalArgumentException("requestOptionsFactory"); } if (this.leaseUpdater == null) { this.leaseUpdater = new DocumentServiceLeaseUpdaterImpl(leaseDocumentClient); } this.leaseStore = new DocumentServiceLeaseStore( this.leaseDocumentClient, this.settings.getContainerNamePrefix(), this.settings.getLeaseCollectionLink(), this.requestOptionsFactory); if (this.settings.getLeaseCollectionLink() == null) { throw new IllegalArgumentException("leaseCollectionLink was not specified"); } if (this.requestOptionsFactory == null) { throw new IllegalArgumentException("requestOptionsFactory was not specified"); } return Mono.just(this); } @Override public Flux<Lease> getAllLeases() { return this.listDocuments(this.getPartitionLeasePrefix()) .map(documentServiceLease -> documentServiceLease); } @Override public Flux<Lease> getOwnedLeases() { return this.getAllLeases() .filter(lease -> lease.getOwner() != null && lease.getOwner().equalsIgnoreCase(this.settings.getHostName())); } @Override public Mono<Lease> createLeaseIfNotExist(String leaseToken, String continuationToken) { if (leaseToken == null) { throw new IllegalArgumentException("leaseToken"); } String leaseDocId = this.getDocumentId(leaseToken); ServiceItemLease documentServiceLease = new ServiceItemLease() .withId(leaseDocId) .withLeaseToken(leaseToken) .withContinuationToken(continuationToken); 
return this.leaseDocumentClient.createItem(this.settings.getLeaseCollectionLink(), documentServiceLease, null, false) .onErrorResume( ex -> { if (ex instanceof CosmosClientException) { CosmosClientException e = (CosmosClientException) ex; if (e.statusCode() == ChangeFeedHelper.HTTP_STATUS_CODE_CONFLICT) { logger.info("Some other host created lease for {}.", leaseToken); return Mono.empty(); } } return Mono.error(ex); }) .map(documentResourceResponse -> { if (documentResourceResponse == null) { return null; } CosmosItemProperties document = documentResourceResponse.properties(); logger.info("Created lease for partition {}.", leaseToken); return documentServiceLease .withId(document.id()) .withEtag(document.etag()) .withTs(document.getString(Constants.Properties.LAST_MODIFIED)); }); } @Override public Mono<Void> delete(Lease lease) { if (lease == null || lease.getId() == null) { throw new IllegalArgumentException("lease"); } CosmosItem itemForLease = this.createItemForLease(lease.getId()); return this.leaseDocumentClient .deleteItem(itemForLease, this.requestOptionsFactory.createRequestOptions(lease)) .onErrorResume( ex -> { if (ex instanceof CosmosClientException) { CosmosClientException e = (CosmosClientException) ex; if (e.statusCode() == ChangeFeedHelper.HTTP_STATUS_CODE_NOT_FOUND) { return Mono.empty(); } } return Mono.error(ex); }) .map( documentResourceResponse -> true) .then(); } @Override public Mono<Lease> acquire(Lease lease) { if (lease == null) { throw new IllegalArgumentException("lease"); } String oldOwner = lease.getOwner(); return this.leaseUpdater.updateLease( lease, this.createItemForLease(lease.getId()), this.requestOptionsFactory.createRequestOptions(lease), serverLease -> { if (serverLease.getOwner() != null && !serverLease.getOwner().equalsIgnoreCase(oldOwner)) { logger.info("Partition {} lease was taken over by owner '{}'", lease.getLeaseToken(), serverLease.getOwner()); throw new LeaseLostException(lease); } 
serverLease.setOwner(this.settings.getHostName()); serverLease.setProperties(lease.getProperties()); return serverLease; }); } @Override public Mono<Void> release(Lease lease) { if (lease == null) { throw new IllegalArgumentException("lease"); } CosmosItem itemForLease = this.createItemForLease(lease.getId()); return this.leaseDocumentClient.readItem(itemForLease, this.requestOptionsFactory.createRequestOptions(lease)) .onErrorResume( ex -> { if (ex instanceof CosmosClientException) { CosmosClientException e = (CosmosClientException) ex; if (e.statusCode() == ChangeFeedHelper.HTTP_STATUS_CODE_NOT_FOUND) { logger.info("Partition {} failed to renew lease. The lease is gone already.", lease.getLeaseToken()); throw new LeaseLostException(lease); } } return Mono.error(ex); }) .map( documentResourceResponse -> ServiceItemLease.fromDocument(documentResourceResponse.properties())) .flatMap( refreshedLease -> this.leaseUpdater.updateLease( refreshedLease, this.createItemForLease(refreshedLease.getId()), this.requestOptionsFactory.createRequestOptions(lease), serverLease -> { if (serverLease.getOwner() != null) { if (!serverLease.getOwner().equalsIgnoreCase(lease.getOwner())) { logger.info("Partition {} no need to release lease. The lease was already taken by another host '{}'.", lease.getLeaseToken(), serverLease.getOwner()); throw new LeaseLostException(lease); } } serverLease.setOwner(null); return serverLease; }) ).then(); } @Override public Mono<Lease> renew(Lease lease) { if (lease == null) { throw new IllegalArgumentException("lease"); } CosmosItem itemForLease = this.createItemForLease(lease.getId()); return this.leaseDocumentClient.readItem(itemForLease, this.requestOptionsFactory.createRequestOptions(lease)) .onErrorResume( ex -> { if (ex instanceof CosmosClientException) { CosmosClientException e = (CosmosClientException) ex; if (e.statusCode() == ChangeFeedHelper.HTTP_STATUS_CODE_NOT_FOUND) { logger.info("Partition {} failed to renew lease. 
The lease is gone already.", lease.getLeaseToken()); throw new LeaseLostException(lease); } } return Mono.error(ex); }) .map( documentResourceResponse -> ServiceItemLease.fromDocument(documentResourceResponse.properties())) .flatMap( refreshedLease -> this.leaseUpdater.updateLease( refreshedLease, this.createItemForLease(refreshedLease.getId()), this.requestOptionsFactory.createRequestOptions(lease), serverLease -> { if (!serverLease.getOwner().equalsIgnoreCase(lease.getOwner())) { logger.info("Partition {} lease was taken over by owner '{}'", lease.getLeaseToken(), serverLease.getOwner()); throw new LeaseLostException(lease); } return serverLease; }) ); } @Override public Mono<Lease> updateProperties(Lease lease) { if (lease == null) { throw new IllegalArgumentException("lease"); } if (!lease.getOwner().equalsIgnoreCase(this.settings.getHostName())) { logger.info("Partition '{}' lease was taken over by owner '{}' before lease item update", lease.getLeaseToken(), lease.getOwner()); throw new LeaseLostException(lease); } return this.leaseUpdater.updateLease( lease, this.createItemForLease(lease.getId()), this.requestOptionsFactory.createRequestOptions(lease), serverLease -> { if (!serverLease.getOwner().equalsIgnoreCase(lease.getOwner())) { logger.info("Partition '{}' lease was taken over by owner '{}'", lease.getLeaseToken(), serverLease.getOwner()); throw new LeaseLostException(lease); } serverLease.setProperties(lease.getProperties()); return serverLease; }); } @Override public Mono<Lease> checkpoint(Lease lease, String continuationToken) { if (lease == null) { throw new IllegalArgumentException("lease"); } if (continuationToken == null || continuationToken.isEmpty()) { throw new IllegalArgumentException("continuationToken must be a non-empty string"); } return this.leaseUpdater.updateLease( lease, this.createItemForLease(lease.getId()), this.requestOptionsFactory.createRequestOptions(lease), serverLease -> { if (serverLease.getOwner() != null && 
!serverLease.getOwner().equalsIgnoreCase(lease.getOwner())) { logger.info("Partition {} lease was taken over by owner '{}'", lease.getLeaseToken(), serverLease.getOwner()); throw new LeaseLostException(lease); } serverLease.setContinuationToken(continuationToken); return serverLease; }); } @Override public Mono<Boolean> isInitialized() { return this.leaseStore.isInitialized(); } @Override public Mono<Boolean> markInitialized() { return this.leaseStore.markInitialized(); } @Override public Mono<Boolean> acquireInitializationLock(Duration lockExpirationTime) { return this.leaseStore.acquireInitializationLock(lockExpirationTime); } @Override public Mono<Boolean> releaseInitializationLock() { return this.leaseStore.releaseInitializationLock(); } private Mono<ServiceItemLease> tryGetLease(Lease lease) { CosmosItem itemForLease = this.createItemForLease(lease.getId()); return this.leaseDocumentClient.readItem(itemForLease, this.requestOptionsFactory.createRequestOptions(lease)) .onErrorResume( ex -> { if (ex instanceof CosmosClientException) { CosmosClientException e = (CosmosClientException) ex; if (e.statusCode() == ChangeFeedHelper.HTTP_STATUS_CODE_NOT_FOUND) { return Mono.empty(); } } return Mono.error(ex); }) .map( documentResourceResponse -> { if (documentResourceResponse == null) return null; return ServiceItemLease.fromDocument(documentResourceResponse.properties()); }); } private Flux<ServiceItemLease> listDocuments(String prefix) { if (prefix == null || prefix.isEmpty()) { throw new IllegalArgumentException("prefix"); } SqlParameter param = new SqlParameter(); param.name("@PartitionLeasePrefix"); param.value(prefix); SqlQuerySpec querySpec = new SqlQuerySpec( "SELECT * FROM c WHERE STARTSWITH(c.id, @PartitionLeasePrefix)", new SqlParameterList(param)); Flux<FeedResponse<CosmosItemProperties>> query = this.leaseDocumentClient.queryItems( this.settings.getLeaseCollectionLink(), querySpec, this.requestOptionsFactory.createFeedOptions()); return query.flatMap( 
documentFeedResponse -> Flux.fromIterable(documentFeedResponse.results())) .map(ServiceItemLease::fromDocument); } private String getDocumentId(String leaseToken) { return this.getPartitionLeasePrefix() + leaseToken; } private CosmosItem createItemForLease(String leaseId) { return this.leaseDocumentClient.getContainerClient().getItem(leaseId, "/id"); } }
We should use logger.error (instead of printStackTrace()) here, and everywhere else in the tests, passing the exception so the stack trace is kept.
public void staledLeaseAcquiring() { final String ownerFirst = "Owner_First"; final String ownerSecond = "Owner_Second"; final String leasePrefix = "TEST"; ChangeFeedProcessor changeFeedProcessorFirst = ChangeFeedProcessor.Builder() .hostName(ownerFirst) .handleChanges(docs -> { ChangeFeedProcessorTest.log.info("START processing from thread {} using host {}", Thread.currentThread().getId(), ownerFirst); ChangeFeedProcessorTest.log.info("END processing from thread {} using host {}", Thread.currentThread().getId(), ownerFirst); }) .feedContainer(createdFeedCollection) .leaseContainer(createdLeaseCollection) .options(new ChangeFeedProcessorOptions() .leasePrefix(leasePrefix) ) .build(); ChangeFeedProcessor changeFeedProcessorSecond = ChangeFeedProcessor.Builder() .hostName(ownerSecond) .handleChanges(docs -> { ChangeFeedProcessorTest.log.info("START processing from thread {} using host {}", Thread.currentThread().getId(), ownerSecond); for (CosmosItemProperties item : docs) { processItem(item); } ChangeFeedProcessorTest.log.info("END processing from thread {} using host {}", Thread.currentThread().getId(), ownerSecond); }) .feedContainer(createdFeedCollection) .leaseContainer(createdLeaseCollection) .options(new ChangeFeedProcessorOptions() .leaseRenewInterval(Duration.ofSeconds(10)) .leaseAcquireInterval(Duration.ofSeconds(5)) .leaseExpirationInterval(Duration.ofSeconds(20)) .feedPollDelay(Duration.ofSeconds(2)) .leasePrefix(leasePrefix) .maxItemCount(10) .startFromBeginning(true) .maxScaleCount(0) ) .build(); receivedDocuments = new ConcurrentHashMap<>(); try { changeFeedProcessorFirst.start().subscribeOn(Schedulers.elastic()) .timeout(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT)) .then(Mono.just(changeFeedProcessorFirst) .delayElement(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT)) .flatMap( value -> changeFeedProcessorFirst.stop() .subscribeOn(Schedulers.elastic()) .timeout(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT)) )) 
.then(Mono.just(changeFeedProcessorFirst) .delayElement(Duration.ofMillis(CHANGE_FEED_PROCESSOR_TIMEOUT)) ) .doOnSuccess(aVoid -> { try { Thread.sleep(CHANGE_FEED_PROCESSOR_TIMEOUT / 2); } catch (InterruptedException e) { e.printStackTrace(); } ChangeFeedProcessorTest.log.info("Update leases for Change feed processor in thread {} using host {}", Thread.currentThread().getId(), "Owner_first"); SqlParameter param = new SqlParameter(); param.name("@PartitionLeasePrefix"); param.value(leasePrefix); SqlQuerySpec querySpec = new SqlQuerySpec( "SELECT * FROM c WHERE STARTSWITH(c.id, @PartitionLeasePrefix)", new SqlParameterList(param)); FeedOptions feedOptions = new FeedOptions(); feedOptions.enableCrossPartitionQuery(true); createdLeaseCollection.queryItems(querySpec, feedOptions) .delayElements(Duration.ofMillis(CHANGE_FEED_PROCESSOR_TIMEOUT / 2)) .flatMap(documentFeedResponse -> reactor.core.publisher.Flux.fromIterable(documentFeedResponse.results())) .flatMap(doc -> { BridgeInternal.setProperty(doc, "Owner", "TEMP_OWNER"); CosmosItemRequestOptions options = new CosmosItemRequestOptions(); options.partitionKey(new PartitionKey(doc.id())); return createdLeaseCollection.getItem(doc.id(), "/id") .replace(doc, options) .map(CosmosItemResponse::properties); }) .map(ServiceItemLease::fromDocument) .map(leaseDocument -> { ChangeFeedProcessorTest.log.info("QueryItems after Change feed processor processing; found host {}", leaseDocument.getOwner()); return leaseDocument; }) .last() .delayElement(Duration.ofMillis(CHANGE_FEED_PROCESSOR_TIMEOUT / 2)) .flatMap(leaseDocument -> { ChangeFeedProcessorTest.log.info("Start creating documents"); List<CosmosItemProperties> docDefList = new ArrayList<>(); for(int i = 0; i < FEED_COUNT; i++) { docDefList.add(getDocumentDefinition()); } return bulkInsert(createdFeedCollection, docDefList, FEED_COUNT) .last() .delayElement(Duration.ofMillis(1000)) .flatMap(cosmosItemResponse -> { ChangeFeedProcessorTest.log.info("Start second Change feed 
processor"); return changeFeedProcessorSecond.start().subscribeOn(Schedulers.elastic()) .timeout(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT)); }); }) .subscribe(); }) .subscribe(); } catch (Exception ex) { log.error("First change feed processor did not start in the expected time", ex); } long remainingWork = 40 * CHANGE_FEED_PROCESSOR_TIMEOUT; while (remainingWork > 0 && receivedDocuments.size() < FEED_COUNT) { remainingWork -= 100; try { Thread.sleep(100); } catch (InterruptedException e) { e.printStackTrace(); } } assertThat(remainingWork >= 0).as("Failed to receive all the feed documents").isTrue(); changeFeedProcessorSecond.stop().subscribeOn(Schedulers.elastic()).timeout(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT)).subscribe(); try { Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT); } catch (InterruptedException e) { e.printStackTrace(); } receivedDocuments.clear(); }
e.printStackTrace();
public void staledLeaseAcquiring() { final String ownerFirst = "Owner_First"; final String ownerSecond = "Owner_Second"; final String leasePrefix = "TEST"; ChangeFeedProcessor changeFeedProcessorFirst = ChangeFeedProcessor.Builder() .hostName(ownerFirst) .handleChanges(docs -> { ChangeFeedProcessorTest.log.info("START processing from thread {} using host {}", Thread.currentThread().getId(), ownerFirst); ChangeFeedProcessorTest.log.info("END processing from thread {} using host {}", Thread.currentThread().getId(), ownerFirst); }) .feedContainer(createdFeedCollection) .leaseContainer(createdLeaseCollection) .options(new ChangeFeedProcessorOptions() .leasePrefix(leasePrefix) ) .build(); ChangeFeedProcessor changeFeedProcessorSecond = ChangeFeedProcessor.Builder() .hostName(ownerSecond) .handleChanges(docs -> { ChangeFeedProcessorTest.log.info("START processing from thread {} using host {}", Thread.currentThread().getId(), ownerSecond); for (CosmosItemProperties item : docs) { processItem(item); } ChangeFeedProcessorTest.log.info("END processing from thread {} using host {}", Thread.currentThread().getId(), ownerSecond); }) .feedContainer(createdFeedCollection) .leaseContainer(createdLeaseCollection) .options(new ChangeFeedProcessorOptions() .leaseRenewInterval(Duration.ofSeconds(10)) .leaseAcquireInterval(Duration.ofSeconds(5)) .leaseExpirationInterval(Duration.ofSeconds(20)) .feedPollDelay(Duration.ofSeconds(2)) .leasePrefix(leasePrefix) .maxItemCount(10) .startFromBeginning(true) .maxScaleCount(0) ) .build(); receivedDocuments = new ConcurrentHashMap<>(); try { changeFeedProcessorFirst.start().subscribeOn(Schedulers.elastic()) .timeout(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT)) .then(Mono.just(changeFeedProcessorFirst) .delayElement(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT)) .flatMap( value -> changeFeedProcessorFirst.stop() .subscribeOn(Schedulers.elastic()) .timeout(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT)) )) 
.then(Mono.just(changeFeedProcessorFirst) .delayElement(Duration.ofMillis(CHANGE_FEED_PROCESSOR_TIMEOUT)) ) .doOnSuccess(aVoid -> { try { Thread.sleep(CHANGE_FEED_PROCESSOR_TIMEOUT / 2); } catch (InterruptedException e) { log.error(e.getMessage()); } ChangeFeedProcessorTest.log.info("Update leases for Change feed processor in thread {} using host {}", Thread.currentThread().getId(), "Owner_first"); SqlParameter param = new SqlParameter(); param.name("@PartitionLeasePrefix"); param.value(leasePrefix); SqlQuerySpec querySpec = new SqlQuerySpec( "SELECT * FROM c WHERE STARTSWITH(c.id, @PartitionLeasePrefix)", new SqlParameterList(param)); FeedOptions feedOptions = new FeedOptions(); feedOptions.enableCrossPartitionQuery(true); createdLeaseCollection.queryItems(querySpec, feedOptions) .delayElements(Duration.ofMillis(CHANGE_FEED_PROCESSOR_TIMEOUT / 2)) .flatMap(documentFeedResponse -> reactor.core.publisher.Flux.fromIterable(documentFeedResponse.results())) .flatMap(doc -> { BridgeInternal.setProperty(doc, "Owner", "TEMP_OWNER"); CosmosItemRequestOptions options = new CosmosItemRequestOptions(); options.partitionKey(new PartitionKey(doc.id())); return createdLeaseCollection.getItem(doc.id(), "/id") .replace(doc, options) .map(CosmosItemResponse::properties); }) .map(ServiceItemLease::fromDocument) .map(leaseDocument -> { ChangeFeedProcessorTest.log.info("QueryItems after Change feed processor processing; found host {}", leaseDocument.getOwner()); return leaseDocument; }) .last() .delayElement(Duration.ofMillis(CHANGE_FEED_PROCESSOR_TIMEOUT / 2)) .flatMap(leaseDocument -> { ChangeFeedProcessorTest.log.info("Start creating documents"); List<CosmosItemProperties> docDefList = new ArrayList<>(); for(int i = 0; i < FEED_COUNT; i++) { docDefList.add(getDocumentDefinition()); } return bulkInsert(createdFeedCollection, docDefList, FEED_COUNT) .last() .delayElement(Duration.ofMillis(1000)) .flatMap(cosmosItemResponse -> { ChangeFeedProcessorTest.log.info("Start second Change 
feed processor"); return changeFeedProcessorSecond.start().subscribeOn(Schedulers.elastic()) .timeout(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT)); }); }) .subscribe(); }) .subscribe(); } catch (Exception ex) { log.error("First change feed processor did not start in the expected time", ex); } long remainingWork = 40 * CHANGE_FEED_PROCESSOR_TIMEOUT; while (remainingWork > 0 && receivedDocuments.size() < FEED_COUNT) { remainingWork -= 100; try { Thread.sleep(100); } catch (InterruptedException e) { log.error(e.getMessage()); } } assertThat(remainingWork >= 0).as("Failed to receive all the feed documents").isTrue(); changeFeedProcessorSecond.stop().subscribeOn(Schedulers.elastic()).timeout(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT)).subscribe(); try { Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT); } catch (InterruptedException e) { log.error(e.getMessage()); } receivedDocuments.clear(); }
/**
 * Emulator tests for ChangeFeedProcessor.
 * Fix vs. previous revision: InterruptedException handlers now log via SLF4J with the
 * exception attached (no printStackTrace()) and restore the interrupt flag; the silent
 * catch in afterMethod() now logs while staying best-effort.
 */
class ChangeFeedProcessorTest extends TestSuiteBase {
    private final static Logger log = LoggerFactory.getLogger(ChangeFeedProcessorTest.class);
    private CosmosDatabase createdDatabase;
    private CosmosContainer createdFeedCollection;
    private CosmosContainer createdLeaseCollection;
    private List<CosmosItemProperties> createdDocuments;
    private static Map<String, CosmosItemProperties> receivedDocuments;
    private final String hostName = RandomStringUtils.randomAlphabetic(6);
    private final int FEED_COUNT = 10;
    private final int CHANGE_FEED_PROCESSOR_TIMEOUT = 5000;
    private CosmosClient client;
    private ChangeFeedProcessor changeFeedProcessor;

    @Factory(dataProvider = "clientBuilders")
    public ChangeFeedProcessorTest(CosmosClientBuilder clientBuilder) {
        super(clientBuilder);
    }

    @Test(groups = { "emulator" }, timeOut = TIMEOUT)
    public void readFeedDocumentsStartFromBeginning() {
        setupReadFeedDocuments();

        changeFeedProcessor = ChangeFeedProcessor.Builder()
            .hostName(hostName)
            .handleChanges(docs -> {
                ChangeFeedProcessorTest.log.info("START processing from thread {}", Thread.currentThread().getId());
                for (CosmosItemProperties item : docs) {
                    processItem(item);
                }
                ChangeFeedProcessorTest.log.info("END processing from thread {}", Thread.currentThread().getId());
            })
            .feedContainer(createdFeedCollection)
            .leaseContainer(createdLeaseCollection)
            .options(new ChangeFeedProcessorOptions()
                .leaseRenewInterval(Duration.ofSeconds(20))
                .leaseAcquireInterval(Duration.ofSeconds(10))
                .leaseExpirationInterval(Duration.ofSeconds(30))
                .feedPollDelay(Duration.ofSeconds(2))
                .leasePrefix("TEST")
                .maxItemCount(10)
                .startFromBeginning(true)
                .maxScaleCount(0)
                .discardExistingLeases(true))
            .build();

        try {
            changeFeedProcessor.start().subscribeOn(Schedulers.elastic())
                .timeout(Duration.ofMillis(CHANGE_FEED_PROCESSOR_TIMEOUT))
                .subscribe();
        } catch (Exception ex) {
            log.error("Change feed processor did not start in the expected time", ex);
        }

        try {
            Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
        } catch (InterruptedException e) {
            // Fix: log instead of printStackTrace() and restore the interrupt flag.
            Thread.currentThread().interrupt();
            log.error(e.getMessage(), e);
        }

        changeFeedProcessor.stop().subscribeOn(Schedulers.elastic()).timeout(Duration.ofMillis(CHANGE_FEED_PROCESSOR_TIMEOUT)).subscribe();

        for (CosmosItemProperties item : createdDocuments) {
            assertThat(receivedDocuments.containsKey(item.id())).as("Document with id: " + item.id()).isTrue();
        }

        try {
            Thread.sleep(CHANGE_FEED_PROCESSOR_TIMEOUT);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            log.error(e.getMessage(), e);
        }
        receivedDocuments.clear();
    }

    @Test(groups = { "emulator" }, timeOut = TIMEOUT)
    public void readFeedDocumentsStartFromCustomDate() {
        ChangeFeedProcessor changeFeedProcessor = ChangeFeedProcessor.Builder()
            .hostName(hostName)
            .handleChanges(docs -> {
                ChangeFeedProcessorTest.log.info("START processing from thread {}", Thread.currentThread().getId());
                for (CosmosItemProperties item : docs) {
                    processItem(item);
                }
                ChangeFeedProcessorTest.log.info("END processing from thread {}", Thread.currentThread().getId());
            })
            .feedContainer(createdFeedCollection)
            .leaseContainer(createdLeaseCollection)
            .options(new ChangeFeedProcessorOptions()
                .leaseRenewInterval(Duration.ofSeconds(20))
                .leaseAcquireInterval(Duration.ofSeconds(10))
                .leaseExpirationInterval(Duration.ofSeconds(30))
                .feedPollDelay(Duration.ofSeconds(1))
                .leasePrefix("TEST")
                .maxItemCount(10)
                .startTime(OffsetDateTime.now().minusDays(1))
                .minScaleCount(1)
                .maxScaleCount(3)
                .discardExistingLeases(true))
            .build();

        try {
            changeFeedProcessor.start().subscribeOn(Schedulers.elastic())
                .timeout(Duration.ofMillis(CHANGE_FEED_PROCESSOR_TIMEOUT))
                .subscribe();
        } catch (Exception ex) {
            log.error("Change feed processor did not start in the expected time", ex);
        }

        setupReadFeedDocuments();

        // Poll until every document has been observed or the budget runs out.
        long remainingWork = FEED_TIMEOUT;
        while (remainingWork > 0 && receivedDocuments.size() < FEED_COUNT) {
            remainingWork -= 100;
            try {
                Thread.sleep(100);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                log.error(e.getMessage(), e);
            }
        }

        assertThat(remainingWork >= 0).as("Failed to receive all the feed documents").isTrue();

        changeFeedProcessor.stop().subscribeOn(Schedulers.elastic()).timeout(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT)).subscribe();

        for (CosmosItemProperties item : createdDocuments) {
            assertThat(receivedDocuments.containsKey(item.id())).as("Document with id: " + item.id()).isTrue();
        }

        try {
            Thread.sleep(CHANGE_FEED_PROCESSOR_TIMEOUT);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            log.error(e.getMessage(), e);
        }
        receivedDocuments.clear();
    }

    // NOTE(review): this @Test annotation appears orphaned (its test body is not present
    // in this excerpt) and sits on a @BeforeMethod method; kept as found — TODO confirm
    // against the full file.
    @Test(groups = { "emulator" }, timeOut = 40 * CHANGE_FEED_PROCESSOR_TIMEOUT)
    @BeforeMethod(groups = { "emulator" }, timeOut = 2 * SETUP_TIMEOUT, alwaysRun = true)
    public void beforeMethod() {
        createdFeedCollection = createFeedCollection();
        createdLeaseCollection = createLeaseCollection();
    }

    @BeforeClass(groups = { "emulator" }, timeOut = SETUP_TIMEOUT, alwaysRun = true)
    public void beforeClass() {
        client = clientBuilder().build();
        createdDatabase = getSharedCosmosDatabase(client);
    }

    @AfterMethod(groups = { "emulator" }, timeOut = 3 * SHUTDOWN_TIMEOUT, alwaysRun = true)
    public void afterMethod() {
        safeDeleteCollection(createdFeedCollection);
        safeDeleteCollection(createdLeaseCollection);

        try {
            Thread.sleep(500);
        } catch (Exception e) {
            // Fix: previously swallowed silently; cleanup stays best-effort, but log it.
            log.error(e.getMessage(), e);
        }
    }

    @AfterClass(groups = { "emulator" }, timeOut = 2 * SHUTDOWN_TIMEOUT, alwaysRun = true)
    public void afterClass() {
        safeClose(client);
    }

    /** Seeds FEED_COUNT documents into the feed collection and resets the received map. */
    private void setupReadFeedDocuments() {
        receivedDocuments = new ConcurrentHashMap<>();
        List<CosmosItemProperties> docDefList = new ArrayList<>();
        for (int i = 0; i < FEED_COUNT; i++) {
            docDefList.add(getDocumentDefinition());
        }
        createdDocuments = bulkInsertBlocking(createdFeedCollection, docDefList);
        waitIfNeededForReplicasToCatchUp(clientBuilder());
    }

    private CosmosItemProperties getDocumentDefinition() {
        String uuid = UUID.randomUUID().toString();
        CosmosItemProperties doc = new CosmosItemProperties(String.format("{ "
            + "\"id\": \"%s\", "
            + "\"mypk\": \"%s\", "
            + "\"sgmts\": [[6519456, 1471916863], [2498434, 1455671440]]"
            + "}", uuid, uuid));
        return doc;
    }

    private CosmosContainer createFeedCollection() {
        CosmosContainerRequestOptions optionsFeedCollection = new CosmosContainerRequestOptions();
        return createCollection(createdDatabase, getCollectionDefinition(), optionsFeedCollection, 10100);
    }

    private CosmosContainer createLeaseCollection() {
        CosmosContainerRequestOptions options = new CosmosContainerRequestOptions();
        CosmosContainerProperties collectionDefinition = new CosmosContainerProperties(UUID.randomUUID().toString(), "/id");
        return createCollection(createdDatabase, collectionDefinition, options, 400);
    }

    // synchronized: handleChanges callbacks may arrive from multiple pump threads.
    private static synchronized void processItem(CosmosItemProperties item) {
        ChangeFeedProcessorTest.log.info("RECEIVED {}", item.toJson(SerializationFormattingPolicy.INDENTED));
        receivedDocuments.put(item.id(), item);
    }
}
class ChangeFeedProcessorTest extends TestSuiteBase { private final static Logger log = LoggerFactory.getLogger(ChangeFeedProcessorTest.class); private CosmosDatabase createdDatabase; private CosmosContainer createdFeedCollection; private CosmosContainer createdLeaseCollection; private List<CosmosItemProperties> createdDocuments; private static Map<String, CosmosItemProperties> receivedDocuments; private final String hostName = RandomStringUtils.randomAlphabetic(6); private final int FEED_COUNT = 10; private final int CHANGE_FEED_PROCESSOR_TIMEOUT = 5000; private CosmosClient client; private ChangeFeedProcessor changeFeedProcessor; @Factory(dataProvider = "clientBuilders") public ChangeFeedProcessorTest(CosmosClientBuilder clientBuilder) { super(clientBuilder); } @Test(groups = { "emulator" }, timeOut = TIMEOUT) public void readFeedDocumentsStartFromBeginning() { setupReadFeedDocuments(); changeFeedProcessor = ChangeFeedProcessor.Builder() .hostName(hostName) .handleChanges(docs -> { ChangeFeedProcessorTest.log.info("START processing from thread {}", Thread.currentThread().getId()); for (CosmosItemProperties item : docs) { processItem(item); } ChangeFeedProcessorTest.log.info("END processing from thread {}", Thread.currentThread().getId()); }) .feedContainer(createdFeedCollection) .leaseContainer(createdLeaseCollection) .options(new ChangeFeedProcessorOptions() .leaseRenewInterval(Duration.ofSeconds(20)) .leaseAcquireInterval(Duration.ofSeconds(10)) .leaseExpirationInterval(Duration.ofSeconds(30)) .feedPollDelay(Duration.ofSeconds(2)) .leasePrefix("TEST") .maxItemCount(10) .startFromBeginning(true) .maxScaleCount(0) .discardExistingLeases(true) ) .build(); try { changeFeedProcessor.start().subscribeOn(Schedulers.elastic()) .timeout(Duration.ofMillis(CHANGE_FEED_PROCESSOR_TIMEOUT)) .subscribe(); } catch (Exception ex) { log.error("Change feed processor did not start in the expected time", ex); } try { Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT); } catch 
(InterruptedException e) { log.error(e.getMessage()); } changeFeedProcessor.stop().subscribeOn(Schedulers.elastic()).timeout(Duration.ofMillis(CHANGE_FEED_PROCESSOR_TIMEOUT)).subscribe(); for (CosmosItemProperties item : createdDocuments) { assertThat(receivedDocuments.containsKey(item.id())).as("Document with id: " + item.id()).isTrue(); } try { Thread.sleep(CHANGE_FEED_PROCESSOR_TIMEOUT); } catch (InterruptedException e) { log.error(e.getMessage()); } receivedDocuments.clear(); } @Test(groups = { "emulator" }, timeOut = TIMEOUT) public void readFeedDocumentsStartFromCustomDate() { ChangeFeedProcessor changeFeedProcessor = ChangeFeedProcessor.Builder() .hostName(hostName) .handleChanges(docs -> { ChangeFeedProcessorTest.log.info("START processing from thread {}", Thread.currentThread().getId()); for (CosmosItemProperties item : docs) { processItem(item); } ChangeFeedProcessorTest.log.info("END processing from thread {}", Thread.currentThread().getId()); }) .feedContainer(createdFeedCollection) .leaseContainer(createdLeaseCollection) .options(new ChangeFeedProcessorOptions() .leaseRenewInterval(Duration.ofSeconds(20)) .leaseAcquireInterval(Duration.ofSeconds(10)) .leaseExpirationInterval(Duration.ofSeconds(30)) .feedPollDelay(Duration.ofSeconds(1)) .leasePrefix("TEST") .maxItemCount(10) .startTime(OffsetDateTime.now().minusDays(1)) .minScaleCount(1) .maxScaleCount(3) .discardExistingLeases(true) ) .build(); try { changeFeedProcessor.start().subscribeOn(Schedulers.elastic()) .timeout(Duration.ofMillis(CHANGE_FEED_PROCESSOR_TIMEOUT)) .subscribe(); } catch (Exception ex) { log.error("Change feed processor did not start in the expected time", ex); } setupReadFeedDocuments(); long remainingWork = FEED_TIMEOUT; while (remainingWork > 0 && receivedDocuments.size() < FEED_COUNT) { remainingWork -= 100; try { Thread.sleep(100); } catch (InterruptedException e) { log.error(e.getMessage()); } } assertThat(remainingWork >= 0).as("Failed to receive all the feed 
documents").isTrue(); changeFeedProcessor.stop().subscribeOn(Schedulers.elastic()).timeout(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT)).subscribe(); for (CosmosItemProperties item : createdDocuments) { assertThat(receivedDocuments.containsKey(item.id())).as("Document with id: " + item.id()).isTrue(); } try { Thread.sleep(CHANGE_FEED_PROCESSOR_TIMEOUT); } catch (InterruptedException e) { log.error(e.getMessage()); } receivedDocuments.clear(); } @Test(groups = { "emulator" }, timeOut = 40 * CHANGE_FEED_PROCESSOR_TIMEOUT) @BeforeMethod(groups = { "emulator" }, timeOut = 2 * SETUP_TIMEOUT, alwaysRun = true) public void beforeMethod() { createdFeedCollection = createFeedCollection(); createdLeaseCollection = createLeaseCollection(); } @BeforeClass(groups = { "emulator" }, timeOut = SETUP_TIMEOUT, alwaysRun = true) public void beforeClass() { client = clientBuilder().build(); createdDatabase = getSharedCosmosDatabase(client); } @AfterMethod(groups = { "emulator" }, timeOut = 3 * SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterMethod() { safeDeleteCollection(createdFeedCollection); safeDeleteCollection(createdLeaseCollection); try { Thread.sleep(500); } catch (Exception e){ } } @AfterClass(groups = { "emulator" }, timeOut = 2 * SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() { safeClose(client); } private void setupReadFeedDocuments() { receivedDocuments = new ConcurrentHashMap<>(); List<CosmosItemProperties> docDefList = new ArrayList<>(); for(int i = 0; i < FEED_COUNT; i++) { docDefList.add(getDocumentDefinition()); } createdDocuments = bulkInsertBlocking(createdFeedCollection, docDefList); waitIfNeededForReplicasToCatchUp(clientBuilder()); } private CosmosItemProperties getDocumentDefinition() { String uuid = UUID.randomUUID().toString(); CosmosItemProperties doc = new CosmosItemProperties(String.format("{ " + "\"id\": \"%s\", " + "\"mypk\": \"%s\", " + "\"sgmts\": [[6519456, 1471916863], [2498434, 1455671440]]" + "}" , uuid, uuid)); return 
doc; } private CosmosContainer createFeedCollection() { CosmosContainerRequestOptions optionsFeedCollection = new CosmosContainerRequestOptions(); return createCollection(createdDatabase, getCollectionDefinition(), optionsFeedCollection, 10100); } private CosmosContainer createLeaseCollection() { CosmosContainerRequestOptions options = new CosmosContainerRequestOptions(); CosmosContainerProperties collectionDefinition = new CosmosContainerProperties(UUID.randomUUID().toString(), "/id"); return createCollection(createdDatabase, collectionDefinition, options, 400); } private static synchronized void processItem(CosmosItemProperties item) { ChangeFeedProcessorTest.log.info("RECEIVED {}", item.toJson(SerializationFormattingPolicy.INDENTED)); receivedDocuments.put(item.id(), item); } }
fixed
public void staledLeaseAcquiring() { final String ownerFirst = "Owner_First"; final String ownerSecond = "Owner_Second"; final String leasePrefix = "TEST"; ChangeFeedProcessor changeFeedProcessorFirst = ChangeFeedProcessor.Builder() .hostName(ownerFirst) .handleChanges(docs -> { ChangeFeedProcessorTest.log.info("START processing from thread {} using host {}", Thread.currentThread().getId(), ownerFirst); ChangeFeedProcessorTest.log.info("END processing from thread {} using host {}", Thread.currentThread().getId(), ownerFirst); }) .feedContainer(createdFeedCollection) .leaseContainer(createdLeaseCollection) .options(new ChangeFeedProcessorOptions() .leasePrefix(leasePrefix) ) .build(); ChangeFeedProcessor changeFeedProcessorSecond = ChangeFeedProcessor.Builder() .hostName(ownerSecond) .handleChanges(docs -> { ChangeFeedProcessorTest.log.info("START processing from thread {} using host {}", Thread.currentThread().getId(), ownerSecond); for (CosmosItemProperties item : docs) { processItem(item); } ChangeFeedProcessorTest.log.info("END processing from thread {} using host {}", Thread.currentThread().getId(), ownerSecond); }) .feedContainer(createdFeedCollection) .leaseContainer(createdLeaseCollection) .options(new ChangeFeedProcessorOptions() .leaseRenewInterval(Duration.ofSeconds(10)) .leaseAcquireInterval(Duration.ofSeconds(5)) .leaseExpirationInterval(Duration.ofSeconds(20)) .feedPollDelay(Duration.ofSeconds(2)) .leasePrefix(leasePrefix) .maxItemCount(10) .startFromBeginning(true) .maxScaleCount(0) ) .build(); receivedDocuments = new ConcurrentHashMap<>(); try { changeFeedProcessorFirst.start().subscribeOn(Schedulers.elastic()) .timeout(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT)) .then(Mono.just(changeFeedProcessorFirst) .delayElement(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT)) .flatMap( value -> changeFeedProcessorFirst.stop() .subscribeOn(Schedulers.elastic()) .timeout(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT)) )) 
.then(Mono.just(changeFeedProcessorFirst) .delayElement(Duration.ofMillis(CHANGE_FEED_PROCESSOR_TIMEOUT)) ) .doOnSuccess(aVoid -> { try { Thread.sleep(CHANGE_FEED_PROCESSOR_TIMEOUT / 2); } catch (InterruptedException e) { e.printStackTrace(); } ChangeFeedProcessorTest.log.info("Update leases for Change feed processor in thread {} using host {}", Thread.currentThread().getId(), "Owner_first"); SqlParameter param = new SqlParameter(); param.name("@PartitionLeasePrefix"); param.value(leasePrefix); SqlQuerySpec querySpec = new SqlQuerySpec( "SELECT * FROM c WHERE STARTSWITH(c.id, @PartitionLeasePrefix)", new SqlParameterList(param)); FeedOptions feedOptions = new FeedOptions(); feedOptions.enableCrossPartitionQuery(true); createdLeaseCollection.queryItems(querySpec, feedOptions) .delayElements(Duration.ofMillis(CHANGE_FEED_PROCESSOR_TIMEOUT / 2)) .flatMap(documentFeedResponse -> reactor.core.publisher.Flux.fromIterable(documentFeedResponse.results())) .flatMap(doc -> { BridgeInternal.setProperty(doc, "Owner", "TEMP_OWNER"); CosmosItemRequestOptions options = new CosmosItemRequestOptions(); options.partitionKey(new PartitionKey(doc.id())); return createdLeaseCollection.getItem(doc.id(), "/id") .replace(doc, options) .map(CosmosItemResponse::properties); }) .map(ServiceItemLease::fromDocument) .map(leaseDocument -> { ChangeFeedProcessorTest.log.info("QueryItems after Change feed processor processing; found host {}", leaseDocument.getOwner()); return leaseDocument; }) .last() .delayElement(Duration.ofMillis(CHANGE_FEED_PROCESSOR_TIMEOUT / 2)) .flatMap(leaseDocument -> { ChangeFeedProcessorTest.log.info("Start creating documents"); List<CosmosItemProperties> docDefList = new ArrayList<>(); for(int i = 0; i < FEED_COUNT; i++) { docDefList.add(getDocumentDefinition()); } return bulkInsert(createdFeedCollection, docDefList, FEED_COUNT) .last() .delayElement(Duration.ofMillis(1000)) .flatMap(cosmosItemResponse -> { ChangeFeedProcessorTest.log.info("Start second Change feed 
processor"); return changeFeedProcessorSecond.start().subscribeOn(Schedulers.elastic()) .timeout(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT)); }); }) .subscribe(); }) .subscribe(); } catch (Exception ex) { log.error("First change feed processor did not start in the expected time", ex); } long remainingWork = 40 * CHANGE_FEED_PROCESSOR_TIMEOUT; while (remainingWork > 0 && receivedDocuments.size() < FEED_COUNT) { remainingWork -= 100; try { Thread.sleep(100); } catch (InterruptedException e) { e.printStackTrace(); } } assertThat(remainingWork >= 0).as("Failed to receive all the feed documents").isTrue(); changeFeedProcessorSecond.stop().subscribeOn(Schedulers.elastic()).timeout(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT)).subscribe(); try { Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT); } catch (InterruptedException e) { e.printStackTrace(); } receivedDocuments.clear(); }
e.printStackTrace();
public void staledLeaseAcquiring() { final String ownerFirst = "Owner_First"; final String ownerSecond = "Owner_Second"; final String leasePrefix = "TEST"; ChangeFeedProcessor changeFeedProcessorFirst = ChangeFeedProcessor.Builder() .hostName(ownerFirst) .handleChanges(docs -> { ChangeFeedProcessorTest.log.info("START processing from thread {} using host {}", Thread.currentThread().getId(), ownerFirst); ChangeFeedProcessorTest.log.info("END processing from thread {} using host {}", Thread.currentThread().getId(), ownerFirst); }) .feedContainer(createdFeedCollection) .leaseContainer(createdLeaseCollection) .options(new ChangeFeedProcessorOptions() .leasePrefix(leasePrefix) ) .build(); ChangeFeedProcessor changeFeedProcessorSecond = ChangeFeedProcessor.Builder() .hostName(ownerSecond) .handleChanges(docs -> { ChangeFeedProcessorTest.log.info("START processing from thread {} using host {}", Thread.currentThread().getId(), ownerSecond); for (CosmosItemProperties item : docs) { processItem(item); } ChangeFeedProcessorTest.log.info("END processing from thread {} using host {}", Thread.currentThread().getId(), ownerSecond); }) .feedContainer(createdFeedCollection) .leaseContainer(createdLeaseCollection) .options(new ChangeFeedProcessorOptions() .leaseRenewInterval(Duration.ofSeconds(10)) .leaseAcquireInterval(Duration.ofSeconds(5)) .leaseExpirationInterval(Duration.ofSeconds(20)) .feedPollDelay(Duration.ofSeconds(2)) .leasePrefix(leasePrefix) .maxItemCount(10) .startFromBeginning(true) .maxScaleCount(0) ) .build(); receivedDocuments = new ConcurrentHashMap<>(); try { changeFeedProcessorFirst.start().subscribeOn(Schedulers.elastic()) .timeout(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT)) .then(Mono.just(changeFeedProcessorFirst) .delayElement(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT)) .flatMap( value -> changeFeedProcessorFirst.stop() .subscribeOn(Schedulers.elastic()) .timeout(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT)) )) 
.then(Mono.just(changeFeedProcessorFirst) .delayElement(Duration.ofMillis(CHANGE_FEED_PROCESSOR_TIMEOUT)) ) .doOnSuccess(aVoid -> { try { Thread.sleep(CHANGE_FEED_PROCESSOR_TIMEOUT / 2); } catch (InterruptedException e) { log.error(e.getMessage()); } ChangeFeedProcessorTest.log.info("Update leases for Change feed processor in thread {} using host {}", Thread.currentThread().getId(), "Owner_first"); SqlParameter param = new SqlParameter(); param.name("@PartitionLeasePrefix"); param.value(leasePrefix); SqlQuerySpec querySpec = new SqlQuerySpec( "SELECT * FROM c WHERE STARTSWITH(c.id, @PartitionLeasePrefix)", new SqlParameterList(param)); FeedOptions feedOptions = new FeedOptions(); feedOptions.enableCrossPartitionQuery(true); createdLeaseCollection.queryItems(querySpec, feedOptions) .delayElements(Duration.ofMillis(CHANGE_FEED_PROCESSOR_TIMEOUT / 2)) .flatMap(documentFeedResponse -> reactor.core.publisher.Flux.fromIterable(documentFeedResponse.results())) .flatMap(doc -> { BridgeInternal.setProperty(doc, "Owner", "TEMP_OWNER"); CosmosItemRequestOptions options = new CosmosItemRequestOptions(); options.partitionKey(new PartitionKey(doc.id())); return createdLeaseCollection.getItem(doc.id(), "/id") .replace(doc, options) .map(CosmosItemResponse::properties); }) .map(ServiceItemLease::fromDocument) .map(leaseDocument -> { ChangeFeedProcessorTest.log.info("QueryItems after Change feed processor processing; found host {}", leaseDocument.getOwner()); return leaseDocument; }) .last() .delayElement(Duration.ofMillis(CHANGE_FEED_PROCESSOR_TIMEOUT / 2)) .flatMap(leaseDocument -> { ChangeFeedProcessorTest.log.info("Start creating documents"); List<CosmosItemProperties> docDefList = new ArrayList<>(); for(int i = 0; i < FEED_COUNT; i++) { docDefList.add(getDocumentDefinition()); } return bulkInsert(createdFeedCollection, docDefList, FEED_COUNT) .last() .delayElement(Duration.ofMillis(1000)) .flatMap(cosmosItemResponse -> { ChangeFeedProcessorTest.log.info("Start second Change 
feed processor"); return changeFeedProcessorSecond.start().subscribeOn(Schedulers.elastic()) .timeout(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT)); }); }) .subscribe(); }) .subscribe(); } catch (Exception ex) { log.error("First change feed processor did not start in the expected time", ex); } long remainingWork = 40 * CHANGE_FEED_PROCESSOR_TIMEOUT; while (remainingWork > 0 && receivedDocuments.size() < FEED_COUNT) { remainingWork -= 100; try { Thread.sleep(100); } catch (InterruptedException e) { log.error(e.getMessage()); } } assertThat(remainingWork >= 0).as("Failed to receive all the feed documents").isTrue(); changeFeedProcessorSecond.stop().subscribeOn(Schedulers.elastic()).timeout(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT)).subscribe(); try { Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT); } catch (InterruptedException e) { log.error(e.getMessage()); } receivedDocuments.clear(); }
class ChangeFeedProcessorTest extends TestSuiteBase { private final static Logger log = LoggerFactory.getLogger(ChangeFeedProcessorTest.class); private CosmosDatabase createdDatabase; private CosmosContainer createdFeedCollection; private CosmosContainer createdLeaseCollection; private List<CosmosItemProperties> createdDocuments; private static Map<String, CosmosItemProperties> receivedDocuments; private final String hostName = RandomStringUtils.randomAlphabetic(6); private final int FEED_COUNT = 10; private final int CHANGE_FEED_PROCESSOR_TIMEOUT = 5000; private CosmosClient client; private ChangeFeedProcessor changeFeedProcessor; @Factory(dataProvider = "clientBuilders") public ChangeFeedProcessorTest(CosmosClientBuilder clientBuilder) { super(clientBuilder); } @Test(groups = { "emulator" }, timeOut = TIMEOUT) public void readFeedDocumentsStartFromBeginning() { setupReadFeedDocuments(); changeFeedProcessor = ChangeFeedProcessor.Builder() .hostName(hostName) .handleChanges(docs -> { ChangeFeedProcessorTest.log.info("START processing from thread {}", Thread.currentThread().getId()); for (CosmosItemProperties item : docs) { processItem(item); } ChangeFeedProcessorTest.log.info("END processing from thread {}", Thread.currentThread().getId()); }) .feedContainer(createdFeedCollection) .leaseContainer(createdLeaseCollection) .options(new ChangeFeedProcessorOptions() .leaseRenewInterval(Duration.ofSeconds(20)) .leaseAcquireInterval(Duration.ofSeconds(10)) .leaseExpirationInterval(Duration.ofSeconds(30)) .feedPollDelay(Duration.ofSeconds(2)) .leasePrefix("TEST") .maxItemCount(10) .startFromBeginning(true) .maxScaleCount(0) .discardExistingLeases(true) ) .build(); try { changeFeedProcessor.start().subscribeOn(Schedulers.elastic()) .timeout(Duration.ofMillis(CHANGE_FEED_PROCESSOR_TIMEOUT)) .subscribe(); } catch (Exception ex) { log.error("Change feed processor did not start in the expected time", ex); } try { Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT); } catch 
(InterruptedException e) { e.printStackTrace(); } changeFeedProcessor.stop().subscribeOn(Schedulers.elastic()).timeout(Duration.ofMillis(CHANGE_FEED_PROCESSOR_TIMEOUT)).subscribe(); for (CosmosItemProperties item : createdDocuments) { assertThat(receivedDocuments.containsKey(item.id())).as("Document with id: " + item.id()).isTrue(); } try { Thread.sleep(CHANGE_FEED_PROCESSOR_TIMEOUT); } catch (InterruptedException e) { e.printStackTrace(); } receivedDocuments.clear(); } @Test(groups = { "emulator" }, timeOut = TIMEOUT) public void readFeedDocumentsStartFromCustomDate() { ChangeFeedProcessor changeFeedProcessor = ChangeFeedProcessor.Builder() .hostName(hostName) .handleChanges(docs -> { ChangeFeedProcessorTest.log.info("START processing from thread {}", Thread.currentThread().getId()); for (CosmosItemProperties item : docs) { processItem(item); } ChangeFeedProcessorTest.log.info("END processing from thread {}", Thread.currentThread().getId()); }) .feedContainer(createdFeedCollection) .leaseContainer(createdLeaseCollection) .options(new ChangeFeedProcessorOptions() .leaseRenewInterval(Duration.ofSeconds(20)) .leaseAcquireInterval(Duration.ofSeconds(10)) .leaseExpirationInterval(Duration.ofSeconds(30)) .feedPollDelay(Duration.ofSeconds(1)) .leasePrefix("TEST") .maxItemCount(10) .startTime(OffsetDateTime.now().minusDays(1)) .minScaleCount(1) .maxScaleCount(3) .discardExistingLeases(true) ) .build(); try { changeFeedProcessor.start().subscribeOn(Schedulers.elastic()) .timeout(Duration.ofMillis(CHANGE_FEED_PROCESSOR_TIMEOUT)) .subscribe(); } catch (Exception ex) { log.error("Change feed processor did not start in the expected time", ex); } setupReadFeedDocuments(); long remainingWork = FEED_TIMEOUT; while (remainingWork > 0 && receivedDocuments.size() < FEED_COUNT) { remainingWork -= 100; try { Thread.sleep(100); } catch (InterruptedException e) { e.printStackTrace(); } } assertThat(remainingWork >= 0).as("Failed to receive all the feed documents").isTrue(); 
changeFeedProcessor.stop().subscribeOn(Schedulers.elastic()).timeout(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT)).subscribe(); for (CosmosItemProperties item : createdDocuments) { assertThat(receivedDocuments.containsKey(item.id())).as("Document with id: " + item.id()).isTrue(); } try { Thread.sleep(CHANGE_FEED_PROCESSOR_TIMEOUT); } catch (InterruptedException e) { e.printStackTrace(); } receivedDocuments.clear(); } @Test(groups = { "emulator" }, timeOut = 40 * CHANGE_FEED_PROCESSOR_TIMEOUT) @BeforeMethod(groups = { "emulator" }, timeOut = 2 * SETUP_TIMEOUT, alwaysRun = true) public void beforeMethod() { createdFeedCollection = createFeedCollection(); createdLeaseCollection = createLeaseCollection(); } @BeforeClass(groups = { "emulator" }, timeOut = SETUP_TIMEOUT, alwaysRun = true) public void beforeClass() { client = clientBuilder().build(); createdDatabase = getSharedCosmosDatabase(client); } @AfterMethod(groups = { "emulator" }, timeOut = 3 * SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterMethod() { safeDeleteCollection(createdFeedCollection); safeDeleteCollection(createdLeaseCollection); try { Thread.sleep(500); } catch (Exception e){ } } @AfterClass(groups = { "emulator" }, timeOut = 2 * SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() { safeClose(client); } private void setupReadFeedDocuments() { receivedDocuments = new ConcurrentHashMap<>(); List<CosmosItemProperties> docDefList = new ArrayList<>(); for(int i = 0; i < FEED_COUNT; i++) { docDefList.add(getDocumentDefinition()); } createdDocuments = bulkInsertBlocking(createdFeedCollection, docDefList); waitIfNeededForReplicasToCatchUp(clientBuilder()); } private CosmosItemProperties getDocumentDefinition() { String uuid = UUID.randomUUID().toString(); CosmosItemProperties doc = new CosmosItemProperties(String.format("{ " + "\"id\": \"%s\", " + "\"mypk\": \"%s\", " + "\"sgmts\": [[6519456, 1471916863], [2498434, 1455671440]]" + "}" , uuid, uuid)); return doc; } private 
CosmosContainer createFeedCollection() { CosmosContainerRequestOptions optionsFeedCollection = new CosmosContainerRequestOptions(); return createCollection(createdDatabase, getCollectionDefinition(), optionsFeedCollection, 10100); } private CosmosContainer createLeaseCollection() { CosmosContainerRequestOptions options = new CosmosContainerRequestOptions(); CosmosContainerProperties collectionDefinition = new CosmosContainerProperties(UUID.randomUUID().toString(), "/id"); return createCollection(createdDatabase, collectionDefinition, options, 400); } private static synchronized void processItem(CosmosItemProperties item) { ChangeFeedProcessorTest.log.info("RECEIVED {}", item.toJson(SerializationFormattingPolicy.INDENTED)); receivedDocuments.put(item.id(), item); } }
class ChangeFeedProcessorTest extends TestSuiteBase { private final static Logger log = LoggerFactory.getLogger(ChangeFeedProcessorTest.class); private CosmosDatabase createdDatabase; private CosmosContainer createdFeedCollection; private CosmosContainer createdLeaseCollection; private List<CosmosItemProperties> createdDocuments; private static Map<String, CosmosItemProperties> receivedDocuments; private final String hostName = RandomStringUtils.randomAlphabetic(6); private final int FEED_COUNT = 10; private final int CHANGE_FEED_PROCESSOR_TIMEOUT = 5000; private CosmosClient client; private ChangeFeedProcessor changeFeedProcessor; @Factory(dataProvider = "clientBuilders") public ChangeFeedProcessorTest(CosmosClientBuilder clientBuilder) { super(clientBuilder); } @Test(groups = { "emulator" }, timeOut = TIMEOUT) public void readFeedDocumentsStartFromBeginning() { setupReadFeedDocuments(); changeFeedProcessor = ChangeFeedProcessor.Builder() .hostName(hostName) .handleChanges(docs -> { ChangeFeedProcessorTest.log.info("START processing from thread {}", Thread.currentThread().getId()); for (CosmosItemProperties item : docs) { processItem(item); } ChangeFeedProcessorTest.log.info("END processing from thread {}", Thread.currentThread().getId()); }) .feedContainer(createdFeedCollection) .leaseContainer(createdLeaseCollection) .options(new ChangeFeedProcessorOptions() .leaseRenewInterval(Duration.ofSeconds(20)) .leaseAcquireInterval(Duration.ofSeconds(10)) .leaseExpirationInterval(Duration.ofSeconds(30)) .feedPollDelay(Duration.ofSeconds(2)) .leasePrefix("TEST") .maxItemCount(10) .startFromBeginning(true) .maxScaleCount(0) .discardExistingLeases(true) ) .build(); try { changeFeedProcessor.start().subscribeOn(Schedulers.elastic()) .timeout(Duration.ofMillis(CHANGE_FEED_PROCESSOR_TIMEOUT)) .subscribe(); } catch (Exception ex) { log.error("Change feed processor did not start in the expected time", ex); } try { Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT); } catch 
(InterruptedException e) { log.error(e.getMessage()); } changeFeedProcessor.stop().subscribeOn(Schedulers.elastic()).timeout(Duration.ofMillis(CHANGE_FEED_PROCESSOR_TIMEOUT)).subscribe(); for (CosmosItemProperties item : createdDocuments) { assertThat(receivedDocuments.containsKey(item.id())).as("Document with id: " + item.id()).isTrue(); } try { Thread.sleep(CHANGE_FEED_PROCESSOR_TIMEOUT); } catch (InterruptedException e) { log.error(e.getMessage()); } receivedDocuments.clear(); } @Test(groups = { "emulator" }, timeOut = TIMEOUT) public void readFeedDocumentsStartFromCustomDate() { ChangeFeedProcessor changeFeedProcessor = ChangeFeedProcessor.Builder() .hostName(hostName) .handleChanges(docs -> { ChangeFeedProcessorTest.log.info("START processing from thread {}", Thread.currentThread().getId()); for (CosmosItemProperties item : docs) { processItem(item); } ChangeFeedProcessorTest.log.info("END processing from thread {}", Thread.currentThread().getId()); }) .feedContainer(createdFeedCollection) .leaseContainer(createdLeaseCollection) .options(new ChangeFeedProcessorOptions() .leaseRenewInterval(Duration.ofSeconds(20)) .leaseAcquireInterval(Duration.ofSeconds(10)) .leaseExpirationInterval(Duration.ofSeconds(30)) .feedPollDelay(Duration.ofSeconds(1)) .leasePrefix("TEST") .maxItemCount(10) .startTime(OffsetDateTime.now().minusDays(1)) .minScaleCount(1) .maxScaleCount(3) .discardExistingLeases(true) ) .build(); try { changeFeedProcessor.start().subscribeOn(Schedulers.elastic()) .timeout(Duration.ofMillis(CHANGE_FEED_PROCESSOR_TIMEOUT)) .subscribe(); } catch (Exception ex) { log.error("Change feed processor did not start in the expected time", ex); } setupReadFeedDocuments(); long remainingWork = FEED_TIMEOUT; while (remainingWork > 0 && receivedDocuments.size() < FEED_COUNT) { remainingWork -= 100; try { Thread.sleep(100); } catch (InterruptedException e) { log.error(e.getMessage()); } } assertThat(remainingWork >= 0).as("Failed to receive all the feed 
documents").isTrue(); changeFeedProcessor.stop().subscribeOn(Schedulers.elastic()).timeout(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT)).subscribe(); for (CosmosItemProperties item : createdDocuments) { assertThat(receivedDocuments.containsKey(item.id())).as("Document with id: " + item.id()).isTrue(); } try { Thread.sleep(CHANGE_FEED_PROCESSOR_TIMEOUT); } catch (InterruptedException e) { log.error(e.getMessage()); } receivedDocuments.clear(); } @Test(groups = { "emulator" }, timeOut = 40 * CHANGE_FEED_PROCESSOR_TIMEOUT) @BeforeMethod(groups = { "emulator" }, timeOut = 2 * SETUP_TIMEOUT, alwaysRun = true) public void beforeMethod() { createdFeedCollection = createFeedCollection(); createdLeaseCollection = createLeaseCollection(); } @BeforeClass(groups = { "emulator" }, timeOut = SETUP_TIMEOUT, alwaysRun = true) public void beforeClass() { client = clientBuilder().build(); createdDatabase = getSharedCosmosDatabase(client); } @AfterMethod(groups = { "emulator" }, timeOut = 3 * SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterMethod() { safeDeleteCollection(createdFeedCollection); safeDeleteCollection(createdLeaseCollection); try { Thread.sleep(500); } catch (Exception e){ } } @AfterClass(groups = { "emulator" }, timeOut = 2 * SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() { safeClose(client); } private void setupReadFeedDocuments() { receivedDocuments = new ConcurrentHashMap<>(); List<CosmosItemProperties> docDefList = new ArrayList<>(); for(int i = 0; i < FEED_COUNT; i++) { docDefList.add(getDocumentDefinition()); } createdDocuments = bulkInsertBlocking(createdFeedCollection, docDefList); waitIfNeededForReplicasToCatchUp(clientBuilder()); } private CosmosItemProperties getDocumentDefinition() { String uuid = UUID.randomUUID().toString(); CosmosItemProperties doc = new CosmosItemProperties(String.format("{ " + "\"id\": \"%s\", " + "\"mypk\": \"%s\", " + "\"sgmts\": [[6519456, 1471916863], [2498434, 1455671440]]" + "}" , uuid, uuid)); return 
doc; } private CosmosContainer createFeedCollection() { CosmosContainerRequestOptions optionsFeedCollection = new CosmosContainerRequestOptions(); return createCollection(createdDatabase, getCollectionDefinition(), optionsFeedCollection, 10100); } private CosmosContainer createLeaseCollection() { CosmosContainerRequestOptions options = new CosmosContainerRequestOptions(); CosmosContainerProperties collectionDefinition = new CosmosContainerProperties(UUID.randomUUID().toString(), "/id"); return createCollection(createdDatabase, collectionDefinition, options, 400); } private static synchronized void processItem(CosmosItemProperties item) { ChangeFeedProcessorTest.log.info("RECEIVED {}", item.toJson(SerializationFormattingPolicy.INDENTED)); receivedDocuments.put(item.id(), item); } }
What's the reason change to C folder?
public static void main(String[] args) { String shareName = generateRandomName(); ShareClient shareClient = new ShareClientBuilder().endpoint(ENDPOINT).shareName(shareName).buildClient(); shareClient.create(); String parentDirName = generateRandomName(); shareClient.createDirectory(parentDirName); String srcFileName = generateRandomName(); FileClient srcFileClient = new FileClientBuilder().endpoint(ENDPOINT).shareName(shareName) .filePath(parentDirName + "/" + srcFileName).buildClient(); try { srcFileClient.create(1024); } catch (StorageErrorException e) { System.out.println("Failed to create source client. Reasons: " + e.getMessage()); } String dataText = "Hello, file client sample!"; ByteBuffer uploadData = ByteBuffer.wrap(dataText.getBytes(StandardCharsets.UTF_8)); try { srcFileClient.upload(uploadData, uploadData.remaining()); } catch (StorageErrorException e) { System.out.println("Failed to upload the data. Reasons: " + e.getMessage()); } String destFileName = generateRandomName(); FileClient destFileClient = new FileClientBuilder().endpoint(ENDPOINT).shareName(shareName) .filePath(parentDirName + "/" + destFileName).buildClient(); destFileClient.create(1024); URL clientURL = srcFileClient.getFileUrl(); String sourceURL = clientURL.toString() + "/" + shareName + "/" + parentDirName + "/" + srcFileName; Response<FileCopyInfo> copyResponse; try { copyResponse = destFileClient.startCopy(sourceURL, null); } catch (StorageErrorException e) { throw new RuntimeException("Failed to start the copy of source file. Reasons: " + e.getMessage()); } if (copyResponse.value().copyStatus() == CopyStatusType.PENDING) { try { destFileClient.abortCopy(copyResponse.value().copyId()); } catch (StorageErrorException e) { System.out.println("Failed to abort the copy. 
Reasons: " + e.getMessage()); } } String filePath = "C:/filePath/"; String uploadPath = filePath + "testfiles/" + "uploadSample.txt"; try { srcFileClient.uploadFromFile(uploadPath); } catch (StorageErrorException e) { System.out.println("Failed to upload file to storage. Reasons: " + e.getMessage()); } String downloadPath = filePath + "testfiles/" + "downloadSample.txt"; File downloadFile = new File(downloadPath); try { if (!Files.exists(downloadFile.toPath()) && !downloadFile.createNewFile()) { throw new RuntimeException("Failed to create new upload file."); } } catch (IOException e) { throw new RuntimeException("Failed to create new upload file."); } try { srcFileClient.downloadToFile(downloadPath); } catch (StorageErrorException e) { System.out.println("Failed to download file from storage. Reasons: " + e.getMessage()); } if (Files.exists(downloadFile.toPath()) && !downloadFile.delete()) { System.out.println("Failed to delete download file."); } try { Response<FileProperties> propertiesResponse = srcFileClient.getProperties(); System.out.printf("This is the eTag: %s of the file. File type is : %s.", propertiesResponse.value().eTag(), propertiesResponse.value().fileType()); } catch (StorageErrorException e) { System.out.println("Failed to get file properties. Reasons: " + e.getMessage()); } try { srcFileClient.delete(); } catch (StorageErrorException e) { System.out.println("Failed to delete the src file. Reasons: " + e.getMessage()); } shareClient.delete(); }
String filePath = "C:/filePath/";
public static void main(String[] args) { String shareName = generateRandomName(); ShareClient shareClient = new ShareClientBuilder().endpoint(ENDPOINT).shareName(shareName).buildClient(); shareClient.create(); String parentDirName = generateRandomName(); shareClient.createDirectory(parentDirName); String srcFileName = generateRandomName(); FileClient srcFileClient = new FileClientBuilder().endpoint(ENDPOINT).shareName(shareName) .filePath(parentDirName + "/" + srcFileName).buildClient(); try { srcFileClient.create(1024); } catch (StorageErrorException e) { System.out.println("Failed to create source client. Reasons: " + e.getMessage()); } String dataText = "Hello, file client sample!"; ByteBuffer uploadData = ByteBuffer.wrap(dataText.getBytes(StandardCharsets.UTF_8)); try { srcFileClient.upload(uploadData, uploadData.remaining()); } catch (StorageErrorException e) { System.out.println("Failed to upload the data. Reasons: " + e.getMessage()); } String destFileName = generateRandomName(); FileClient destFileClient = new FileClientBuilder().endpoint(ENDPOINT).shareName(shareName) .filePath(parentDirName + "/" + destFileName).buildClient(); destFileClient.create(1024); URL clientURL = srcFileClient.getFileUrl(); String sourceURL = clientURL.toString() + "/" + shareName + "/" + parentDirName + "/" + srcFileName; FileCopyInfo copyResponse = null; try { copyResponse = destFileClient.startCopy(sourceURL, null); } catch (StorageErrorException e) { throw new RuntimeException("Failed to start the copy of source file. Reasons: " + e.getMessage()); } if (copyResponse.copyStatus() == CopyStatusType.PENDING) { try { destFileClient.abortCopy(copyResponse.copyId()); } catch (StorageErrorException e) { System.out.println("Failed to abort the copy. 
Reasons: " + e.getMessage()); } } String filePath = "C:/filePath/"; String uploadPath = filePath + "testfiles/" + "uploadSample.txt"; try { srcFileClient.uploadFromFile(uploadPath); } catch (StorageErrorException e) { System.out.println("Failed to upload file to storage. Reasons: " + e.getMessage()); } String downloadPath = filePath + "testfiles/" + "downloadSample.txt"; File downloadFile = new File(downloadPath); try { if (!Files.exists(downloadFile.toPath()) && !downloadFile.createNewFile()) { throw new RuntimeException("Failed to create new upload file."); } } catch (IOException e) { throw new RuntimeException("Failed to create new upload file."); } try { srcFileClient.downloadToFile(downloadPath); } catch (StorageErrorException e) { System.out.println("Failed to download file from storage. Reasons: " + e.getMessage()); } if (Files.exists(downloadFile.toPath()) && !downloadFile.delete()) { System.out.println("Failed to delete download file."); } try { FileProperties propertiesResponse = srcFileClient.getProperties(); System.out.printf("This is the eTag: %s of the file. File type is : %s.", propertiesResponse.eTag(), propertiesResponse.fileType()); } catch (StorageErrorException e) { System.out.println("Failed to get file properties. Reasons: " + e.getMessage()); } try { srcFileClient.delete(); } catch (StorageErrorException e) { System.out.println("Failed to delete the src file. Reasons: " + e.getMessage()); } shareClient.delete(); }
class FileSample { private static final String ENDPOINT = ConfigurationManager.getConfiguration().get("AZURE_STORAGE_FILE_ENDPOINT"); private static String generateRandomName() { return UUID.randomUUID().toString().substring(0, 8); } /** * The main method shows how to do the base operation using file sync client. * @param args No args needed for the main method. * @throws RuntimeException If error occurs when make storage API call. */ }
class FileSample { private static final String ENDPOINT = ConfigurationManager.getConfiguration().get("AZURE_STORAGE_FILE_ENDPOINT"); private static String generateRandomName() { return UUID.randomUUID().toString().substring(0, 8); } /** * The main method shows how to do the base operation using file sync client. * @param args No args needed for the main method. * @throws RuntimeException If error occurs when make storage API call. */ }
Using the class loader to get the resource could lead to an NPE; since this is a sample not tied to anything real, I just made it simpler.
public static void main(String[] args) { String shareName = generateRandomName(); ShareClient shareClient = new ShareClientBuilder().endpoint(ENDPOINT).shareName(shareName).buildClient(); shareClient.create(); String parentDirName = generateRandomName(); shareClient.createDirectory(parentDirName); String srcFileName = generateRandomName(); FileClient srcFileClient = new FileClientBuilder().endpoint(ENDPOINT).shareName(shareName) .filePath(parentDirName + "/" + srcFileName).buildClient(); try { srcFileClient.create(1024); } catch (StorageErrorException e) { System.out.println("Failed to create source client. Reasons: " + e.getMessage()); } String dataText = "Hello, file client sample!"; ByteBuffer uploadData = ByteBuffer.wrap(dataText.getBytes(StandardCharsets.UTF_8)); try { srcFileClient.upload(uploadData, uploadData.remaining()); } catch (StorageErrorException e) { System.out.println("Failed to upload the data. Reasons: " + e.getMessage()); } String destFileName = generateRandomName(); FileClient destFileClient = new FileClientBuilder().endpoint(ENDPOINT).shareName(shareName) .filePath(parentDirName + "/" + destFileName).buildClient(); destFileClient.create(1024); URL clientURL = srcFileClient.getFileUrl(); String sourceURL = clientURL.toString() + "/" + shareName + "/" + parentDirName + "/" + srcFileName; Response<FileCopyInfo> copyResponse; try { copyResponse = destFileClient.startCopy(sourceURL, null); } catch (StorageErrorException e) { throw new RuntimeException("Failed to start the copy of source file. Reasons: " + e.getMessage()); } if (copyResponse.value().copyStatus() == CopyStatusType.PENDING) { try { destFileClient.abortCopy(copyResponse.value().copyId()); } catch (StorageErrorException e) { System.out.println("Failed to abort the copy. 
Reasons: " + e.getMessage()); } } String filePath = "C:/filePath/"; String uploadPath = filePath + "testfiles/" + "uploadSample.txt"; try { srcFileClient.uploadFromFile(uploadPath); } catch (StorageErrorException e) { System.out.println("Failed to upload file to storage. Reasons: " + e.getMessage()); } String downloadPath = filePath + "testfiles/" + "downloadSample.txt"; File downloadFile = new File(downloadPath); try { if (!Files.exists(downloadFile.toPath()) && !downloadFile.createNewFile()) { throw new RuntimeException("Failed to create new upload file."); } } catch (IOException e) { throw new RuntimeException("Failed to create new upload file."); } try { srcFileClient.downloadToFile(downloadPath); } catch (StorageErrorException e) { System.out.println("Failed to download file from storage. Reasons: " + e.getMessage()); } if (Files.exists(downloadFile.toPath()) && !downloadFile.delete()) { System.out.println("Failed to delete download file."); } try { Response<FileProperties> propertiesResponse = srcFileClient.getProperties(); System.out.printf("This is the eTag: %s of the file. File type is : %s.", propertiesResponse.value().eTag(), propertiesResponse.value().fileType()); } catch (StorageErrorException e) { System.out.println("Failed to get file properties. Reasons: " + e.getMessage()); } try { srcFileClient.delete(); } catch (StorageErrorException e) { System.out.println("Failed to delete the src file. Reasons: " + e.getMessage()); } shareClient.delete(); }
String filePath = "C:/filePath/";
public static void main(String[] args) { String shareName = generateRandomName(); ShareClient shareClient = new ShareClientBuilder().endpoint(ENDPOINT).shareName(shareName).buildClient(); shareClient.create(); String parentDirName = generateRandomName(); shareClient.createDirectory(parentDirName); String srcFileName = generateRandomName(); FileClient srcFileClient = new FileClientBuilder().endpoint(ENDPOINT).shareName(shareName) .filePath(parentDirName + "/" + srcFileName).buildClient(); try { srcFileClient.create(1024); } catch (StorageErrorException e) { System.out.println("Failed to create source client. Reasons: " + e.getMessage()); } String dataText = "Hello, file client sample!"; ByteBuffer uploadData = ByteBuffer.wrap(dataText.getBytes(StandardCharsets.UTF_8)); try { srcFileClient.upload(uploadData, uploadData.remaining()); } catch (StorageErrorException e) { System.out.println("Failed to upload the data. Reasons: " + e.getMessage()); } String destFileName = generateRandomName(); FileClient destFileClient = new FileClientBuilder().endpoint(ENDPOINT).shareName(shareName) .filePath(parentDirName + "/" + destFileName).buildClient(); destFileClient.create(1024); URL clientURL = srcFileClient.getFileUrl(); String sourceURL = clientURL.toString() + "/" + shareName + "/" + parentDirName + "/" + srcFileName; FileCopyInfo copyResponse = null; try { copyResponse = destFileClient.startCopy(sourceURL, null); } catch (StorageErrorException e) { throw new RuntimeException("Failed to start the copy of source file. Reasons: " + e.getMessage()); } if (copyResponse.copyStatus() == CopyStatusType.PENDING) { try { destFileClient.abortCopy(copyResponse.copyId()); } catch (StorageErrorException e) { System.out.println("Failed to abort the copy. 
Reasons: " + e.getMessage()); } } String filePath = "C:/filePath/"; String uploadPath = filePath + "testfiles/" + "uploadSample.txt"; try { srcFileClient.uploadFromFile(uploadPath); } catch (StorageErrorException e) { System.out.println("Failed to upload file to storage. Reasons: " + e.getMessage()); } String downloadPath = filePath + "testfiles/" + "downloadSample.txt"; File downloadFile = new File(downloadPath); try { if (!Files.exists(downloadFile.toPath()) && !downloadFile.createNewFile()) { throw new RuntimeException("Failed to create new upload file."); } } catch (IOException e) { throw new RuntimeException("Failed to create new upload file."); } try { srcFileClient.downloadToFile(downloadPath); } catch (StorageErrorException e) { System.out.println("Failed to download file from storage. Reasons: " + e.getMessage()); } if (Files.exists(downloadFile.toPath()) && !downloadFile.delete()) { System.out.println("Failed to delete download file."); } try { FileProperties propertiesResponse = srcFileClient.getProperties(); System.out.printf("This is the eTag: %s of the file. File type is : %s.", propertiesResponse.eTag(), propertiesResponse.fileType()); } catch (StorageErrorException e) { System.out.println("Failed to get file properties. Reasons: " + e.getMessage()); } try { srcFileClient.delete(); } catch (StorageErrorException e) { System.out.println("Failed to delete the src file. Reasons: " + e.getMessage()); } shareClient.delete(); }
class FileSample { private static final String ENDPOINT = ConfigurationManager.getConfiguration().get("AZURE_STORAGE_FILE_ENDPOINT"); private static String generateRandomName() { return UUID.randomUUID().toString().substring(0, 8); } /** * The main method shows how to do the base operation using file sync client. * @param args No args needed for the main method. * @throws RuntimeException If error occurs when make storage API call. */ }
class FileSample { private static final String ENDPOINT = ConfigurationManager.getConfiguration().get("AZURE_STORAGE_FILE_ENDPOINT"); private static String generateRandomName() { return UUID.randomUUID().toString().substring(0, 8); } /** * The main method shows how to do the base operation using file sync client. * @param args No args needed for the main method. * @throws RuntimeException If error occurs when make storage API call. */ }
Nice 👍
public EventData(String body) { this(body.getBytes(UTF_8)); }
}
public EventData(String body) { this(body.getBytes(UTF_8)); }
class EventData implements Comparable<EventData> {
    /*
     * These are properties owned by the service and set when a message is received.
     */
    public static final Set<String> RESERVED_SYSTEM_PROPERTIES;

    private final ClientLogger logger = new ClientLogger(EventData.class);
    // Free-form application properties supplied by the publisher.
    private final Map<String, Object> properties;
    // Raw event payload; null when a received AMQP message carried a non-Data body section.
    private final ByteBuffer body;
    // Service-populated properties; backed by an empty map for locally created events.
    private final SystemProperties systemProperties;

    static {
        final Set<String> properties = new HashSet<>();
        properties.add(OFFSET_ANNOTATION_NAME.getValue());
        properties.add(PARTITION_KEY_ANNOTATION_NAME.getValue());
        properties.add(SEQUENCE_NUMBER_ANNOTATION_NAME.getValue());
        properties.add(ENQUEUED_TIME_UTC_ANNOTATION_NAME.getValue());
        properties.add(PUBLISHER_ANNOTATION_NAME.getValue());
        RESERVED_SYSTEM_PROPERTIES = Collections.unmodifiableSet(properties);
    }

    /**
     * Creates an event containing the {@code data}.
     *
     * @param body The data to set for this event.
     */
    public EventData(byte[] body) {
        this(ByteBuffer.wrap(body));
    }

    /**
     * Creates an event containing the {@code body}.
     *
     * @param body The data to set for this event.
     * @throws NullPointerException if {@code body} is {@code null}.
     */
    public EventData(ByteBuffer body) {
        Objects.requireNonNull(body);
        this.body = body;
        this.properties = new HashMap<>();
        this.systemProperties = new SystemProperties(Collections.emptyMap());
    }

    /*
     * Creates an event from a received message: message annotations and selected message properties
     * become system properties; application properties are copied as-is.
     */
    EventData(Message message) {
        if (message == null) {
            throw new IllegalArgumentException("'message' cannot be null");
        }
        final Map<Symbol, Object> messageAnnotations = message.getMessageAnnotations().getValue();
        final HashMap<String, Object> receiveProperties = new HashMap<>();
        for (Map.Entry<Symbol, Object> annotation : messageAnnotations.entrySet()) {
            receiveProperties.put(annotation.getKey().toString(), annotation.getValue());
        }
        if (message.getProperties() != null) {
            // Only non-null message properties make it into the map (see addMapEntry).
            addMapEntry(receiveProperties, MessageConstant.MESSAGE_ID, message.getMessageId());
            addMapEntry(receiveProperties, MessageConstant.USER_ID, message.getUserId());
            addMapEntry(receiveProperties, MessageConstant.TO, message.getAddress());
            addMapEntry(receiveProperties, MessageConstant.SUBJECT, message.getSubject());
            addMapEntry(receiveProperties, MessageConstant.REPLY_TO, message.getReplyTo());
            addMapEntry(receiveProperties, MessageConstant.CORRELATION_ID, message.getCorrelationId());
            addMapEntry(receiveProperties, MessageConstant.CONTENT_TYPE, message.getContentType());
            addMapEntry(receiveProperties, MessageConstant.CONTENT_ENCODING, message.getContentEncoding());
            addMapEntry(receiveProperties, MessageConstant.ABSOLUTE_EXPIRY_TIME, message.getExpiryTime());
            addMapEntry(receiveProperties, MessageConstant.CREATION_TIME, message.getCreationTime());
            addMapEntry(receiveProperties, MessageConstant.GROUP_ID, message.getGroupId());
            addMapEntry(receiveProperties, MessageConstant.GROUP_SEQUENCE, message.getGroupSequence());
            addMapEntry(receiveProperties, MessageConstant.REPLY_TO_GROUP_ID, message.getReplyToGroupId());
        }
        this.systemProperties = new SystemProperties(receiveProperties);
        this.properties = message.getApplicationProperties() == null
            ? new HashMap<>()
            : message.getApplicationProperties().getValue();

        final Section bodySection = message.getBody();
        if (bodySection instanceof Data) {
            Data bodyData = (Data) bodySection;
            this.body = bodyData.getValue().asByteBuffer();
        } else {
            // Non-Data body sections are not mapped; log a warning and leave the body null.
            logger.warning(String.format(Locale.US,
                "Message body type is not of type Data, but type: %s. Not setting body contents.",
                bodySection != null ? bodySection.getType() : "null"));
            this.body = null;
        }
        message.clear();
    }

    /**
     * Adds a piece of metadata to the event, allowing publishers to offer additional information to
     * event consumers. If the {@code key} exists in the map, its existing value is overwritten.
     *
     * @param key The key for this application property.
     * @param value The value for this application property.
     * @return The updated EventData object.
     * @throws NullPointerException if {@code key} or {@code value} is null.
     */
    public EventData addProperty(String key, Object value) {
        Objects.requireNonNull(key);
        Objects.requireNonNull(value);
        properties.put(key, value);
        return this;
    }

    /**
     * The set of free-form event properties which may be used for passing metadata associated with
     * the event body during Event Hubs operations.
     *
     * @return Application properties associated with this {@link EventData}.
     */
    public Map<String, Object> properties() {
        return properties;
    }

    /**
     * Properties that are populated by EventHubService. As these are populated by Service, they are
     * only present on a <b>received</b> EventData.
     *
     * @return an encapsulation of all SystemProperties appended by EventHubs service into EventData.
     */
    public Map<String, Object> systemProperties() {
        return systemProperties;
    }

    /**
     * Gets the actual payload/data wrapped by EventData.
     *
     * @return ByteBuffer representing the data (a duplicate, so the caller cannot disturb this
     *     event's read position).
     */
    public ByteBuffer body() {
        return body.duplicate();
    }

    /**
     * Returns event data as UTF-8 decoded string.
     *
     * @return UTF-8 decoded string representation of the event data.
     */
    public String bodyAsString() {
        return UTF_8.decode(body).toString();
    }

    /**
     * Gets the offset of the event when it was received from the associated Event Hub partition.
     *
     * @return The offset within the Event Hub partition.
     */
    public String offset() {
        return systemProperties.offset();
    }

    /**
     * Gets a partition key used for message partitioning. If it exists, this value was used to
     * compute a hash to select a partition to send the message to.
     *
     * @return A partition key for this Event Data.
     */
    public String partitionKey() {
        return systemProperties.partitionKey();
    }

    /**
     * Gets the instant, in UTC, of when the event was enqueued in the Event Hub partition.
     *
     * @return The instant, in UTC, this was enqueued in the Event Hub partition.
     */
    public Instant enqueuedTime() {
        return systemProperties.enqueuedTime();
    }

    /**
     * Gets the sequence number assigned to the event when it was enqueued in the associated Event
     * Hub partition. This is unique for every message received in the Event Hub partition.
     *
     * @return Sequence number for this event.
     * @throws IllegalStateException if the sequence number is not present in a retrieved event.
     */
    public long sequenceNumber() {
        return systemProperties.sequenceNumber();
    }

    // Puts (key, content) into the map, skipping null contents.
    private void addMapEntry(Map<String, Object> map, MessageConstant key, Object content) {
        if (content == null) {
            return;
        }
        map.put(key.getValue(), content);
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public int compareTo(EventData other) {
        return Long.compare(
            this.sequenceNumber(),
            other.sequenceNumber()
        );
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public boolean equals(Object o) {
        if (this == o) {
            return true;
        }
        if (o == null || getClass() != o.getClass()) {
            return false;
        }
        EventData eventData = (EventData) o;
        // Equality is based only on the body buffer.
        return Objects.equals(body, eventData.body);
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public int hashCode() {
        return Objects.hash(body);
    }

    /**
     * A collection of properties populated by Azure Event Hubs service.
     */
    private static class SystemProperties extends HashMap<String, Object> {
        private static final long serialVersionUID = -2827050124966993723L;

        SystemProperties(final Map<String, Object> map) {
            super(Collections.unmodifiableMap(map));
        }

        /**
         * Gets the offset within the Event Hubs stream.
         *
         * @return The offset within the Event Hubs stream.
         */
        private String offset() {
            return this.getSystemProperty(OFFSET_ANNOTATION_NAME.getValue());
        }

        /**
         * Gets a partition key used for message partitioning, if one exists.
         *
         * @return A partition key for this Event Data.
         */
        private String partitionKey() {
            return this.getSystemProperty(PARTITION_KEY_ANNOTATION_NAME.getValue());
        }

        /**
         * Gets the time this event was enqueued in the Event Hub.
         *
         * @return The time this was enqueued in the service.
         */
        private Instant enqueuedTime() {
            final Date enqueuedTimeValue = this.getSystemProperty(ENQUEUED_TIME_UTC_ANNOTATION_NAME.getValue());
            return enqueuedTimeValue != null ? enqueuedTimeValue.toInstant() : null;
        }

        /**
         * Gets the sequence number in the event stream for this event.
         *
         * @return Sequence number for this event.
         * @throws IllegalStateException if {@link SystemProperties} does not contain the sequence
         *     number in a retrieved event.
         */
        private long sequenceNumber() {
            final Long sequenceNumber = this.getSystemProperty(SEQUENCE_NUMBER_ANNOTATION_NAME.getValue());
            if (sequenceNumber == null) {
                throw new IllegalStateException(String.format(Locale.US,
                    "sequenceNumber: %s should always be in map.", SEQUENCE_NUMBER_ANNOTATION_NAME.getValue()));
            }
            return sequenceNumber;
        }

        // Unchecked lookup helper; returns null when the key is absent.
        @SuppressWarnings("unchecked")
        private <T> T getSystemProperty(final String key) {
            if (this.containsKey(key)) {
                return (T) (this.get(key));
            }
            return null;
        }
    }
}
class EventData implements Comparable<EventData> {
    /*
     * These are properties owned by the service and set when a message is received.
     */
    public static final Set<String> RESERVED_SYSTEM_PROPERTIES;

    private final ClientLogger logger = new ClientLogger(EventData.class);
    // Free-form application properties supplied by the publisher.
    private final Map<String, Object> properties;
    // Raw event payload; null when a received AMQP message carried a non-Data body section.
    private final ByteBuffer body;
    // Service-populated properties; field values are null for locally created events.
    private final SystemProperties systemProperties;
    // Additional caller-supplied key-value data attached to this event.
    private Context context;

    static {
        final Set<String> properties = new HashSet<>();
        properties.add(OFFSET_ANNOTATION_NAME.getValue());
        properties.add(PARTITION_KEY_ANNOTATION_NAME.getValue());
        properties.add(SEQUENCE_NUMBER_ANNOTATION_NAME.getValue());
        properties.add(ENQUEUED_TIME_UTC_ANNOTATION_NAME.getValue());
        properties.add(PUBLISHER_ANNOTATION_NAME.getValue());
        RESERVED_SYSTEM_PROPERTIES = Collections.unmodifiableSet(properties);
    }

    /**
     * Creates an event containing the {@code data}.
     *
     * @param body The data to set for this event.
     */
    public EventData(byte[] body) {
        this(body, Context.NONE);
    }

    /**
     * Creates an event containing the {@code data}.
     *
     * @param body The data to set for this event.
     * @param context A specified key-value pair of type {@link Context}.
     * @throws NullPointerException if {@code body} or if {@code context} is {@code null}.
     */
    public EventData(byte[] body, Context context) {
        this(ByteBuffer.wrap(body), context);
    }

    /**
     * Creates an event containing the {@code body}.
     *
     * @param body The data to set for this event.
     * @throws NullPointerException if {@code body} is {@code null}.
     */
    public EventData(ByteBuffer body) {
        this(body, Context.NONE);
    }

    /**
     * Creates an event containing the {@code body}.
     *
     * @param body The data to set for this event.
     * @param context A specified key-value pair of type {@link Context}.
     * @throws NullPointerException if {@code body} or if {@code context} is {@code null}.
     */
    public EventData(ByteBuffer body, Context context) {
        Objects.requireNonNull(body, "'body' cannot be null.");
        // Fixed: this check previously re-validated 'body' instead of 'context', so a null
        // context was never rejected despite the error message.
        Objects.requireNonNull(context, "'context' cannot be null.");
        this.body = body;
        this.properties = new HashMap<>();
        this.systemProperties = new SystemProperties();
        this.context = context;
    }

    /*
     * Creates an event from a proton-j message.
     *
     * @throws IllegalStateException if the required system properties — enqueued time, offset, or
     *     sequence number — are not found in the message.
     * @throws NullPointerException if {@code message} is null.
     */
    EventData(Message message) {
        Objects.requireNonNull(message, "'message' cannot be null.");
        final Map<Symbol, Object> messageAnnotations = message.getMessageAnnotations().getValue();
        final HashMap<String, Object> receiveProperties = new HashMap<>();
        for (Map.Entry<Symbol, Object> annotation : messageAnnotations.entrySet()) {
            receiveProperties.put(annotation.getKey().toString(), annotation.getValue());
        }
        if (message.getProperties() != null) {
            // Only non-null message properties make it into the map (see addMapEntry).
            addMapEntry(receiveProperties, MessageConstant.MESSAGE_ID, message.getMessageId());
            addMapEntry(receiveProperties, MessageConstant.USER_ID, message.getUserId());
            addMapEntry(receiveProperties, MessageConstant.TO, message.getAddress());
            addMapEntry(receiveProperties, MessageConstant.SUBJECT, message.getSubject());
            addMapEntry(receiveProperties, MessageConstant.REPLY_TO, message.getReplyTo());
            addMapEntry(receiveProperties, MessageConstant.CORRELATION_ID, message.getCorrelationId());
            addMapEntry(receiveProperties, MessageConstant.CONTENT_TYPE, message.getContentType());
            addMapEntry(receiveProperties, MessageConstant.CONTENT_ENCODING, message.getContentEncoding());
            addMapEntry(receiveProperties, MessageConstant.ABSOLUTE_EXPIRY_TIME, message.getExpiryTime());
            addMapEntry(receiveProperties, MessageConstant.CREATION_TIME, message.getCreationTime());
            addMapEntry(receiveProperties, MessageConstant.GROUP_ID, message.getGroupId());
            addMapEntry(receiveProperties, MessageConstant.GROUP_SEQUENCE, message.getGroupSequence());
            addMapEntry(receiveProperties, MessageConstant.REPLY_TO_GROUP_ID, message.getReplyToGroupId());
        }
        this.context = Context.NONE;
        this.systemProperties = new SystemProperties(receiveProperties);
        this.properties = message.getApplicationProperties() == null
            ? new HashMap<>()
            : message.getApplicationProperties().getValue();

        final Section bodySection = message.getBody();
        if (bodySection instanceof Data) {
            Data bodyData = (Data) bodySection;
            this.body = bodyData.getValue().asByteBuffer();
        } else {
            // Non-Data body sections are not mapped; log a warning and leave the body null.
            logger.warning(String.format(Locale.US,
                "Message body type is not of type Data, but type: %s. Not setting body contents.",
                bodySection != null ? bodySection.getType() : "null"));
            this.body = null;
        }
        message.clear();
    }

    /**
     * Adds a piece of metadata to the event, allowing publishers to offer additional information to
     * event consumers. If the {@code key} exists in the map, its existing value is overwritten.
     *
     * @param key The key for this application property.
     * @param value The value for this application property.
     * @return The updated EventData object.
     * @throws NullPointerException if {@code key} or {@code value} is null.
     */
    public EventData addProperty(String key, Object value) {
        Objects.requireNonNull(key, "'key' cannot be null.");
        Objects.requireNonNull(value, "'value' cannot be null.");
        properties.put(key, value);
        return this;
    }

    /**
     * Adds a new key value pair to the existing context on Event Data.
     *
     * @param key The key for this context object.
     * @param value The value for this context object.
     * @return The updated EventData object.
     * @throws NullPointerException if {@code key} or {@code value} is null.
     */
    public EventData addContext(String key, Object value) {
        Objects.requireNonNull(key, "The 'key' parameter cannot be null.");
        Objects.requireNonNull(value, "The 'value' parameter cannot be null.");
        this.context = context.addData(key, value);
        return this;
    }

    /**
     * The set of free-form event properties which may be used for passing metadata associated with
     * the event body during Event Hubs operations.
     *
     * @return Application properties associated with this {@link EventData}.
     */
    public Map<String, Object> properties() {
        return properties;
    }

    /**
     * A specified key-value pair of type {@link Context} to set additional information on the event.
     *
     * @return the {@link Context} object set on the event.
     */
    public Context context() {
        return context;
    }

    /**
     * Properties that are populated by EventHubService. As these are populated by Service, they are
     * only present on a <b>received</b> EventData.
     *
     * @return an encapsulation of all SystemProperties appended by EventHubs service into EventData.
     */
    public Map<String, Object> systemProperties() {
        return systemProperties;
    }

    /**
     * Gets the actual payload/data wrapped by EventData.
     *
     * @return ByteBuffer representing the data (a duplicate, so the caller cannot disturb this
     *     event's read position).
     */
    public ByteBuffer body() {
        return body.duplicate();
    }

    /**
     * Returns event data as UTF-8 decoded string.
     *
     * @return UTF-8 decoded string representation of the event data.
     */
    public String bodyAsString() {
        return UTF_8.decode(body).toString();
    }

    /**
     * Gets the offset of the event when it was received from the associated Event Hub partition.
     *
     * @return The offset within the Event Hub partition of the received event. {@code null} if the
     *     EventData was not received from Event Hub service.
     */
    public Long offset() {
        return systemProperties.offset();
    }

    /**
     * Gets a partition key used for message partitioning. If it exists, this value was used to
     * compute a hash to select a partition to send the message to.
     *
     * @return A partition key for this Event Data. {@code null} if the EventData was not received
     *     from Event Hub service or there was no partition key set when the event was sent.
     */
    public String partitionKey() {
        return systemProperties.partitionKey();
    }

    /**
     * Gets the instant, in UTC, of when the event was enqueued in the Event Hub partition.
     *
     * @return The instant, in UTC, this was enqueued in the Event Hub partition. {@code null} if
     *     the EventData was not received from Event Hub service.
     */
    public Instant enqueuedTime() {
        return systemProperties.enqueuedTime();
    }

    /**
     * Gets the sequence number assigned to the event when it was enqueued in the associated Event
     * Hub partition. This is unique for every message received in the Event Hub partition.
     *
     * @return The sequence number for this event. {@code null} if the EventData was not received
     *     from Event Hub service.
     */
    public Long sequenceNumber() {
        return systemProperties.sequenceNumber();
    }

    // Puts (key, content) into the map, skipping null contents.
    private void addMapEntry(Map<String, Object> map, MessageConstant key, Object content) {
        if (content == null) {
            return;
        }
        map.put(key.getValue(), content);
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public int compareTo(EventData other) {
        // NOTE(review): sequenceNumber() is null for locally created events (no-arg SystemProperties),
        // so comparing two such events would NPE during unboxing — confirm this is acceptable.
        return Long.compare(
            this.sequenceNumber(),
            other.sequenceNumber()
        );
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public boolean equals(Object o) {
        if (this == o) {
            return true;
        }
        if (o == null || getClass() != o.getClass()) {
            return false;
        }
        EventData eventData = (EventData) o;
        // Equality is based only on the body buffer.
        return Objects.equals(body, eventData.body);
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public int hashCode() {
        return Objects.hash(body);
    }

    /**
     * A collection of properties populated by Azure Event Hubs service.
     */
    private static class SystemProperties extends HashMap<String, Object> {
        private static final long serialVersionUID = -2827050124966993723L;
        private final Long offset;
        private final String partitionKey;
        private final Instant enqueuedTime;
        private final Long sequenceNumber;

        // Used for locally created (not received) events: all service-populated fields are null.
        SystemProperties() {
            super();
            offset = null;
            partitionKey = null;
            enqueuedTime = null;
            sequenceNumber = null;
        }

        // Used for received events; extracts the well-known annotations out of the backing map.
        SystemProperties(final Map<String, Object> map) {
            super(map);
            this.partitionKey = removeSystemProperty(PARTITION_KEY_ANNOTATION_NAME.getValue());

            final String offset = removeSystemProperty(OFFSET_ANNOTATION_NAME.getValue());
            if (offset == null) {
                throw new IllegalStateException(String.format(Locale.US,
                    "offset: %s should always be in map.", OFFSET_ANNOTATION_NAME.getValue()));
            }
            this.offset = Long.valueOf(offset);

            final Date enqueuedTimeValue = removeSystemProperty(ENQUEUED_TIME_UTC_ANNOTATION_NAME.getValue());
            if (enqueuedTimeValue == null) {
                throw new IllegalStateException(String.format(Locale.US,
                    "enqueuedTime: %s should always be in map.", ENQUEUED_TIME_UTC_ANNOTATION_NAME.getValue()));
            }
            this.enqueuedTime = enqueuedTimeValue.toInstant();

            final Long sequenceNumber = removeSystemProperty(SEQUENCE_NUMBER_ANNOTATION_NAME.getValue());
            if (sequenceNumber == null) {
                throw new IllegalStateException(String.format(Locale.US,
                    "sequenceNumber: %s should always be in map.", SEQUENCE_NUMBER_ANNOTATION_NAME.getValue()));
            }
            this.sequenceNumber = sequenceNumber;
        }

        /**
         * Gets the offset within the Event Hubs stream.
         *
         * @return The offset within the Event Hubs stream.
         */
        private Long offset() {
            return offset;
        }

        /**
         * Gets a partition key used for message partitioning, if one exists.
         *
         * @return A partition key for this Event Data.
         */
        private String partitionKey() {
            return partitionKey;
        }

        /**
         * Gets the time this event was enqueued in the Event Hub.
         *
         * @return The time this was enqueued in the service.
         */
        private Instant enqueuedTime() {
            return enqueuedTime;
        }

        /**
         * Gets the sequence number in the event stream for this event.
         *
         * @return Sequence number for this event.
         */
        private Long sequenceNumber() {
            return sequenceNumber;
        }

        // Unchecked remove-and-return helper; returns null when the key is absent.
        @SuppressWarnings("unchecked")
        private <T> T removeSystemProperty(final String key) {
            if (this.containsKey(key)) {
                return (T) (this.remove(key));
            }
            return null;
        }
    }
}
Does this get rendered properly? IIRC the underlying SLF4J `.warn` method does not accept a varargs array followed by a separate throwable.
/**
 * Called when an error occurs while receiving events for this partition. An error also marks the end of the
 * event data stream.
 *
 * @param partitionContext The partition information where the error occurred.
 * @param throwable The {@link Throwable} that caused this method to be called.
 */
public void processError(PartitionContext partitionContext, Throwable throwable) {
    // Fixed: removed the stray trailing space after the "{}" placeholder in the log format string.
    logger.warning("Error occurred in partition processor for partition {}", partitionContext.partitionId(),
        throwable);
}
logger.warning("Error occurred in partition processor for partition {} ", partitionContext.partitionId(),
/**
 * Called when an error occurs while receiving events for this partition. An error also marks the end of the
 * event data stream.
 *
 * @param partitionContext The partition information where the error occurred.
 * @param throwable The {@link Throwable} that caused this method to be called.
 */
public void processError(PartitionContext partitionContext, Throwable throwable) {
    // NOTE(review): ClientLogger may drop the throwable at log levels other than verbose — confirm it renders.
    logger.warning("Error occurred in partition processor for partition {}", partitionContext.partitionId(), throwable);
}
/**
 * Base type for processing events from a single Event Hub partition. {@link EventProcessor} invokes the
 * lifecycle callbacks {@link #initialize(PartitionContext)} and {@link #close(PartitionContext, CloseReason)},
 * and delivers received events to {@link #processEvent(PartitionContext, EventData)}.
 */
// Fixed: the class declares an abstract method, so it must itself be declared abstract to compile.
abstract class PartitionProcessor {
    private final ClientLogger logger = new ClientLogger(PartitionProcessor.class);

    /**
     * This method is called when this {@link EventProcessor} takes ownership of a new partition and before any events
     * from this partition are received.
     *
     * @param partitionContext The partition information for initialization before events from the partition are
     * processed.
     * @return a representation of the deferred computation of this call.
     */
    public Mono<Void> initialize(PartitionContext partitionContext) {
        logger.info("Initializing partition processor for partition {}", partitionContext.partitionId());
        return Mono.empty();
    }

    /**
     * This method is called when a new event is received for this partition. Processing of this event can happen
     * asynchronously.
     *
     * <p>
     * This is also a good place to update checkpoints using the {@code partitionContext} as appropriate.
     *
     * @param partitionContext The partition information the event data belongs to.
     * @param eventData {@link EventData} received from this partition.
     * @return a representation of the deferred computation of this call.
     */
    public abstract Mono<Void> processEvent(PartitionContext partitionContext, EventData eventData);

    /*
     * NOTE(review): the doc below describes a processError(PartitionContext, Throwable) method that is not present
     * in this snippet — presumably it is defined elsewhere; confirm and reattach this doc to it.
     *
     * This method is called when an error occurs while receiving events from Event Hub. An error also marks the end
     * of event data stream.
     *
     * partitionContext - The partition information where the error occurred.
     * throwable - The Throwable that caused this method to be called.
     */

    /**
     * This method is called before the partition processor is closed. A partition processor could be closed for
     * various reasons and the implementations of this interface can take appropriate actions to clean up before the
     * partition processor is shut down.
     *
     * @param partitionContext The partition information for which the processing of events is closed.
     * @param closeReason The reason for closing this partition processor.
     * @return a representation of the deferred computation of this call.
     */
    public Mono<Void> close(PartitionContext partitionContext, CloseReason closeReason) {
        logger.info("Closing partition processor for partition {} with close reason {}",
            partitionContext.partitionId(), closeReason);
        return Mono.empty();
    }
}
/**
 * Base type for processing events from a single Event Hub partition. {@link EventProcessor} invokes the
 * lifecycle callbacks {@link #initialize(PartitionContext)} and {@link #close(PartitionContext, CloseReason)},
 * and delivers received events to {@link #processEvent(PartitionContext, EventData)}.
 */
// Fixed: the class declares an abstract method, so it must itself be declared abstract to compile.
abstract class PartitionProcessor {
    private final ClientLogger logger = new ClientLogger(PartitionProcessor.class);

    /**
     * This method is called when this {@link EventProcessor} takes ownership of a new partition and before any events
     * from this partition are received.
     *
     * @param partitionContext The partition information for initialization before events from the partition are
     * processed.
     * @return a representation of the deferred computation of this call.
     */
    public Mono<Void> initialize(PartitionContext partitionContext) {
        logger.info("Initializing partition processor for partition {}", partitionContext.partitionId());
        return Mono.empty();
    }

    /**
     * This method is called when a new event is received for this partition. Processing of this event can happen
     * asynchronously.
     *
     * <p>
     * This is also a good place to update checkpoints using the {@code partitionContext} as appropriate.
     *
     * @param partitionContext The partition information the event data belongs to.
     * @param eventData {@link EventData} received from this partition.
     * @return a representation of the deferred computation of this call.
     */
    public abstract Mono<Void> processEvent(PartitionContext partitionContext, EventData eventData);

    /*
     * NOTE(review): the doc below describes a processError(PartitionContext, Throwable) method that is not present
     * in this snippet — presumably it is defined elsewhere; confirm and reattach this doc to it.
     *
     * This method is called when an error occurs while receiving events from Event Hub. An error also marks the end
     * of event data stream.
     *
     * partitionContext - The partition information where the error occurred.
     * throwable - The Throwable that caused this method to be called.
     */

    /**
     * This method is called before the partition processor is closed. A partition processor could be closed for
     * various reasons and the implementations of this interface can take appropriate actions to clean up before the
     * partition processor is shut down.
     *
     * @param partitionContext The partition information for which the processing of events is closed.
     * @param closeReason The reason for closing this partition processor.
     * @return a representation of the deferred computation of this call.
     */
    public Mono<Void> close(PartitionContext partitionContext, CloseReason closeReason) {
        logger.info("Closing partition processor for partition {} with close reason {}",
            partitionContext.partitionId(), closeReason);
        return Mono.empty();
    }
}
This is not an slf4j issue. Adding the exception as the last parameter of the slf4j `warn` method works as expected. However, our `ClientLogger` removes the exception for any log level other than `verbose`. I am not sure why we do that, but I prefer to have the users of `ClientLogger` pass in the exception, which can contain useful information; we may later change `ClientLogger` so it no longer removes exceptions.
/**
 * Called when an error occurs while receiving events for this partition. An error also marks the end of the
 * event data stream.
 *
 * @param partitionContext The partition information where the error occurred.
 * @param throwable The {@link Throwable} that caused this method to be called.
 */
public void processError(PartitionContext partitionContext, Throwable throwable) {
    // Fixed: removed the stray trailing space after the "{}" placeholder in the log format string.
    logger.warning("Error occurred in partition processor for partition {}", partitionContext.partitionId(),
        throwable);
}
logger.warning("Error occurred in partition processor for partition {} ", partitionContext.partitionId(),
/**
 * Called when an error occurs while receiving events for this partition. An error also marks the end of the
 * event data stream.
 *
 * @param partitionContext The partition information where the error occurred.
 * @param throwable The {@link Throwable} that caused this method to be called.
 */
public void processError(PartitionContext partitionContext, Throwable throwable) {
    // NOTE(review): ClientLogger may drop the throwable at log levels other than verbose — confirm it renders.
    logger.warning("Error occurred in partition processor for partition {}", partitionContext.partitionId(), throwable);
}
/**
 * Base type for processing events from a single Event Hub partition. {@link EventProcessor} invokes the
 * lifecycle callbacks {@link #initialize(PartitionContext)} and {@link #close(PartitionContext, CloseReason)},
 * and delivers received events to {@link #processEvent(PartitionContext, EventData)}.
 */
// Fixed: the class declares an abstract method, so it must itself be declared abstract to compile.
abstract class PartitionProcessor {
    private final ClientLogger logger = new ClientLogger(PartitionProcessor.class);

    /**
     * This method is called when this {@link EventProcessor} takes ownership of a new partition and before any events
     * from this partition are received.
     *
     * @param partitionContext The partition information for initialization before events from the partition are
     * processed.
     * @return a representation of the deferred computation of this call.
     */
    public Mono<Void> initialize(PartitionContext partitionContext) {
        logger.info("Initializing partition processor for partition {}", partitionContext.partitionId());
        return Mono.empty();
    }

    /**
     * This method is called when a new event is received for this partition. Processing of this event can happen
     * asynchronously.
     *
     * <p>
     * This is also a good place to update checkpoints using the {@code partitionContext} as appropriate.
     *
     * @param partitionContext The partition information the event data belongs to.
     * @param eventData {@link EventData} received from this partition.
     * @return a representation of the deferred computation of this call.
     */
    public abstract Mono<Void> processEvent(PartitionContext partitionContext, EventData eventData);

    /*
     * NOTE(review): the doc below describes a processError(PartitionContext, Throwable) method that is not present
     * in this snippet — presumably it is defined elsewhere; confirm and reattach this doc to it.
     *
     * This method is called when an error occurs while receiving events from Event Hub. An error also marks the end
     * of event data stream.
     *
     * partitionContext - The partition information where the error occurred.
     * throwable - The Throwable that caused this method to be called.
     */

    /**
     * This method is called before the partition processor is closed. A partition processor could be closed for
     * various reasons and the implementations of this interface can take appropriate actions to clean up before the
     * partition processor is shut down.
     *
     * @param partitionContext The partition information for which the processing of events is closed.
     * @param closeReason The reason for closing this partition processor.
     * @return a representation of the deferred computation of this call.
     */
    public Mono<Void> close(PartitionContext partitionContext, CloseReason closeReason) {
        logger.info("Closing partition processor for partition {} with close reason {}",
            partitionContext.partitionId(), closeReason);
        return Mono.empty();
    }
}
/**
 * Base type for processing events from a single Event Hub partition. {@link EventProcessor} invokes the
 * lifecycle callbacks {@link #initialize(PartitionContext)} and {@link #close(PartitionContext, CloseReason)},
 * and delivers received events to {@link #processEvent(PartitionContext, EventData)}.
 */
// Fixed: the class declares an abstract method, so it must itself be declared abstract to compile.
abstract class PartitionProcessor {
    private final ClientLogger logger = new ClientLogger(PartitionProcessor.class);

    /**
     * This method is called when this {@link EventProcessor} takes ownership of a new partition and before any events
     * from this partition are received.
     *
     * @param partitionContext The partition information for initialization before events from the partition are
     * processed.
     * @return a representation of the deferred computation of this call.
     */
    public Mono<Void> initialize(PartitionContext partitionContext) {
        logger.info("Initializing partition processor for partition {}", partitionContext.partitionId());
        return Mono.empty();
    }

    /**
     * This method is called when a new event is received for this partition. Processing of this event can happen
     * asynchronously.
     *
     * <p>
     * This is also a good place to update checkpoints using the {@code partitionContext} as appropriate.
     *
     * @param partitionContext The partition information the event data belongs to.
     * @param eventData {@link EventData} received from this partition.
     * @return a representation of the deferred computation of this call.
     */
    public abstract Mono<Void> processEvent(PartitionContext partitionContext, EventData eventData);

    /*
     * NOTE(review): the doc below describes a processError(PartitionContext, Throwable) method that is not present
     * in this snippet — presumably it is defined elsewhere; confirm and reattach this doc to it.
     *
     * This method is called when an error occurs while receiving events from Event Hub. An error also marks the end
     * of event data stream.
     *
     * partitionContext - The partition information where the error occurred.
     * throwable - The Throwable that caused this method to be called.
     */

    /**
     * This method is called before the partition processor is closed. A partition processor could be closed for
     * various reasons and the implementations of this interface can take appropriate actions to clean up before the
     * partition processor is shut down.
     *
     * @param partitionContext The partition information for which the processing of events is closed.
     * @param closeReason The reason for closing this partition processor.
     * @return a representation of the deferred computation of this call.
     */
    public Mono<Void> close(PartitionContext partitionContext, CloseReason closeReason) {
        logger.info("Closing partition processor for partition {} with close reason {}",
            partitionContext.partitionId(), closeReason);
        return Mono.empty();
    }
}
@alzimmermsft Can you please validate that this behaves correctly when the user does not specify a port, and the port value is therefore zero? Thanks!
/**
 * Creates a {@link NettyAsyncHttpClient} using the configuration captured by this builder.
 *
 * @return A new NettyAsyncHttpClient instance
 * @throws IllegalStateException If proxy type is unknown.
 */
public NettyAsyncHttpClient build() {
    HttpClient nettyHttpClient = HttpClient.create()
        .port(port)
        .wiretap(enableWiretap)
        .tcpConfiguration(tcpConfig -> {
            // Run IO on the caller-supplied event loop group when one was configured.
            if (nioEventLoopGroup != null) {
                tcpConfig = tcpConfig.runOn(nioEventLoopGroup);
            }
            // Apply proxy settings only when a supplier was given and it resolves to a value.
            if (proxyOptions != null) {
                ProxyOptions resolved = proxyOptions.get();
                if (resolved != null) {
                    final ProxyProvider.Proxy proxyType;
                    switch (resolved.type()) {
                        case HTTP:
                            proxyType = ProxyProvider.Proxy.HTTP;
                            break;
                        case SOCKS4:
                            proxyType = ProxyProvider.Proxy.SOCKS4;
                            break;
                        case SOCKS5:
                            proxyType = ProxyProvider.Proxy.SOCKS5;
                            break;
                        default:
                            throw logger.logExceptionAsWarning(new IllegalStateException("Unknown Proxy type '"
                                + resolved.type() + "' in use. Not configuring Netty proxy."));
                    }
                    return tcpConfig.proxy(ts -> ts.type(proxyType).address(resolved.address()));
                }
            }
            return tcpConfig;
        });
    return new NettyAsyncHttpClient(nettyHttpClient);
}
.port(port)
/**
 * Creates a {@link NettyAsyncHttpClient} instance using the configuration set on this builder.
 *
 * @return A new NettyAsyncHttpClient instance.
 * @throws IllegalStateException If the builder is configured with an unknown proxy type.
 */
public NettyAsyncHttpClient build() {
    HttpClient nettyHttpClient = HttpClient.create()
        .port(port)
        .wiretap(enableWiretap)
        .tcpConfiguration(tcpConfig -> {
            // Run IO on the caller-supplied event loop group when one was configured.
            if (nioEventLoopGroup != null) {
                tcpConfig = tcpConfig.runOn(nioEventLoopGroup);
            }
            // Proxy configuration is applied only when proxy options were provided.
            if (proxyOptions != null) {
                ProxyProvider.Proxy nettyProxy;
                switch (proxyOptions.type()) {
                    case HTTP:
                        nettyProxy = ProxyProvider.Proxy.HTTP;
                        break;
                    case SOCKS4:
                        nettyProxy = ProxyProvider.Proxy.SOCKS4;
                        break;
                    case SOCKS5:
                        nettyProxy = ProxyProvider.Proxy.SOCKS5;
                        break;
                    default:
                        throw logger.logExceptionAsWarning(new IllegalStateException("Unknown Proxy type '"
                            + proxyOptions.type() + "' in use. Not configuring Netty proxy."));
                }
                return tcpConfig.proxy(ts -> ts.type(nettyProxy).address(proxyOptions.address()));
            }
            return tcpConfig;
        });
    return new NettyAsyncHttpClient(nettyHttpClient);
}
/**
 * Builder that creates {@link NettyAsyncHttpClient} instances.
 */
class NettyAsyncHttpClientBuilder {
    private final ClientLogger logger = new ClientLogger(NettyAsyncHttpClientBuilder.class);

    // Supplier so the proxy configuration can be resolved lazily when build() runs.
    private Supplier<ProxyOptions> proxyOptions;
    private boolean enableWiretap;
    // NOTE(review): no default value — remains 0 when the caller never sets a port; confirm that is handled.
    private int port;
    private NioEventLoopGroup nioEventLoopGroup;

    /**
     * Creates a new builder with no configuration applied.
     */
    public NettyAsyncHttpClientBuilder() {
    }

    /**
     * Builds a {@link NettyAsyncHttpClient} from the configuration captured by this builder.
     *
     * @return A new NettyAsyncHttpClient instance
     * @throws IllegalStateException If proxy type is unknown.
     */
    /**
     * Apply the provided proxy configuration to the HttpClient.
     *
     * @param proxyOptions the proxy configuration supplier
     * @return a HttpClient with proxy applied
     */
    public NettyAsyncHttpClientBuilder proxy(Supplier<ProxyOptions> proxyOptions) {
        this.proxyOptions = proxyOptions;
        return this;
    }

    /**
     * Apply or remove a wire logger configuration.
     *
     * @param enableWiretap wiretap config
     * @return a HttpClient with wire logging enabled or disabled
     */
    public NettyAsyncHttpClientBuilder wiretap(boolean enableWiretap) {
        this.enableWiretap = enableWiretap;
        return this;
    }

    /**
     * Set the port that client should connect to.
     *
     * @param port the port
     * @return a HttpClient with port applied
     */
    public NettyAsyncHttpClientBuilder port(int port) {
        this.port = port;
        return this;
    }

    /**
     * Sets the NIO event loop group that will be used to handle IO.
     *
     * @param nioEventLoopGroup the thread factory
     * @return a HttpClient with the NIO event loop group applied
     */
    public NettyAsyncHttpClientBuilder nioEventLoopGroup(NioEventLoopGroup nioEventLoopGroup) {
        this.nioEventLoopGroup = nioEventLoopGroup;
        return this;
    }
}
class NettyAsyncHttpClientBuilder {
    private final ClientLogger logger = new ClientLogger(NettyAsyncHttpClientBuilder.class);

    private ProxyOptions proxyOptions;
    private boolean enableWiretap;
    private int port = 80;
    private NioEventLoopGroup nioEventLoopGroup;

    /**
     * Creates a new builder instance, where a builder is capable of generating multiple instances of
     * {@link NettyAsyncHttpClient}.
     */
    public NettyAsyncHttpClientBuilder() {
    }

    /**
     * Creates a new {@link NettyAsyncHttpClient} instance on every call, using the configuration set in the builder at
     * the time of the build method call.
     *
     * @return A new NettyAsyncHttpClient instance.
     * @throws IllegalStateException If the builder is configured to use an unknown proxy type.
     */
    /**
     * Sets the {@link ProxyOptions proxy options} that the client will use.
     *
     * @param proxyOptions The proxy configuration to use.
     * @return the updated NettyAsyncHttpClientBuilder object
     */
    public NettyAsyncHttpClientBuilder proxy(ProxyOptions proxyOptions) {
        this.proxyOptions = proxyOptions;
        return this;
    }

    /**
     * Enables the Netty wiretap feature.
     *
     * @param enableWiretap Flag indicating wiretap status
     * @return the updated NettyAsyncHttpClientBuilder object
     */
    public NettyAsyncHttpClientBuilder wiretap(boolean enableWiretap) {
        this.enableWiretap = enableWiretap;
        return this;
    }

    /**
     * Sets the port which this client should connect, which by default will be set to port 80.
     *
     * @param port The port to connect to.
     * @return the updated NettyAsyncHttpClientBuilder object
     */
    public NettyAsyncHttpClientBuilder port(int port) {
        this.port = port;
        return this;
    }

    /**
     * Sets the NIO event loop group that will be used to run IO loops. For example, a fixed thread pool can be
     * specified as shown below:
     *
     * {@codesnippet com.azure.core.http.netty.NettyAsyncHttpClientBuilder#nioEventLoopGroup}
     *
     * @param nioEventLoopGroup The {@link NioEventLoopGroup} that will run IO loops.
     * @return the updated NettyAsyncHttpClientBuilder object
     */
    public NettyAsyncHttpClientBuilder nioEventLoopGroup(NioEventLoopGroup nioEventLoopGroup) {
        this.nioEventLoopGroup = nioEventLoopGroup;
        return this;
    }
}
To avoid a future bug, we should move the configuration of the proxy into the if block above by inverting the null check.
/**
 * Creates a {@link NettyAsyncHttpClient} using the configuration captured by this builder.
 *
 * @return A new NettyAsyncHttpClient instance
 * @throws IllegalStateException If proxy type is unknown.
 */
public NettyAsyncHttpClient build() {
    HttpClient nettyHttpClient = HttpClient.create()
        .port(port)
        .wiretap(enableWiretap)
        .tcpConfiguration(tcpConfig -> {
            // Run IO on the caller-supplied event loop group when one was configured.
            if (nioEventLoopGroup != null) {
                tcpConfig = tcpConfig.runOn(nioEventLoopGroup);
            }
            // Apply proxy settings only when proxy options were provided.
            if (proxyOptions != null) {
                final ProxyProvider.Proxy proxyType;
                switch (proxyOptions.type()) {
                    case HTTP:
                        proxyType = ProxyProvider.Proxy.HTTP;
                        break;
                    case SOCKS4:
                        proxyType = ProxyProvider.Proxy.SOCKS4;
                        break;
                    case SOCKS5:
                        proxyType = ProxyProvider.Proxy.SOCKS5;
                        break;
                    default:
                        throw logger.logExceptionAsWarning(new IllegalStateException("Unknown Proxy type '"
                            + proxyOptions.type() + "' in use. Not configuring Netty proxy."));
                }
                return tcpConfig.proxy(ts -> ts.type(proxyType).address(proxyOptions.address()));
            }
            return tcpConfig;
        });
    return new NettyAsyncHttpClient(nettyHttpClient);
}
return tcpConfig.proxy(ts -> ts.type(nettyProxy).address(proxyOptions.address()));
/**
 * Creates a {@link NettyAsyncHttpClient} instance using the configuration set on this builder.
 *
 * @return A new NettyAsyncHttpClient instance.
 * @throws IllegalStateException If the builder is configured with an unknown proxy type.
 */
public NettyAsyncHttpClient build() {
    HttpClient nettyHttpClient = HttpClient.create()
        .port(port)
        .wiretap(enableWiretap)
        .tcpConfiguration(tcpConfig -> {
            // Run IO on the caller-supplied event loop group when one was configured.
            if (nioEventLoopGroup != null) {
                tcpConfig = tcpConfig.runOn(nioEventLoopGroup);
            }
            // Proxy configuration is applied only when proxy options were provided.
            if (proxyOptions != null) {
                ProxyProvider.Proxy nettyProxy;
                switch (proxyOptions.type()) {
                    case HTTP:
                        nettyProxy = ProxyProvider.Proxy.HTTP;
                        break;
                    case SOCKS4:
                        nettyProxy = ProxyProvider.Proxy.SOCKS4;
                        break;
                    case SOCKS5:
                        nettyProxy = ProxyProvider.Proxy.SOCKS5;
                        break;
                    default:
                        throw logger.logExceptionAsWarning(new IllegalStateException("Unknown Proxy type '"
                            + proxyOptions.type() + "' in use. Not configuring Netty proxy."));
                }
                return tcpConfig.proxy(ts -> ts.type(nettyProxy).address(proxyOptions.address()));
            }
            return tcpConfig;
        });
    return new NettyAsyncHttpClient(nettyHttpClient);
}
/**
 * Builder that creates {@link NettyAsyncHttpClient} instances.
 */
class NettyAsyncHttpClientBuilder {
    private final ClientLogger logger = new ClientLogger(NettyAsyncHttpClientBuilder.class);

    private ProxyOptions proxyOptions;
    private boolean enableWiretap;
    // Defaults to port 80 when the caller never configures one.
    private int port = 80;
    private NioEventLoopGroup nioEventLoopGroup;

    /**
     * Creates a new builder with default configuration.
     */
    public NettyAsyncHttpClientBuilder() {
    }

    /**
     * Builds a {@link NettyAsyncHttpClient} from the configuration captured by this builder.
     *
     * @return A new NettyAsyncHttpClient instance
     * @throws IllegalStateException If proxy type is unknown.
     */
    /**
     * Sets the {@link ProxyOptions proxy options} that the client will use.
     *
     * @param proxyOptions The proxy configuration to use.
     * @return the updated NettyAsyncHttpClientBuilder object
     */
    public NettyAsyncHttpClientBuilder proxy(ProxyOptions proxyOptions) {
        this.proxyOptions = proxyOptions;
        return this;
    }

    /**
     * Apply or remove a wire logger configuration.
     *
     * @param enableWiretap Flag indicating wiretap status
     * @return the updated NettyAsyncHttpClientBuilder object
     */
    public NettyAsyncHttpClientBuilder wiretap(boolean enableWiretap) {
        this.enableWiretap = enableWiretap;
        return this;
    }

    /**
     * Sets the port which this client should connect.
     *
     * @param port The port to connect to.
     * @return the updated NettyAsyncHttpClientBuilder object
     */
    public NettyAsyncHttpClientBuilder port(int port) {
        this.port = port;
        return this;
    }

    /**
     * Sets the NIO event loop group that will be used to run IO loops.
     *
     * @param nioEventLoopGroup The {@link NioEventLoopGroup} that will run IO loops.
     * @return the updated NettyAsyncHttpClientBuilder object
     */
    public NettyAsyncHttpClientBuilder nioEventLoopGroup(NioEventLoopGroup nioEventLoopGroup) {
        this.nioEventLoopGroup = nioEventLoopGroup;
        return this;
    }
}
class NettyAsyncHttpClientBuilder {
    private final ClientLogger logger = new ClientLogger(NettyAsyncHttpClientBuilder.class);

    private ProxyOptions proxyOptions;
    private boolean enableWiretap;
    private int port = 80;
    private NioEventLoopGroup nioEventLoopGroup;

    /**
     * Creates a new builder instance, where a builder is capable of generating multiple instances of
     * {@link NettyAsyncHttpClient}.
     */
    public NettyAsyncHttpClientBuilder() {
    }

    /**
     * Creates a new {@link NettyAsyncHttpClient} instance on every call, using the configuration set in the builder at
     * the time of the build method call.
     *
     * @return A new NettyAsyncHttpClient instance.
     * @throws IllegalStateException If the builder is configured to use an unknown proxy type.
     */
    /**
     * Sets the {@link ProxyOptions proxy options} that the client will use.
     *
     * @param proxyOptions The proxy configuration to use.
     * @return the updated NettyAsyncHttpClientBuilder object
     */
    public NettyAsyncHttpClientBuilder proxy(ProxyOptions proxyOptions) {
        this.proxyOptions = proxyOptions;
        return this;
    }

    /**
     * Enables the Netty wiretap feature.
     *
     * @param enableWiretap Flag indicating wiretap status
     * @return the updated NettyAsyncHttpClientBuilder object
     */
    public NettyAsyncHttpClientBuilder wiretap(boolean enableWiretap) {
        this.enableWiretap = enableWiretap;
        return this;
    }

    /**
     * Sets the port which this client should connect, which by default will be set to port 80.
     *
     * @param port The port to connect to.
     * @return the updated NettyAsyncHttpClientBuilder object
     */
    public NettyAsyncHttpClientBuilder port(int port) {
        this.port = port;
        return this;
    }

    /**
     * Sets the NIO event loop group that will be used to run IO loops. For example, a fixed thread pool can be
     * specified as shown below:
     *
     * {@codesnippet com.azure.core.http.netty.NettyAsyncHttpClientBuilder#nioEventLoopGroup}
     *
     * @param nioEventLoopGroup The {@link NioEventLoopGroup} that will run IO loops.
     * @return the updated NettyAsyncHttpClientBuilder object
     */
    public NettyAsyncHttpClientBuilder nioEventLoopGroup(NioEventLoopGroup nioEventLoopGroup) {
        this.nioEventLoopGroup = nioEventLoopGroup;
        return this;
    }
}
Did you make this change?
/**
 * Creates a {@link NettyAsyncHttpClient} using the configuration captured by this builder.
 *
 * @return A new NettyAsyncHttpClient instance
 * @throws IllegalStateException If proxy type is unknown.
 */
public NettyAsyncHttpClient build() {
    HttpClient nettyHttpClient = HttpClient.create()
        .port(port)
        .wiretap(enableWiretap)
        .tcpConfiguration(tcpConfig -> {
            // Run IO on the caller-supplied event loop group when one was configured.
            if (nioEventLoopGroup != null) {
                tcpConfig = tcpConfig.runOn(nioEventLoopGroup);
            }
            // Apply proxy settings only when proxy options were provided.
            if (proxyOptions != null) {
                final ProxyProvider.Proxy proxyType;
                switch (proxyOptions.type()) {
                    case HTTP:
                        proxyType = ProxyProvider.Proxy.HTTP;
                        break;
                    case SOCKS4:
                        proxyType = ProxyProvider.Proxy.SOCKS4;
                        break;
                    case SOCKS5:
                        proxyType = ProxyProvider.Proxy.SOCKS5;
                        break;
                    default:
                        throw logger.logExceptionAsWarning(new IllegalStateException("Unknown Proxy type '"
                            + proxyOptions.type() + "' in use. Not configuring Netty proxy."));
                }
                return tcpConfig.proxy(ts -> ts.type(proxyType).address(proxyOptions.address()));
            }
            return tcpConfig;
        });
    return new NettyAsyncHttpClient(nettyHttpClient);
}
return tcpConfig.proxy(ts -> ts.type(nettyProxy).address(proxyOptions.address()));
/**
 * Creates a {@link NettyAsyncHttpClient} instance using the configuration set on this builder.
 *
 * @return A new NettyAsyncHttpClient instance.
 * @throws IllegalStateException If the builder is configured with an unknown proxy type.
 */
public NettyAsyncHttpClient build() {
    HttpClient nettyHttpClient = HttpClient.create()
        .port(port)
        .wiretap(enableWiretap)
        .tcpConfiguration(tcpConfig -> {
            // Run IO on the caller-supplied event loop group when one was configured.
            if (nioEventLoopGroup != null) {
                tcpConfig = tcpConfig.runOn(nioEventLoopGroup);
            }
            // Proxy configuration is applied only when proxy options were provided.
            if (proxyOptions != null) {
                ProxyProvider.Proxy nettyProxy;
                switch (proxyOptions.type()) {
                    case HTTP:
                        nettyProxy = ProxyProvider.Proxy.HTTP;
                        break;
                    case SOCKS4:
                        nettyProxy = ProxyProvider.Proxy.SOCKS4;
                        break;
                    case SOCKS5:
                        nettyProxy = ProxyProvider.Proxy.SOCKS5;
                        break;
                    default:
                        throw logger.logExceptionAsWarning(new IllegalStateException("Unknown Proxy type '"
                            + proxyOptions.type() + "' in use. Not configuring Netty proxy."));
                }
                return tcpConfig.proxy(ts -> ts.type(nettyProxy).address(proxyOptions.address()));
            }
            return tcpConfig;
        });
    return new NettyAsyncHttpClient(nettyHttpClient);
}
/**
 * Builder that creates {@link NettyAsyncHttpClient} instances.
 */
class NettyAsyncHttpClientBuilder {
    private final ClientLogger logger = new ClientLogger(NettyAsyncHttpClientBuilder.class);

    private ProxyOptions proxyOptions;
    private boolean enableWiretap;
    // Defaults to port 80 when the caller never configures one.
    private int port = 80;
    private NioEventLoopGroup nioEventLoopGroup;

    /**
     * Creates a new builder with default configuration.
     */
    public NettyAsyncHttpClientBuilder() {
    }

    /**
     * Builds a {@link NettyAsyncHttpClient} from the configuration captured by this builder.
     *
     * @return A new NettyAsyncHttpClient instance
     * @throws IllegalStateException If proxy type is unknown.
     */
    /**
     * Sets the {@link ProxyOptions proxy options} that the client will use.
     *
     * @param proxyOptions The proxy configuration to use.
     * @return the updated NettyAsyncHttpClientBuilder object
     */
    public NettyAsyncHttpClientBuilder proxy(ProxyOptions proxyOptions) {
        this.proxyOptions = proxyOptions;
        return this;
    }

    /**
     * Apply or remove a wire logger configuration.
     *
     * @param enableWiretap Flag indicating wiretap status
     * @return the updated NettyAsyncHttpClientBuilder object
     */
    public NettyAsyncHttpClientBuilder wiretap(boolean enableWiretap) {
        this.enableWiretap = enableWiretap;
        return this;
    }

    /**
     * Sets the port which this client should connect.
     *
     * @param port The port to connect to.
     * @return the updated NettyAsyncHttpClientBuilder object
     */
    public NettyAsyncHttpClientBuilder port(int port) {
        this.port = port;
        return this;
    }

    /**
     * Sets the NIO event loop group that will be used to run IO loops.
     *
     * @param nioEventLoopGroup The {@link NioEventLoopGroup} that will run IO loops.
     * @return the updated NettyAsyncHttpClientBuilder object
     */
    public NettyAsyncHttpClientBuilder nioEventLoopGroup(NioEventLoopGroup nioEventLoopGroup) {
        this.nioEventLoopGroup = nioEventLoopGroup;
        return this;
    }
}
class NettyAsyncHttpClientBuilder {
    private final ClientLogger logger = new ClientLogger(NettyAsyncHttpClientBuilder.class);

    private ProxyOptions proxyOptions;
    private boolean enableWiretap;
    private int port = 80;
    private NioEventLoopGroup nioEventLoopGroup;

    /**
     * Creates a new builder instance, where a builder is capable of generating multiple instances of
     * {@link NettyAsyncHttpClient}.
     */
    public NettyAsyncHttpClientBuilder() {
    }

    /**
     * Creates a new {@link NettyAsyncHttpClient} instance on every call, using the configuration set in the builder at
     * the time of the build method call.
     *
     * @return A new NettyAsyncHttpClient instance.
     * @throws IllegalStateException If the builder is configured to use an unknown proxy type.
     */
    /**
     * Sets the {@link ProxyOptions proxy options} that the client will use.
     *
     * @param proxyOptions The proxy configuration to use.
     * @return the updated NettyAsyncHttpClientBuilder object
     */
    public NettyAsyncHttpClientBuilder proxy(ProxyOptions proxyOptions) {
        this.proxyOptions = proxyOptions;
        return this;
    }

    /**
     * Enables the Netty wiretap feature.
     *
     * @param enableWiretap Flag indicating wiretap status
     * @return the updated NettyAsyncHttpClientBuilder object
     */
    public NettyAsyncHttpClientBuilder wiretap(boolean enableWiretap) {
        this.enableWiretap = enableWiretap;
        return this;
    }

    /**
     * Sets the port which this client should connect, which by default will be set to port 80.
     *
     * @param port The port to connect to.
     * @return the updated NettyAsyncHttpClientBuilder object
     */
    public NettyAsyncHttpClientBuilder port(int port) {
        this.port = port;
        return this;
    }

    /**
     * Sets the NIO event loop group that will be used to run IO loops. For example, a fixed thread pool can be
     * specified as shown below:
     *
     * {@codesnippet com.azure.core.http.netty.NettyAsyncHttpClientBuilder#nioEventLoopGroup}
     *
     * @param nioEventLoopGroup The {@link NioEventLoopGroup} that will run IO loops.
     * @return the updated NettyAsyncHttpClientBuilder object
     */
    public NettyAsyncHttpClientBuilder nioEventLoopGroup(NioEventLoopGroup nioEventLoopGroup) {
        this.nioEventLoopGroup = nioEventLoopGroup;
        return this;
    }
}
Let's revert this; the line of code is meant to mutate the object.
/**
 * Handles an invocation on the Swagger interface proxy by translating the called method into an HTTP request,
 * sending it through the pipeline, and decoding the response.
 *
 * @param proxy The dynamic proxy instance the method was invoked on.
 * @param method The interface method that was invoked.
 * @param args The arguments passed to the method.
 * @return The decoded result of the service call, shaped by the method's declared return type.
 */
public Object invoke(Object proxy, final Method method, Object[] args) {
    try {
        final SwaggerMethodParser methodParser;
        final HttpRequest request;
        if (method.isAnnotationPresent(ResumeOperation.class)) {
            // Resuming a long-running operation: rebuild the request from the persisted description.
            OperationDescription opDesc = ImplUtils.findFirstOfType(args, OperationDescription.class);
            Method resumeMethod = determineResumeMethod(method, opDesc.methodName());
            methodParser = methodParser(resumeMethod);
            request = createHttpRequest(opDesc, methodParser, args);
            final Type returnType = methodParser.returnType();
            return handleResumeOperation(request, opDesc, methodParser, returnType,
                startTracingSpan(resumeMethod, Context.NONE));
        } else {
            methodParser = methodParser(method);
            request = createHttpRequest(methodParser, args);
            Context context = methodParser.context(args).addData("caller-method",
                methodParser.fullyQualifiedMethodName());
            // NOTE(review): the tracing-enriched context is held in a separate variable here, while the
            // final handleHttpResponse call below still receives the pre-tracing `context` — verify intent.
            final Context context1 = startTracingSpan(method, context);
            // Validate the body against the declared Content-Length before sending.
            final Mono<HttpResponse> asyncResponse = validateLength(request).then(
                send(request, context1));
            Mono<HttpDecodedResponse> asyncDecodedResponse = this.decoder.decode(asyncResponse, methodParser);
            return handleHttpResponse(request, asyncDecodedResponse, methodParser, methodParser.returnType(), context);
        }
    } catch (Exception e) {
        // Surface any failure through the client logger, preserving the original cause.
        throw logger.logExceptionAsError(Exceptions.propagate(e));
    }
}
final Context context1 = startTracingSpan(method, context);
/**
 * Handles an invocation on the Swagger interface proxy by translating the called method into an HTTP request,
 * sending it through the pipeline, and decoding the response.
 *
 * @param proxy The dynamic proxy instance the method was invoked on.
 * @param method The interface method that was invoked.
 * @param args The arguments passed to the method.
 * @return The decoded result of the service call, shaped by the method's declared return type.
 */
public Object invoke(Object proxy, final Method method, Object[] args) {
    try {
        final SwaggerMethodParser methodParser;
        final HttpRequest request;
        if (method.isAnnotationPresent(ResumeOperation.class)) {
            // Resuming a long-running operation: rebuild the request from the persisted description.
            OperationDescription opDesc = ImplUtils.findFirstOfType(args, OperationDescription.class);
            Method resumeMethod = determineResumeMethod(method, opDesc.methodName());
            methodParser = methodParser(resumeMethod);
            request = createHttpRequest(opDesc, methodParser, args);
            final Type returnType = methodParser.returnType();
            return handleResumeOperation(request, opDesc, methodParser, returnType,
                startTracingSpan(resumeMethod, Context.NONE));
        } else {
            methodParser = methodParser(method);
            request = createHttpRequest(methodParser, args);
            Context context = methodParser.context(args).addData("caller-method",
                methodParser.fullyQualifiedMethodName());
            // Reassign so all downstream calls observe the tracing-enriched context.
            context = startTracingSpan(method, context);
            if (request.body() != null) {
                // Mutates the request in place, replacing the body after length validation.
                request.body(validateLength(request));
            }
            final Mono<HttpResponse> asyncResponse = send(request, context);
            Mono<HttpDecodedResponse> asyncDecodedResponse = this.decoder.decode(asyncResponse, methodParser);
            return handleHttpResponse(request, asyncDecodedResponse, methodParser, methodParser.returnType(), context);
        }
    } catch (Exception e) {
        // Surface any failure through the client logger, preserving the original cause.
        throw logger.logExceptionAsError(Exceptions.propagate(e));
    }
}
class RestProxy implements InvocationHandler { private final ClientLogger logger = new ClientLogger(RestProxy.class); private final HttpPipeline httpPipeline; private final SerializerAdapter serializer; private final SwaggerInterfaceParser interfaceParser; private final HttpResponseDecoder decoder; /** * Create a RestProxy. * * @param httpPipeline the HttpPipelinePolicy and HttpClient httpPipeline that will be used to send HTTP * requests. * @param serializer the serializer that will be used to convert response bodies to POJOs. * @param interfaceParser the parser that contains information about the interface describing REST API methods * that this RestProxy "implements". */ public RestProxy(HttpPipeline httpPipeline, SerializerAdapter serializer, SwaggerInterfaceParser interfaceParser) { this.httpPipeline = httpPipeline; this.serializer = serializer; this.interfaceParser = interfaceParser; this.decoder = new HttpResponseDecoder(this.serializer); } /** * Get the SwaggerMethodParser for the provided method. The Method must exist on the Swagger * interface that this RestProxy was created to "implement". * * @param method the method to get a SwaggerMethodParser for * @return the SwaggerMethodParser for the provided method */ private SwaggerMethodParser methodParser(Method method) { return interfaceParser.methodParser(method); } /** * Get the SerializerAdapter used by this RestProxy. * * @return The SerializerAdapter used by this RestProxy */ public SerializerAdapter serializer() { return serializer; } /** * Send the provided request asynchronously, applying any request policies provided to the HttpClient instance. 
* * @param request the HTTP request to send * @param contextData the context * @return a {@link Mono} that emits HttpResponse asynchronously */ public Mono<HttpResponse> send(HttpRequest request, Context contextData) { return httpPipeline.send(request, contextData); } @Override private Mono<HttpRequest> validateLength(final HttpRequest request) { Flux<ByteBuffer> body = request.body(); if (body == null) { return Mono.just(request); } Long expectedLength = Long.valueOf(request.headers().value("Content-Length")); return FluxUtil.collectBytesInByteBufferStream(body).doOnNext(bb -> { if (bb.length > expectedLength) { throw new UnexpectedLengthException( String.format("Request body emitted more bytes than the expected %d bytes.", expectedLength), bb.length, expectedLength); } else if (bb.length != expectedLength) { throw new UnexpectedLengthException( String.format("Request body emitted less bytes than the expected %d bytes.", expectedLength), bb.length, expectedLength); } }).then(Mono.just(request)); } private Method determineResumeMethod(Method method, String resumeMethodName) { for (Method potentialResumeMethod : method.getDeclaringClass().getMethods()) { if (potentialResumeMethod.getName().equals(resumeMethodName)) { return potentialResumeMethod; } } return null; } /** * Starts the tracing span for the current service call, additionally set metadata attributes on the span by passing * additional context information. * @param method Service method being called. * @param context Context information about the current service call. * @return The updated context containing the span context. */ private Context startTracingSpan(Method method, Context context) { String spanName = String.format("Azure.%s/%s", interfaceParser.serviceName(), method.getName()); context = TracerProxy.setSpanName(spanName, context); return TracerProxy.start(spanName, context); } /** * Create a HttpRequest for the provided Swagger method using the provided arguments. 
* * @param methodParser the Swagger method parser to use * @param args the arguments to use to populate the method's annotation values * @return a HttpRequest * @throws IOException thrown if the body contents cannot be serialized */ private HttpRequest createHttpRequest(SwaggerMethodParser methodParser, Object[] args) throws IOException { UrlBuilder urlBuilder; final String path = methodParser.path(args); final UrlBuilder pathUrlBuilder = UrlBuilder.parse(path); if (pathUrlBuilder.scheme() != null) { urlBuilder = pathUrlBuilder; } else { urlBuilder = new UrlBuilder(); final String scheme = methodParser.scheme(args); urlBuilder.scheme(scheme); final String host = methodParser.host(args); urlBuilder.host(host); if (path != null && !path.isEmpty() && !path.equals("/")) { String hostPath = urlBuilder.path(); if (hostPath == null || hostPath.isEmpty() || hostPath.equals("/")) { urlBuilder.path(path); } else { urlBuilder.path(hostPath + "/" + path); } } } for (final EncodedParameter queryParameter : methodParser.encodedQueryParameters(args)) { urlBuilder.setQueryParameter(queryParameter.name(), queryParameter.encodedValue()); } final URL url = urlBuilder.toURL(); final HttpRequest request = configRequest(new HttpRequest(methodParser.httpMethod(), url), methodParser, args); for (final HttpHeader header : methodParser.headers(args)) { request.header(header.name(), header.value()); } return request; } /** * Create a HttpRequest for the provided Swagger method using the provided arguments. 
* * @param methodParser the Swagger method parser to use * @param args the arguments to use to populate the method's annotation values * @return a HttpRequest * @throws IOException thrown if the body contents cannot be serialized */ private HttpRequest createHttpRequest(OperationDescription operationDescription, SwaggerMethodParser methodParser, Object[] args) throws IOException { final HttpRequest request = configRequest(new HttpRequest(methodParser.httpMethod(), operationDescription.url()), methodParser, args); for (final String headerName : operationDescription.headers().keySet()) { request.header(headerName, operationDescription.headers().get(headerName)); } return request; } @SuppressWarnings("unchecked") private HttpRequest configRequest(HttpRequest request, SwaggerMethodParser methodParser, Object[] args) throws IOException { final Object bodyContentObject = methodParser.body(args); if (bodyContentObject == null) { request.headers().put("Content-Length", "0"); } else { String contentType = methodParser.bodyContentType(); if (contentType == null || contentType.isEmpty()) { if (bodyContentObject instanceof byte[] || bodyContentObject instanceof String) { contentType = ContentType.APPLICATION_OCTET_STREAM; } else { contentType = ContentType.APPLICATION_JSON; } } request.headers().put("Content-Type", contentType); boolean isJson = false; final String[] contentTypeParts = contentType.split(";"); for (String contentTypePart : contentTypeParts) { if (contentTypePart.trim().equalsIgnoreCase(ContentType.APPLICATION_JSON)) { isJson = true; break; } } if (isJson) { final String bodyContentString = serializer.serialize(bodyContentObject, SerializerEncoding.JSON); request.body(bodyContentString); } else if (FluxUtil.isFluxByteBuffer(methodParser.bodyJavaType())) { request.body((Flux<ByteBuffer>) bodyContentObject); } else if (bodyContentObject instanceof byte[]) { request.body((byte[]) bodyContentObject); } else if (bodyContentObject instanceof String) { final String 
bodyContentString = (String) bodyContentObject; if (!bodyContentString.isEmpty()) { request.body(bodyContentString); } } else { final String bodyContentString = serializer.serialize(bodyContentObject, SerializerEncoding.fromHeaders(request.headers())); request.body(bodyContentString); } } return request; } private Mono<HttpDecodedResponse> ensureExpectedStatus(Mono<HttpDecodedResponse> asyncDecodedResponse, final SwaggerMethodParser methodParser) { return asyncDecodedResponse .flatMap(decodedHttpResponse -> ensureExpectedStatus(decodedHttpResponse, methodParser, null)); } private static Exception instantiateUnexpectedException(UnexpectedExceptionInformation exception, HttpResponse httpResponse, String responseContent, Object responseDecodedContent) { final int responseStatusCode = httpResponse.statusCode(); String contentType = httpResponse.headerValue("Content-Type"); String bodyRepresentation; if ("application/octet-stream".equalsIgnoreCase(contentType)) { bodyRepresentation = "(" + httpResponse.headerValue("Content-Length") + "-byte body)"; } else { bodyRepresentation = responseContent.isEmpty() ? "(empty body)" : "\"" + responseContent + "\""; } Exception result; try { final Constructor<? extends HttpResponseException> exceptionConstructor = exception.exceptionType().getConstructor(String.class, HttpResponse.class, exception.exceptionBodyType()); result = exceptionConstructor.newInstance("Status code " + responseStatusCode + ", " + bodyRepresentation, httpResponse, responseDecodedContent); } catch (ReflectiveOperationException e) { String message = "Status code " + responseStatusCode + ", but an instance of " + exception.exceptionType().getCanonicalName() + " cannot be created." 
+ " Response body: " + bodyRepresentation; result = new IOException(message, e); } return result; } /** * Create a publisher that (1) emits error if the provided response {@code decodedResponse} has * 'disallowed status code' OR (2) emits provided response if it's status code ia allowed. * * 'disallowed status code' is one of the status code defined in the provided SwaggerMethodParser * or is in the int[] of additional allowed status codes. * * @param decodedResponse The HttpResponse to check. * @param methodParser The method parser that contains information about the service interface * method that initiated the HTTP request. * @param additionalAllowedStatusCodes Additional allowed status codes that are permitted based * on the context of the HTTP request. * @return An async-version of the provided decodedResponse. */ public Mono<HttpDecodedResponse> ensureExpectedStatus(final HttpDecodedResponse decodedResponse, final SwaggerMethodParser methodParser, int[] additionalAllowedStatusCodes) { final int responseStatusCode = decodedResponse.sourceResponse().statusCode(); final Mono<HttpDecodedResponse> asyncResult; if (!methodParser.isExpectedResponseStatusCode(responseStatusCode, additionalAllowedStatusCodes)) { Mono<String> bodyAsString = decodedResponse.sourceResponse().bodyAsString(); asyncResult = bodyAsString.flatMap((Function<String, Mono<HttpDecodedResponse>>) responseContent -> { Mono<Object> decodedErrorBody = decodedResponse.decodedBody(); return decodedErrorBody.flatMap((Function<Object, Mono<HttpDecodedResponse>>) responseDecodedErrorObject -> { Throwable exception = instantiateUnexpectedException(methodParser.getUnexpectedException(responseStatusCode), decodedResponse.sourceResponse(), responseContent, responseDecodedErrorObject); return Mono.error(exception); }).switchIfEmpty(Mono.defer((Supplier<Mono<HttpDecodedResponse>>) () -> { Throwable exception = instantiateUnexpectedException(methodParser.getUnexpectedException(responseStatusCode), 
decodedResponse.sourceResponse(), responseContent, null); return Mono.error(exception); })); }).switchIfEmpty(Mono.defer((Supplier<Mono<HttpDecodedResponse>>) () -> { Throwable exception = instantiateUnexpectedException(methodParser.getUnexpectedException(responseStatusCode), decodedResponse.sourceResponse(), "", null); return Mono.error(exception); })); } else { asyncResult = Mono.just(decodedResponse); } return asyncResult; } private Mono<?> handleRestResponseReturnType(HttpDecodedResponse response, SwaggerMethodParser methodParser, Type entityType) { Mono<?> asyncResult; if (TypeUtil.isTypeOrSubTypeOf(entityType, Response.class)) { Type bodyType = TypeUtil.getRestResponseBodyType(entityType); if (TypeUtil.isTypeOrSubTypeOf(bodyType, Void.class)) { asyncResult = response.sourceResponse().body().ignoreElements() .then(Mono.just(createResponse(response, entityType, null))); } else { asyncResult = handleBodyReturnType(response, methodParser, bodyType) .map((Function<Object, Response<?>>) bodyAsObject -> createResponse(response, entityType, bodyAsObject)) .switchIfEmpty(Mono.defer((Supplier<Mono<Response<?>>>) () -> Mono.just(createResponse(response, entityType, null)))); } } else { asyncResult = handleBodyReturnType(response, methodParser, entityType); } return asyncResult; } @SuppressWarnings("unchecked") private Response<?> createResponse(HttpDecodedResponse response, Type entityType, Object bodyAsObject) { final HttpResponse httpResponse = response.sourceResponse(); final HttpRequest httpRequest = httpResponse.request(); final int responseStatusCode = httpResponse.statusCode(); final HttpHeaders responseHeaders = httpResponse.headers(); Class<? extends Response<?>> cls = (Class<? extends Response<?>>) TypeUtil.getRawClass(entityType); if (cls.equals(Response.class)) { cls = (Class<? extends Response<?>>) (Object) ResponseBase.class; } else if (cls.equals(PagedResponse.class)) { cls = (Class<? 
extends Response<?>>) (Object) PagedResponseBase.class; if (bodyAsObject != null && !TypeUtil.isTypeOrSubTypeOf(bodyAsObject.getClass(), Page.class)) { throw logger.logExceptionAsError(new RuntimeException("Unable to create PagedResponse<T>. Body must be of a type that implements: " + Page.class)); } } List<Constructor<?>> constructors = Arrays.stream(cls.getDeclaredConstructors()) .filter(constructor -> { int paramCount = constructor.getParameterCount(); return paramCount >= 3 && paramCount <= 5; }) .sorted(Comparator.comparingInt(Constructor::getParameterCount)) .collect(Collectors.toList()); if (constructors.isEmpty()) { throw logger.logExceptionAsError(new RuntimeException("Cannot find suitable constructor for class " + cls)); } for (Constructor<?> constructor : constructors) { final Constructor<? extends Response<?>> ctor = (Constructor<? extends Response<?>>) constructor; try { final int paramCount = constructor.getParameterCount(); switch (paramCount) { case 3: return ctor.newInstance(httpRequest, responseStatusCode, responseHeaders); case 4: return ctor.newInstance(httpRequest, responseStatusCode, responseHeaders, bodyAsObject); case 5: return ctor.newInstance(httpRequest, responseStatusCode, responseHeaders, bodyAsObject, response.decodedHeaders().block()); default: throw logger.logExceptionAsError(new IllegalStateException("Response constructor with expected parameters not found.")); } } catch (IllegalAccessException | InvocationTargetException | InstantiationException e) { throw logger.logExceptionAsError(reactor.core.Exceptions.propagate(e)); } } throw logger.logExceptionAsError(new RuntimeException("Cannot find suitable constructor for class " + cls)); } protected final Mono<?> handleBodyReturnType(final HttpDecodedResponse response, final SwaggerMethodParser methodParser, final Type entityType) { final int responseStatusCode = response.sourceResponse().statusCode(); final HttpMethod httpMethod = methodParser.httpMethod(); final Type 
returnValueWireType = methodParser.returnValueWireType(); final Mono<?> asyncResult; if (httpMethod == HttpMethod.HEAD && (TypeUtil.isTypeOrSubTypeOf(entityType, Boolean.TYPE) || TypeUtil.isTypeOrSubTypeOf(entityType, Boolean.class))) { boolean isSuccess = (responseStatusCode / 100) == 2; asyncResult = Mono.just(isSuccess); } else if (TypeUtil.isTypeOrSubTypeOf(entityType, byte[].class)) { Mono<byte[]> responseBodyBytesAsync = response.sourceResponse().bodyAsByteArray(); if (returnValueWireType == Base64Url.class) { responseBodyBytesAsync = responseBodyBytesAsync.map(base64UrlBytes -> new Base64Url(base64UrlBytes).decodedBytes()); } asyncResult = responseBodyBytesAsync; } else if (FluxUtil.isFluxByteBuffer(entityType)) { asyncResult = Mono.just(response.sourceResponse().body()); } else { asyncResult = response.decodedBody(); } return asyncResult; } protected Object handleHttpResponse(final HttpRequest httpRequest, Mono<HttpDecodedResponse> asyncDecodedHttpResponse, SwaggerMethodParser methodParser, Type returnType, Context context) { return handleRestReturnType(asyncDecodedHttpResponse, methodParser, returnType, context); } protected Object handleResumeOperation(HttpRequest httpRequest, OperationDescription operationDescription, SwaggerMethodParser methodParser, Type returnType, Context context) throws Exception { throw new Exception("The resume operation is not available in the base RestProxy class."); } /** * Handle the provided asynchronous HTTP response and return the deserialized value. * * @param asyncHttpDecodedResponse the asynchronous HTTP response to the original HTTP request * @param methodParser the SwaggerMethodParser that the request originates from * @param returnType the type of value that will be returned * @param context Additional context that is passed through the Http pipeline during the service call. 
* @return the deserialized result */ public final Object handleRestReturnType(Mono<HttpDecodedResponse> asyncHttpDecodedResponse, final SwaggerMethodParser methodParser, final Type returnType, Context context) { final Mono<HttpDecodedResponse> asyncExpectedResponse = ensureExpectedStatus(asyncHttpDecodedResponse, methodParser) .doOnEach(RestProxy::endTracingSpan) .subscriberContext(reactor.util.context.Context.of("TRACING_CONTEXT", context)); final Object result; if (TypeUtil.isTypeOrSubTypeOf(returnType, Mono.class)) { final Type monoTypeParam = TypeUtil.getTypeArgument(returnType); if (TypeUtil.isTypeOrSubTypeOf(monoTypeParam, Void.class)) { result = asyncExpectedResponse.then(); } else { result = asyncExpectedResponse.flatMap(response -> handleRestResponseReturnType(response, methodParser, monoTypeParam)); } } else if (FluxUtil.isFluxByteBuffer(returnType)) { result = asyncExpectedResponse.flatMapMany(ar -> ar.sourceResponse().body()); } else if (TypeUtil.isTypeOrSubTypeOf(returnType, void.class) || TypeUtil.isTypeOrSubTypeOf(returnType, Void.class)) { asyncExpectedResponse.block(); result = null; } else { result = asyncExpectedResponse .flatMap(httpResponse -> handleRestResponseReturnType(httpResponse, methodParser, returnType)) .block(); } return result; } private static void endTracingSpan(Signal<HttpDecodedResponse> signal) { if (signal.isOnComplete() || signal.isOnSubscribe()) { return; } reactor.util.context.Context context = signal.getContext(); Optional<Context> tracingContext = context.getOrEmpty("TRACING_CONTEXT"); if (!tracingContext.isPresent()) { return; } int statusCode = 0; HttpDecodedResponse httpDecodedResponse; Throwable throwable = null; if (signal.hasValue()) { httpDecodedResponse = signal.get(); statusCode = httpDecodedResponse.sourceResponse().statusCode(); } else if (signal.hasError()) { throwable = signal.getThrowable(); if (throwable instanceof HttpResponseException) { HttpResponseException exception = (HttpResponseException) throwable; 
statusCode = exception.response().statusCode(); } } TracerProxy.end(statusCode, throwable, tracingContext.get()); } /** * Create an instance of the default serializer. * * @return the default serializer */ private static SerializerAdapter createDefaultSerializer() { return JacksonAdapter.createDefaultSerializerAdapter(); } /** * Create the default HttpPipeline. * * @return the default HttpPipeline */ public static HttpPipeline createDefaultPipeline() { return createDefaultPipeline((HttpPipelinePolicy) null); } /** * Create the default HttpPipeline. * * @param credentials the credentials to use to apply authentication to the pipeline * @return the default HttpPipeline */ public static HttpPipeline createDefaultPipeline(TokenCredential credentials) { return createDefaultPipeline(new BearerTokenAuthenticationPolicy(credentials)); } /** * Create the default HttpPipeline. * @param credentialsPolicy the credentials policy factory to use to apply authentication to the * pipeline * @return the default HttpPipeline */ public static HttpPipeline createDefaultPipeline(HttpPipelinePolicy credentialsPolicy) { List<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add(new UserAgentPolicy()); policies.add(new RetryPolicy()); policies.add(new CookiePolicy()); if (credentialsPolicy != null) { policies.add(credentialsPolicy); } return new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .build(); } /** * Create a proxy implementation of the provided Swagger interface. * * @param swaggerInterface the Swagger interface to provide a proxy implementation for * @param <A> the type of the Swagger interface * @return a proxy implementation of the provided Swagger interface */ public static <A> A create(Class<A> swaggerInterface) { return create(swaggerInterface, createDefaultPipeline(), createDefaultSerializer()); } /** * Create a proxy implementation of the provided Swagger interface. 
* * @param swaggerInterface the Swagger interface to provide a proxy implementation for * @param httpPipeline the HttpPipelinePolicy and HttpClient pipeline that will be used to send Http requests * @param <A> the type of the Swagger interface * @return a proxy implementation of the provided Swagger interface */ public static <A> A create(Class<A> swaggerInterface, HttpPipeline httpPipeline) { return create(swaggerInterface, httpPipeline, createDefaultSerializer()); } /** * Create a proxy implementation of the provided Swagger interface. * * @param swaggerInterface the Swagger interface to provide a proxy implementation for * @param httpPipeline the HttpPipelinePolicy and HttpClient pipline that will be used to send Http * requests * @param serializer the serializer that will be used to convert POJOs to and from request and * response bodies * @param <A> the type of the Swagger interface. * @return a proxy implementation of the provided Swagger interface */ @SuppressWarnings("unchecked") public static <A> A create(Class<A> swaggerInterface, HttpPipeline httpPipeline, SerializerAdapter serializer) { final SwaggerInterfaceParser interfaceParser = new SwaggerInterfaceParser(swaggerInterface, serializer); final RestProxy restProxy = new RestProxy(httpPipeline, serializer, interfaceParser); return (A) Proxy.newProxyInstance(swaggerInterface.getClassLoader(), new Class<?>[]{swaggerInterface}, restProxy); } }
class RestProxy implements InvocationHandler { private final ClientLogger logger = new ClientLogger(RestProxy.class); private final HttpPipeline httpPipeline; private final SerializerAdapter serializer; private final SwaggerInterfaceParser interfaceParser; private final HttpResponseDecoder decoder; /** * Create a RestProxy. * * @param httpPipeline the HttpPipelinePolicy and HttpClient httpPipeline that will be used to send HTTP * requests. * @param serializer the serializer that will be used to convert response bodies to POJOs. * @param interfaceParser the parser that contains information about the interface describing REST API methods * that this RestProxy "implements". */ public RestProxy(HttpPipeline httpPipeline, SerializerAdapter serializer, SwaggerInterfaceParser interfaceParser) { this.httpPipeline = httpPipeline; this.serializer = serializer; this.interfaceParser = interfaceParser; this.decoder = new HttpResponseDecoder(this.serializer); } /** * Get the SwaggerMethodParser for the provided method. The Method must exist on the Swagger * interface that this RestProxy was created to "implement". * * @param method the method to get a SwaggerMethodParser for * @return the SwaggerMethodParser for the provided method */ private SwaggerMethodParser methodParser(Method method) { return interfaceParser.methodParser(method); } /** * Get the SerializerAdapter used by this RestProxy. * * @return The SerializerAdapter used by this RestProxy */ public SerializerAdapter serializer() { return serializer; } /** * Send the provided request asynchronously, applying any request policies provided to the HttpClient instance. 
* * @param request the HTTP request to send * @param contextData the context * @return a {@link Mono} that emits HttpResponse asynchronously */ public Mono<HttpResponse> send(HttpRequest request, Context contextData) { return httpPipeline.send(request, contextData); } @Override private Flux<ByteBuffer> validateLength(final HttpRequest request) { final Flux<ByteBuffer> bbFlux = request.body(); if (bbFlux == null) { return Flux.empty(); } return Flux.defer(() -> { Long expectedLength = Long.valueOf(request.headers().value("Content-Length")); final long[] currentTotalLength = new long[1]; return bbFlux.doOnEach(s -> { if (s.isOnNext()) { ByteBuffer byteBuffer = s.get(); int currentLength = (byteBuffer == null) ? 0 : byteBuffer.remaining(); currentTotalLength[0] += currentLength; if (currentTotalLength[0] > expectedLength) { throw logger.logExceptionAsError(new UnexpectedLengthException( String.format("Request body emitted %d bytes more than the expected %d bytes.", currentTotalLength[0], expectedLength), currentTotalLength[0], expectedLength)); } } else if (s.isOnComplete()) { if (expectedLength.compareTo(currentTotalLength[0]) != 0) { throw logger.logExceptionAsError(new UnexpectedLengthException( String.format("Request body emitted %d bytes less than the expected %d bytes.", currentTotalLength[0], expectedLength), currentTotalLength[0], expectedLength)); } } }); }); } private Method determineResumeMethod(Method method, String resumeMethodName) { for (Method potentialResumeMethod : method.getDeclaringClass().getMethods()) { if (potentialResumeMethod.getName().equals(resumeMethodName)) { return potentialResumeMethod; } } return null; } /** * Starts the tracing span for the current service call, additionally set metadata attributes on the span by passing * additional context information. * @param method Service method being called. * @param context Context information about the current service call. * @return The updated context containing the span context. 
*/ private Context startTracingSpan(Method method, Context context) { String spanName = String.format("Azure.%s/%s", interfaceParser.serviceName(), method.getName()); context = TracerProxy.setSpanName(spanName, context); return TracerProxy.start(spanName, context); } /** * Create a HttpRequest for the provided Swagger method using the provided arguments. * * @param methodParser the Swagger method parser to use * @param args the arguments to use to populate the method's annotation values * @return a HttpRequest * @throws IOException thrown if the body contents cannot be serialized */ private HttpRequest createHttpRequest(SwaggerMethodParser methodParser, Object[] args) throws IOException { UrlBuilder urlBuilder; final String path = methodParser.path(args); final UrlBuilder pathUrlBuilder = UrlBuilder.parse(path); if (pathUrlBuilder.scheme() != null) { urlBuilder = pathUrlBuilder; } else { urlBuilder = new UrlBuilder(); final String scheme = methodParser.scheme(args); urlBuilder.scheme(scheme); final String host = methodParser.host(args); urlBuilder.host(host); if (path != null && !path.isEmpty() && !path.equals("/")) { String hostPath = urlBuilder.path(); if (hostPath == null || hostPath.isEmpty() || hostPath.equals("/")) { urlBuilder.path(path); } else { urlBuilder.path(hostPath + "/" + path); } } } for (final EncodedParameter queryParameter : methodParser.encodedQueryParameters(args)) { urlBuilder.setQueryParameter(queryParameter.name(), queryParameter.encodedValue()); } final URL url = urlBuilder.toURL(); final HttpRequest request = configRequest(new HttpRequest(methodParser.httpMethod(), url), methodParser, args); for (final HttpHeader header : methodParser.headers(args)) { request.header(header.name(), header.value()); } return request; } /** * Create a HttpRequest for the provided Swagger method using the provided arguments. 
* * @param methodParser the Swagger method parser to use * @param args the arguments to use to populate the method's annotation values * @return a HttpRequest * @throws IOException thrown if the body contents cannot be serialized */ private HttpRequest createHttpRequest(OperationDescription operationDescription, SwaggerMethodParser methodParser, Object[] args) throws IOException { final HttpRequest request = configRequest(new HttpRequest(methodParser.httpMethod(), operationDescription.url()), methodParser, args); for (final String headerName : operationDescription.headers().keySet()) { request.header(headerName, operationDescription.headers().get(headerName)); } return request; } @SuppressWarnings("unchecked") private HttpRequest configRequest(HttpRequest request, SwaggerMethodParser methodParser, Object[] args) throws IOException { final Object bodyContentObject = methodParser.body(args); if (bodyContentObject == null) { request.headers().put("Content-Length", "0"); } else { String contentType = methodParser.bodyContentType(); if (contentType == null || contentType.isEmpty()) { if (bodyContentObject instanceof byte[] || bodyContentObject instanceof String) { contentType = ContentType.APPLICATION_OCTET_STREAM; } else { contentType = ContentType.APPLICATION_JSON; } } request.headers().put("Content-Type", contentType); boolean isJson = false; final String[] contentTypeParts = contentType.split(";"); for (String contentTypePart : contentTypeParts) { if (contentTypePart.trim().equalsIgnoreCase(ContentType.APPLICATION_JSON)) { isJson = true; break; } } if (isJson) { final String bodyContentString = serializer.serialize(bodyContentObject, SerializerEncoding.JSON); request.body(bodyContentString); } else if (FluxUtil.isFluxByteBuffer(methodParser.bodyJavaType())) { request.body((Flux<ByteBuffer>) bodyContentObject); } else if (bodyContentObject instanceof byte[]) { request.body((byte[]) bodyContentObject); } else if (bodyContentObject instanceof String) { final String 
bodyContentString = (String) bodyContentObject; if (!bodyContentString.isEmpty()) { request.body(bodyContentString); } } else if (bodyContentObject instanceof ByteBuffer) { request.body(Flux.just((ByteBuffer) bodyContentObject)); } else { final String bodyContentString = serializer.serialize(bodyContentObject, SerializerEncoding.fromHeaders(request.headers())); request.body(bodyContentString); } } return request; } private Mono<HttpDecodedResponse> ensureExpectedStatus(Mono<HttpDecodedResponse> asyncDecodedResponse, final SwaggerMethodParser methodParser) { return asyncDecodedResponse .flatMap(decodedHttpResponse -> ensureExpectedStatus(decodedHttpResponse, methodParser, null)); } private static Exception instantiateUnexpectedException(UnexpectedExceptionInformation exception, HttpResponse httpResponse, String responseContent, Object responseDecodedContent) { final int responseStatusCode = httpResponse.statusCode(); String contentType = httpResponse.headerValue("Content-Type"); String bodyRepresentation; if ("application/octet-stream".equalsIgnoreCase(contentType)) { bodyRepresentation = "(" + httpResponse.headerValue("Content-Length") + "-byte body)"; } else { bodyRepresentation = responseContent.isEmpty() ? "(empty body)" : "\"" + responseContent + "\""; } Exception result; try { final Constructor<? extends HttpResponseException> exceptionConstructor = exception.exceptionType().getConstructor(String.class, HttpResponse.class, exception.exceptionBodyType()); result = exceptionConstructor.newInstance("Status code " + responseStatusCode + ", " + bodyRepresentation, httpResponse, responseDecodedContent); } catch (ReflectiveOperationException e) { String message = "Status code " + responseStatusCode + ", but an instance of " + exception.exceptionType().getCanonicalName() + " cannot be created." 
+ " Response body: " + bodyRepresentation; result = new IOException(message, e); } return result; } /** * Create a publisher that (1) emits error if the provided response {@code decodedResponse} has * 'disallowed status code' OR (2) emits provided response if it's status code ia allowed. * * 'disallowed status code' is one of the status code defined in the provided SwaggerMethodParser * or is in the int[] of additional allowed status codes. * * @param decodedResponse The HttpResponse to check. * @param methodParser The method parser that contains information about the service interface * method that initiated the HTTP request. * @param additionalAllowedStatusCodes Additional allowed status codes that are permitted based * on the context of the HTTP request. * @return An async-version of the provided decodedResponse. */ public Mono<HttpDecodedResponse> ensureExpectedStatus(final HttpDecodedResponse decodedResponse, final SwaggerMethodParser methodParser, int[] additionalAllowedStatusCodes) { final int responseStatusCode = decodedResponse.sourceResponse().statusCode(); final Mono<HttpDecodedResponse> asyncResult; if (!methodParser.isExpectedResponseStatusCode(responseStatusCode, additionalAllowedStatusCodes)) { Mono<String> bodyAsString = decodedResponse.sourceResponse().bodyAsString(); asyncResult = bodyAsString.flatMap((Function<String, Mono<HttpDecodedResponse>>) responseContent -> { Mono<Object> decodedErrorBody = decodedResponse.decodedBody(); return decodedErrorBody.flatMap((Function<Object, Mono<HttpDecodedResponse>>) responseDecodedErrorObject -> { Throwable exception = instantiateUnexpectedException(methodParser.getUnexpectedException(responseStatusCode), decodedResponse.sourceResponse(), responseContent, responseDecodedErrorObject); return Mono.error(exception); }).switchIfEmpty(Mono.defer((Supplier<Mono<HttpDecodedResponse>>) () -> { Throwable exception = instantiateUnexpectedException(methodParser.getUnexpectedException(responseStatusCode), 
decodedResponse.sourceResponse(), responseContent, null); return Mono.error(exception); })); }).switchIfEmpty(Mono.defer((Supplier<Mono<HttpDecodedResponse>>) () -> { Throwable exception = instantiateUnexpectedException(methodParser.getUnexpectedException(responseStatusCode), decodedResponse.sourceResponse(), "", null); return Mono.error(exception); })); } else { asyncResult = Mono.just(decodedResponse); } return asyncResult; } private Mono<?> handleRestResponseReturnType(HttpDecodedResponse response, SwaggerMethodParser methodParser, Type entityType) { Mono<?> asyncResult; if (TypeUtil.isTypeOrSubTypeOf(entityType, Response.class)) { Type bodyType = TypeUtil.getRestResponseBodyType(entityType); if (TypeUtil.isTypeOrSubTypeOf(bodyType, Void.class)) { asyncResult = response.sourceResponse().body().ignoreElements() .then(Mono.just(createResponse(response, entityType, null))); } else { asyncResult = handleBodyReturnType(response, methodParser, bodyType) .map((Function<Object, Response<?>>) bodyAsObject -> createResponse(response, entityType, bodyAsObject)) .switchIfEmpty(Mono.defer((Supplier<Mono<Response<?>>>) () -> Mono.just(createResponse(response, entityType, null)))); } } else { asyncResult = handleBodyReturnType(response, methodParser, entityType); } return asyncResult; } @SuppressWarnings("unchecked") private Response<?> createResponse(HttpDecodedResponse response, Type entityType, Object bodyAsObject) { final HttpResponse httpResponse = response.sourceResponse(); final HttpRequest httpRequest = httpResponse.request(); final int responseStatusCode = httpResponse.statusCode(); final HttpHeaders responseHeaders = httpResponse.headers(); Class<? extends Response<?>> cls = (Class<? extends Response<?>>) TypeUtil.getRawClass(entityType); if (cls.equals(Response.class)) { cls = (Class<? extends Response<?>>) (Object) ResponseBase.class; } else if (cls.equals(PagedResponse.class)) { cls = (Class<? 
extends Response<?>>) (Object) PagedResponseBase.class; if (bodyAsObject != null && !TypeUtil.isTypeOrSubTypeOf(bodyAsObject.getClass(), Page.class)) { throw logger.logExceptionAsError(new RuntimeException("Unable to create PagedResponse<T>. Body must be of a type that implements: " + Page.class)); } } List<Constructor<?>> constructors = Arrays.stream(cls.getDeclaredConstructors()) .filter(constructor -> { int paramCount = constructor.getParameterCount(); return paramCount >= 3 && paramCount <= 5; }) .sorted(Comparator.comparingInt(Constructor::getParameterCount)) .collect(Collectors.toList()); if (constructors.isEmpty()) { throw logger.logExceptionAsError(new RuntimeException("Cannot find suitable constructor for class " + cls)); } for (Constructor<?> constructor : constructors) { final Constructor<? extends Response<?>> ctor = (Constructor<? extends Response<?>>) constructor; try { final int paramCount = constructor.getParameterCount(); switch (paramCount) { case 3: return ctor.newInstance(httpRequest, responseStatusCode, responseHeaders); case 4: return ctor.newInstance(httpRequest, responseStatusCode, responseHeaders, bodyAsObject); case 5: return ctor.newInstance(httpRequest, responseStatusCode, responseHeaders, bodyAsObject, response.decodedHeaders().block()); default: throw logger.logExceptionAsError(new IllegalStateException("Response constructor with expected parameters not found.")); } } catch (IllegalAccessException | InvocationTargetException | InstantiationException e) { throw logger.logExceptionAsError(reactor.core.Exceptions.propagate(e)); } } throw logger.logExceptionAsError(new RuntimeException("Cannot find suitable constructor for class " + cls)); } protected final Mono<?> handleBodyReturnType(final HttpDecodedResponse response, final SwaggerMethodParser methodParser, final Type entityType) { final int responseStatusCode = response.sourceResponse().statusCode(); final HttpMethod httpMethod = methodParser.httpMethod(); final Type 
returnValueWireType = methodParser.returnValueWireType(); final Mono<?> asyncResult; if (httpMethod == HttpMethod.HEAD && (TypeUtil.isTypeOrSubTypeOf(entityType, Boolean.TYPE) || TypeUtil.isTypeOrSubTypeOf(entityType, Boolean.class))) { boolean isSuccess = (responseStatusCode / 100) == 2; asyncResult = Mono.just(isSuccess); } else if (TypeUtil.isTypeOrSubTypeOf(entityType, byte[].class)) { Mono<byte[]> responseBodyBytesAsync = response.sourceResponse().bodyAsByteArray(); if (returnValueWireType == Base64Url.class) { responseBodyBytesAsync = responseBodyBytesAsync.map(base64UrlBytes -> new Base64Url(base64UrlBytes).decodedBytes()); } asyncResult = responseBodyBytesAsync; } else if (FluxUtil.isFluxByteBuffer(entityType)) { asyncResult = Mono.just(response.sourceResponse().body()); } else { asyncResult = response.decodedBody(); } return asyncResult; } protected Object handleHttpResponse(final HttpRequest httpRequest, Mono<HttpDecodedResponse> asyncDecodedHttpResponse, SwaggerMethodParser methodParser, Type returnType, Context context) { return handleRestReturnType(asyncDecodedHttpResponse, methodParser, returnType, context); } protected Object handleResumeOperation(HttpRequest httpRequest, OperationDescription operationDescription, SwaggerMethodParser methodParser, Type returnType, Context context) { throw logger.logExceptionAsError(Exceptions.propagate(new Exception("The resume operation is not available in the base RestProxy class."))); } /** * Handle the provided asynchronous HTTP response and return the deserialized value. * * @param asyncHttpDecodedResponse the asynchronous HTTP response to the original HTTP request * @param methodParser the SwaggerMethodParser that the request originates from * @param returnType the type of value that will be returned * @param context Additional context that is passed through the Http pipeline during the service call. 
* @return the deserialized result */ public final Object handleRestReturnType(Mono<HttpDecodedResponse> asyncHttpDecodedResponse, final SwaggerMethodParser methodParser, final Type returnType, Context context) { final Mono<HttpDecodedResponse> asyncExpectedResponse = ensureExpectedStatus(asyncHttpDecodedResponse, methodParser) .doOnEach(RestProxy::endTracingSpan) .subscriberContext(reactor.util.context.Context.of("TRACING_CONTEXT", context)); final Object result; if (TypeUtil.isTypeOrSubTypeOf(returnType, Mono.class)) { final Type monoTypeParam = TypeUtil.getTypeArgument(returnType); if (TypeUtil.isTypeOrSubTypeOf(monoTypeParam, Void.class)) { result = asyncExpectedResponse.then(); } else { result = asyncExpectedResponse.flatMap(response -> handleRestResponseReturnType(response, methodParser, monoTypeParam)); } } else if (FluxUtil.isFluxByteBuffer(returnType)) { result = asyncExpectedResponse.flatMapMany(ar -> ar.sourceResponse().body()); } else if (TypeUtil.isTypeOrSubTypeOf(returnType, void.class) || TypeUtil.isTypeOrSubTypeOf(returnType, Void.class)) { asyncExpectedResponse.block(); result = null; } else { result = asyncExpectedResponse .flatMap(httpResponse -> handleRestResponseReturnType(httpResponse, methodParser, returnType)) .block(); } return result; } private static void endTracingSpan(Signal<HttpDecodedResponse> signal) { if (signal.isOnComplete() || signal.isOnSubscribe()) { return; } reactor.util.context.Context context = signal.getContext(); Optional<Context> tracingContext = context.getOrEmpty("TRACING_CONTEXT"); if (!tracingContext.isPresent()) { return; } int statusCode = 0; HttpDecodedResponse httpDecodedResponse; Throwable throwable = null; if (signal.hasValue()) { httpDecodedResponse = signal.get(); statusCode = httpDecodedResponse.sourceResponse().statusCode(); } else if (signal.hasError()) { throwable = signal.getThrowable(); if (throwable instanceof HttpResponseException) { HttpResponseException exception = (HttpResponseException) throwable; 
statusCode = exception.response().statusCode(); } } TracerProxy.end(statusCode, throwable, tracingContext.get()); } /** * Create an instance of the default serializer. * * @return the default serializer */ private static SerializerAdapter createDefaultSerializer() { return JacksonAdapter.createDefaultSerializerAdapter(); } /** * Create the default HttpPipeline. * * @return the default HttpPipeline */ public static HttpPipeline createDefaultPipeline() { return createDefaultPipeline((HttpPipelinePolicy) null); } /** * Create the default HttpPipeline. * * @param credentials the credentials to use to apply authentication to the pipeline * @return the default HttpPipeline */ public static HttpPipeline createDefaultPipeline(TokenCredential credentials) { return createDefaultPipeline(new BearerTokenAuthenticationPolicy(credentials)); } /** * Create the default HttpPipeline. * @param credentialsPolicy the credentials policy factory to use to apply authentication to the * pipeline * @return the default HttpPipeline */ public static HttpPipeline createDefaultPipeline(HttpPipelinePolicy credentialsPolicy) { List<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add(new UserAgentPolicy()); policies.add(new RetryPolicy()); policies.add(new CookiePolicy()); if (credentialsPolicy != null) { policies.add(credentialsPolicy); } return new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .build(); } /** * Create a proxy implementation of the provided Swagger interface. * * @param swaggerInterface the Swagger interface to provide a proxy implementation for * @param <A> the type of the Swagger interface * @return a proxy implementation of the provided Swagger interface */ public static <A> A create(Class<A> swaggerInterface) { return create(swaggerInterface, createDefaultPipeline(), createDefaultSerializer()); } /** * Create a proxy implementation of the provided Swagger interface. 
* * @param swaggerInterface the Swagger interface to provide a proxy implementation for * @param httpPipeline the HttpPipelinePolicy and HttpClient pipeline that will be used to send Http requests * @param <A> the type of the Swagger interface * @return a proxy implementation of the provided Swagger interface */ public static <A> A create(Class<A> swaggerInterface, HttpPipeline httpPipeline) { return create(swaggerInterface, httpPipeline, createDefaultSerializer()); } /** * Create a proxy implementation of the provided Swagger interface. * * @param swaggerInterface the Swagger interface to provide a proxy implementation for * @param httpPipeline the HttpPipelinePolicy and HttpClient pipline that will be used to send Http * requests * @param serializer the serializer that will be used to convert POJOs to and from request and * response bodies * @param <A> the type of the Swagger interface. * @return a proxy implementation of the provided Swagger interface */ @SuppressWarnings("unchecked") public static <A> A create(Class<A> swaggerInterface, HttpPipeline httpPipeline, SerializerAdapter serializer) { final SwaggerInterfaceParser interfaceParser = new SwaggerInterfaceParser(swaggerInterface, serializer); final RestProxy restProxy = new RestProxy(httpPipeline, serializer, interfaceParser); return (A) Proxy.newProxyInstance(swaggerInterface.getClassLoader(), new Class<?>[]{swaggerInterface}, restProxy); } }
Is this happening because the `UnexpectedLengthException` check is reading (consuming) the `ByteBuffer`? If so, is there any way the check could be implemented so that it does not read the ByteBuffer and therefore does not require it to be reset?
/**
 * Send the provided request asynchronously, applying any request policies provided to the
 * HttpClient instance.
 *
 * @param request the HTTP request to send
 * @param contextData the context
 * @return a {@link Mono} that emits the HttpResponse asynchronously
 */
public Mono<HttpResponse> send(HttpRequest request, Context contextData) {
    // NOTE: a previous revision called request.body().map(ByteBuffer::reset) here. That was a
    // no-op: Flux.map returns a NEW publisher which was immediately discarded and never
    // subscribed, so no buffer was ever reset. It was also unsafe — ByteBuffer.reset() throws
    // InvalidMarkException when no mark has been set. Length validation must instead avoid
    // consuming the buffers (e.g. by inspecting ByteBuffer.remaining() only), so no reset is
    // needed before handing the request to the pipeline.
    return httpPipeline.send(request, contextData);
}
request.body().map(ByteBuffer::reset);
/**
 * Sends the given request through the configured {@link HttpPipeline}, letting every pipeline
 * policy run as part of the subscription.
 *
 * @param request the HTTP request to dispatch
 * @param contextData caller-supplied context propagated through the pipeline
 * @return a {@link Mono} emitting the service's {@code HttpResponse}
 */
public Mono<HttpResponse> send(HttpRequest request, Context contextData) {
    // Pure delegation: the pipeline owns retry/auth/logging behavior.
    final Mono<HttpResponse> pipelineResponse = httpPipeline.send(request, contextData);
    return pipelineResponse;
}
class RestProxy implements InvocationHandler {
    private final ClientLogger logger = new ClientLogger(RestProxy.class);

    private final HttpPipeline httpPipeline;
    private final SerializerAdapter serializer;
    private final SwaggerInterfaceParser interfaceParser;
    private final HttpResponseDecoder decoder;

    /**
     * Create a RestProxy.
     *
     * @param httpPipeline the HttpPipelinePolicy and HttpClient httpPipeline that will be used to send HTTP
     * requests.
     * @param serializer the serializer that will be used to convert response bodies to POJOs.
     * @param interfaceParser the parser that contains information about the interface describing REST API methods
     * that this RestProxy "implements".
     */
    public RestProxy(HttpPipeline httpPipeline, SerializerAdapter serializer, SwaggerInterfaceParser interfaceParser) {
        this.httpPipeline = httpPipeline;
        this.serializer = serializer;
        this.interfaceParser = interfaceParser;
        this.decoder = new HttpResponseDecoder(this.serializer);
    }

    /**
     * Get the SwaggerMethodParser for the provided method. The Method must exist on the Swagger
     * interface that this RestProxy was created to "implement".
     *
     * @param method the method to get a SwaggerMethodParser for
     * @return the SwaggerMethodParser for the provided method
     */
    private SwaggerMethodParser methodParser(Method method) {
        return interfaceParser.methodParser(method);
    }

    /**
     * Get the SerializerAdapter used by this RestProxy.
     *
     * @return The SerializerAdapter used by this RestProxy
     */
    public SerializerAdapter serializer() {
        return serializer;
    }

    /**
     * Send the provided request asynchronously, applying any request policies provided to the
     * HttpClient instance.
     *
     * @param request the HTTP request to send
     * @param contextData the context
     * @return a {@link Mono} that emits HttpResponse asynchronously
     */
    public Mono<HttpResponse> send(HttpRequest request, Context contextData) {
        return httpPipeline.send(request, contextData);
    }

    /**
     * Entry point for every call made through the dynamic proxy: translates the invoked Swagger
     * interface method into an HTTP request, sends it through the pipeline, and adapts the
     * response to the method's declared return type.
     */
    @Override
    public Object invoke(Object proxy, final Method method, Object[] args) {
        try {
            final SwaggerMethodParser methodParser;
            final HttpRequest request;
            if (method.isAnnotationPresent(ResumeOperation.class)) {
                // Resumed long-running operation: rebuild the request from the persisted
                // OperationDescription rather than from the method's own annotations.
                OperationDescription opDesc = ImplUtils.findFirstOfType(args, OperationDescription.class);
                Method resumeMethod = determineResumeMethod(method, opDesc.methodName());
                methodParser = methodParser(resumeMethod);
                request = createHttpRequest(opDesc, methodParser, args);
                final Type returnType = methodParser.returnType();
                return handleResumeOperation(request, opDesc, methodParser, returnType,
                    startTracingSpan(resumeMethod, Context.NONE));
            } else {
                methodParser = methodParser(method);
                request = createHttpRequest(methodParser, args);
                Context context = methodParser.context(args)
                    .addData("caller-method", methodParser.fullyQualifiedMethodName());
                context = startTracingSpan(method, context);
                if (request.body() != null) {
                    // Wrap the body so emitted byte counts are checked against Content-Length.
                    request.body(validateLength(request, request.body()));
                }
                final Mono<HttpResponse> asyncResponse = send(request, context);
                Mono<HttpDecodedResponse> asyncDecodedResponse = this.decoder.decode(asyncResponse, methodParser);
                return handleHttpResponse(request, asyncDecodedResponse, methodParser, methodParser.returnType(),
                    context);
            }
        } catch (Exception e) {
            throw logger.logExceptionAsError(Exceptions.propagate(e));
        }
    }

    /**
     * Decorates the request body so that, on subscription, the total number of emitted bytes is
     * validated against the request's {@code Content-Length} header. Emits
     * {@link UnexpectedLengthException} when the body produces more or fewer bytes than declared.
     *
     * @param request the request whose {@code Content-Length} header supplies the expected size
     * @param bbFlux the request body; may be {@code null}
     * @return the validating body publisher (per-subscriber state via {@code Flux.defer})
     */
    private Flux<ByteBuffer> validateLength(final HttpRequest request, final Flux<ByteBuffer> bbFlux) {
        if (bbFlux == null) {
            return Flux.empty();
        }
        final String contentLengthHeader = request.headers().value("Content-Length");
        if (contentLengthHeader == null) {
            // No declared length to validate against; previously this caused a NullPointerException.
            return bbFlux;
        }
        return Flux.defer(() -> {
            final long expectedLength = Long.parseLong(contentLengthHeader);
            // Running total per subscription. A previous revision kept every buffer's length in a
            // List<Integer> and re-summed it on each onNext — accidental O(n^2) for n buffers.
            final long[] currentTotalLength = new long[1];
            return bbFlux.doOnEach(signal -> {
                if (signal.isOnNext()) {
                    // remaining() inspects the buffer without consuming it.
                    currentTotalLength[0] += signal.get().remaining();
                    if (currentTotalLength[0] > expectedLength) {
                        throw new UnexpectedLengthException(
                            String.format("Request body emitted %d bytes more than the expected %d bytes.",
                                currentTotalLength[0], expectedLength), currentTotalLength[0], expectedLength);
                    }
                } else if (signal.isOnComplete()) {
                    if (currentTotalLength[0] != expectedLength) {
                        throw new UnexpectedLengthException(
                            String.format("Request body emitted %d bytes less than the expected %d bytes.",
                                currentTotalLength[0], expectedLength), currentTotalLength[0], expectedLength);
                    }
                } else if (signal.isOnError()) {
                    // Log only; the error signal itself still propagates downstream.
                    logger.logExceptionAsError(new RuntimeException("Error occurs when validating "
                        + "the request body length and the header content length. Error details: "
                        + signal.getThrowable().getMessage()));
                }
            });
        });
    }

    /**
     * Finds the method named {@code resumeMethodName} on the interface that declared
     * {@code method}. Returns {@code null} when no such method exists — callers will then fail
     * with a NullPointerException; NOTE(review): consider throwing a descriptive exception instead.
     */
    private Method determineResumeMethod(Method method, String resumeMethodName) {
        for (Method potentialResumeMethod : method.getDeclaringClass().getMethods()) {
            if (potentialResumeMethod.getName().equals(resumeMethodName)) {
                return potentialResumeMethod;
            }
        }
        return null;
    }

    /**
     * Starts the tracing span for the current service call, additionally set metadata attributes on the span by
     * passing additional context information.
     *
     * @param method Service method being called.
     * @param context Context information about the current service call.
     * @return The updated context containing the span context.
     */
    private Context startTracingSpan(Method method, Context context) {
        String spanName = String.format("Azure.%s/%s", interfaceParser.serviceName(), method.getName());
        context = TracerProxy.setSpanName(spanName, context);
        return TracerProxy.start(spanName, context);
    }

    /**
     * Create a HttpRequest for the provided Swagger method using the provided arguments.
     *
     * @param methodParser the Swagger method parser to use
     * @param args the arguments to use to populate the method's annotation values
     * @return a HttpRequest
     * @throws IOException thrown if the body contents cannot be serialized
     */
    private HttpRequest createHttpRequest(SwaggerMethodParser methodParser, Object[] args) throws IOException {
        UrlBuilder urlBuilder;

        // The @Path annotation may contain a fully qualified URL; if so it wins outright.
        final String path = methodParser.path(args);
        final UrlBuilder pathUrlBuilder = UrlBuilder.parse(path);
        if (pathUrlBuilder.scheme() != null) {
            urlBuilder = pathUrlBuilder;
        } else {
            urlBuilder = new UrlBuilder();

            final String scheme = methodParser.scheme(args);
            urlBuilder.scheme(scheme);

            final String host = methodParser.host(args);
            urlBuilder.host(host);

            if (path != null && !path.isEmpty() && !path.equals("/")) {
                String hostPath = urlBuilder.path();
                if (hostPath == null || hostPath.isEmpty() || hostPath.equals("/")) {
                    urlBuilder.path(path);
                } else {
                    urlBuilder.path(hostPath + "/" + path);
                }
            }
        }

        for (final EncodedParameter queryParameter : methodParser.encodedQueryParameters(args)) {
            urlBuilder.setQueryParameter(queryParameter.name(), queryParameter.encodedValue());
        }

        final URL url = urlBuilder.toURL();
        final HttpRequest request = configRequest(new HttpRequest(methodParser.httpMethod(), url),
            methodParser, args);

        for (final HttpHeader header : methodParser.headers(args)) {
            request.header(header.name(), header.value());
        }

        return request;
    }

    /**
     * Create a HttpRequest for the provided Swagger method using the provided arguments, resuming
     * from a previously persisted {@link OperationDescription}.
     *
     * @param operationDescription the persisted operation state (URL and headers)
     * @param methodParser the Swagger method parser to use
     * @param args the arguments to use to populate the method's annotation values
     * @return a HttpRequest
     * @throws IOException thrown if the body contents cannot be serialized
     */
    private HttpRequest createHttpRequest(OperationDescription operationDescription,
        SwaggerMethodParser methodParser, Object[] args) throws IOException {
        final HttpRequest request = configRequest(
            new HttpRequest(methodParser.httpMethod(), operationDescription.url()), methodParser, args);

        // Replay the headers captured when the operation was originally started.
        for (final String headerName : operationDescription.headers().keySet()) {
            request.header(headerName, operationDescription.headers().get(headerName));
        }

        return request;
    }

    /**
     * Populates the request body and content headers from the method's body argument, serializing
     * POJOs with the configured serializer.
     */
    @SuppressWarnings("unchecked")
    private HttpRequest configRequest(HttpRequest request, SwaggerMethodParser methodParser, Object[] args)
        throws IOException {
        final Object bodyContentObject = methodParser.body(args);
        if (bodyContentObject == null) {
            request.headers().put("Content-Length", "0");
        } else {
            String contentType = methodParser.bodyContentType();
            if (contentType == null || contentType.isEmpty()) {
                // Raw binary/text payloads default to octet-stream; everything else to JSON.
                if (bodyContentObject instanceof byte[] || bodyContentObject instanceof String) {
                    contentType = ContentType.APPLICATION_OCTET_STREAM;
                } else {
                    contentType = ContentType.APPLICATION_JSON;
                }
            }

            request.headers().put("Content-Type", contentType);

            // Content-Type may carry parameters ("application/json; charset=utf-8").
            boolean isJson = false;
            final String[] contentTypeParts = contentType.split(";");
            for (String contentTypePart : contentTypeParts) {
                if (contentTypePart.trim().equalsIgnoreCase(ContentType.APPLICATION_JSON)) {
                    isJson = true;
                    break;
                }
            }

            if (isJson) {
                final String bodyContentString = serializer.serialize(bodyContentObject, SerializerEncoding.JSON);
                request.body(bodyContentString);
            } else if (FluxUtil.isFluxByteBuffer(methodParser.bodyJavaType())) {
                request.body((Flux<ByteBuffer>) bodyContentObject);
            } else if (bodyContentObject instanceof byte[]) {
                request.body((byte[]) bodyContentObject);
            } else if (bodyContentObject instanceof String) {
                final String bodyContentString = (String) bodyContentObject;
                if (!bodyContentString.isEmpty()) {
                    request.body(bodyContentString);
                }
            } else if (bodyContentObject instanceof ByteBuffer) {
                request.body(Flux.just((ByteBuffer) bodyContentObject));
            } else {
                final String bodyContentString = serializer.serialize(bodyContentObject,
                    SerializerEncoding.fromHeaders(request.headers()));
                request.body(bodyContentString);
            }
        }

        return request;
    }

    private Mono<HttpDecodedResponse> ensureExpectedStatus(Mono<HttpDecodedResponse> asyncDecodedResponse,
        final SwaggerMethodParser methodParser) {
        return asyncDecodedResponse
            .flatMap(decodedHttpResponse -> ensureExpectedStatus(decodedHttpResponse, methodParser, null));
    }

    /**
     * Builds the service-specific exception for an unexpected status code, reflecting over the
     * exception type's (String, HttpResponse, body) constructor. Falls back to IOException when
     * the reflective construction fails.
     */
    private static Exception instantiateUnexpectedException(UnexpectedExceptionInformation exception,
        HttpResponse httpResponse, String responseContent, Object responseDecodedContent) {
        final int responseStatusCode = httpResponse.statusCode();
        String contentType = httpResponse.headerValue("Content-Type");
        String bodyRepresentation;
        if ("application/octet-stream".equalsIgnoreCase(contentType)) {
            // Don't dump binary payloads into the message; report the size instead.
            bodyRepresentation = "(" + httpResponse.headerValue("Content-Length") + "-byte body)";
        } else {
            bodyRepresentation = responseContent.isEmpty()
                ? "(empty body)"
                : "\"" + responseContent + "\"";
        }

        Exception result;
        try {
            final Constructor<? extends HttpResponseException> exceptionConstructor =
                exception.exceptionType().getConstructor(String.class, HttpResponse.class,
                    exception.exceptionBodyType());
            result = exceptionConstructor.newInstance("Status code " + responseStatusCode + ", " + bodyRepresentation,
                httpResponse, responseDecodedContent);
        } catch (ReflectiveOperationException e) {
            String message = "Status code " + responseStatusCode + ", but an instance of "
                + exception.exceptionType().getCanonicalName() + " cannot be created."
                + " Response body: " + bodyRepresentation;
            result = new IOException(message, e);
        }
        return result;
    }

    /**
     * Create a publisher that (1) emits error if the provided response {@code decodedResponse} has
     * 'disallowed status code' OR (2) emits provided response if it's status code is allowed.
     *
     * 'disallowed status code' is one of the status code defined in the provided SwaggerMethodParser
     * or is in the int[] of additional allowed status codes.
     *
     * @param decodedResponse The HttpResponse to check.
     * @param methodParser The method parser that contains information about the service interface
     * method that initiated the HTTP request.
     * @param additionalAllowedStatusCodes Additional allowed status codes that are permitted based
     * on the context of the HTTP request.
     * @return An async-version of the provided decodedResponse.
     */
    public Mono<HttpDecodedResponse> ensureExpectedStatus(final HttpDecodedResponse decodedResponse,
        final SwaggerMethodParser methodParser, int[] additionalAllowedStatusCodes) {
        final int responseStatusCode = decodedResponse.sourceResponse().statusCode();
        final Mono<HttpDecodedResponse> asyncResult;
        if (!methodParser.isExpectedResponseStatusCode(responseStatusCode, additionalAllowedStatusCodes)) {
            Mono<String> bodyAsString = decodedResponse.sourceResponse().bodyAsString();
            // Each switchIfEmpty covers the case where the body (or its decoded form) is absent —
            // an error must still be emitted for the disallowed status code.
            asyncResult = bodyAsString.flatMap((Function<String, Mono<HttpDecodedResponse>>) responseContent -> {
                Mono<Object> decodedErrorBody = decodedResponse.decodedBody();
                return decodedErrorBody
                    .flatMap((Function<Object, Mono<HttpDecodedResponse>>) responseDecodedErrorObject -> {
                        Throwable exception = instantiateUnexpectedException(
                            methodParser.getUnexpectedException(responseStatusCode),
                            decodedResponse.sourceResponse(), responseContent, responseDecodedErrorObject);
                        return Mono.error(exception);
                    })
                    .switchIfEmpty(Mono.defer((Supplier<Mono<HttpDecodedResponse>>) () -> {
                        Throwable exception = instantiateUnexpectedException(
                            methodParser.getUnexpectedException(responseStatusCode),
                            decodedResponse.sourceResponse(), responseContent, null);
                        return Mono.error(exception);
                    }));
            }).switchIfEmpty(Mono.defer((Supplier<Mono<HttpDecodedResponse>>) () -> {
                Throwable exception = instantiateUnexpectedException(
                    methodParser.getUnexpectedException(responseStatusCode),
                    decodedResponse.sourceResponse(), "", null);
                return Mono.error(exception);
            }));
        } else {
            asyncResult = Mono.just(decodedResponse);
        }
        return asyncResult;
    }

    /**
     * Adapts the decoded response to a {@code Response<T>}-shaped return type, or falls through to
     * plain body handling for any other entity type.
     */
    private Mono<?> handleRestResponseReturnType(HttpDecodedResponse response, SwaggerMethodParser methodParser,
        Type entityType) {
        Mono<?> asyncResult;
        if (TypeUtil.isTypeOrSubTypeOf(entityType, Response.class)) {
            Type bodyType = TypeUtil.getRestResponseBodyType(entityType);

            if (TypeUtil.isTypeOrSubTypeOf(bodyType, Void.class)) {
                // The body must still be drained even when the caller discards it.
                asyncResult = response.sourceResponse().body().ignoreElements()
                    .then(Mono.just(createResponse(response, entityType, null)));
            } else {
                asyncResult = handleBodyReturnType(response, methodParser, bodyType)
                    .map((Function<Object, Response<?>>) bodyAsObject ->
                        createResponse(response, entityType, bodyAsObject))
                    .switchIfEmpty(Mono.defer((Supplier<Mono<Response<?>>>) () ->
                        Mono.just(createResponse(response, entityType, null))));
            }
        } else {
            asyncResult = handleBodyReturnType(response, methodParser, entityType);
        }
        return asyncResult;
    }

    /**
     * Reflectively instantiates the concrete {@code Response} type, preferring the constructor
     * with the fewest parameters (3-, then 4-, then 5-arg forms).
     */
    @SuppressWarnings("unchecked")
    private Response<?> createResponse(HttpDecodedResponse response, Type entityType, Object bodyAsObject) {
        final HttpResponse httpResponse = response.sourceResponse();
        final HttpRequest httpRequest = httpResponse.request();
        final int responseStatusCode = httpResponse.statusCode();
        final HttpHeaders responseHeaders = httpResponse.headers();

        // The declared interface types are abstract; substitute their concrete base classes.
        Class<? extends Response<?>> cls = (Class<? extends Response<?>>) TypeUtil.getRawClass(entityType);
        if (cls.equals(Response.class)) {
            cls = (Class<? extends Response<?>>) (Object) ResponseBase.class;
        } else if (cls.equals(PagedResponse.class)) {
            cls = (Class<? extends Response<?>>) (Object) PagedResponseBase.class;

            if (bodyAsObject != null && !TypeUtil.isTypeOrSubTypeOf(bodyAsObject.getClass(), Page.class)) {
                throw logger.logExceptionAsError(new RuntimeException(
                    "Unable to create PagedResponse<T>. Body must be of a type that implements: " + Page.class));
            }
        }

        List<Constructor<?>> constructors = Arrays.stream(cls.getDeclaredConstructors())
            .filter(constructor -> {
                int paramCount = constructor.getParameterCount();
                return paramCount >= 3 && paramCount <= 5;
            })
            .sorted(Comparator.comparingInt(Constructor::getParameterCount))
            .collect(Collectors.toList());

        if (constructors.isEmpty()) {
            throw logger.logExceptionAsError(
                new RuntimeException("Cannot find suitable constructor for class " + cls));
        }

        for (Constructor<?> constructor : constructors) {
            final Constructor<? extends Response<?>> ctor = (Constructor<? extends Response<?>>) constructor;
            try {
                final int paramCount = constructor.getParameterCount();
                switch (paramCount) {
                    case 3:
                        return ctor.newInstance(httpRequest, responseStatusCode, responseHeaders);
                    case 4:
                        return ctor.newInstance(httpRequest, responseStatusCode, responseHeaders, bodyAsObject);
                    case 5:
                        // NOTE(review): decodedHeaders().block() here makes this call blocking.
                        return ctor.newInstance(httpRequest, responseStatusCode, responseHeaders, bodyAsObject,
                            response.decodedHeaders().block());
                    default:
                        throw logger.logExceptionAsError(new IllegalStateException(
                            "Response constructor with expected parameters not found."));
                }
            } catch (IllegalAccessException | InvocationTargetException | InstantiationException e) {
                throw logger.logExceptionAsError(reactor.core.Exceptions.propagate(e));
            }
        }

        throw logger.logExceptionAsError(new RuntimeException("Cannot find suitable constructor for class " + cls));
    }

    /**
     * Converts the response body to the declared entity type: boolean success for HEAD, raw bytes
     * (optionally Base64Url-decoded), the streaming body Flux, or the decoded POJO.
     */
    protected final Mono<?> handleBodyReturnType(final HttpDecodedResponse response,
        final SwaggerMethodParser methodParser, final Type entityType) {
        final int responseStatusCode = response.sourceResponse().statusCode();
        final HttpMethod httpMethod = methodParser.httpMethod();
        final Type returnValueWireType = methodParser.returnValueWireType();

        final Mono<?> asyncResult;
        if (httpMethod == HttpMethod.HEAD
            && (TypeUtil.isTypeOrSubTypeOf(entityType, Boolean.TYPE)
                || TypeUtil.isTypeOrSubTypeOf(entityType, Boolean.class))) {
            // HEAD + boolean return means "did the call succeed" (any 2xx).
            boolean isSuccess = (responseStatusCode / 100) == 2;
            asyncResult = Mono.just(isSuccess);
        } else if (TypeUtil.isTypeOrSubTypeOf(entityType, byte[].class)) {
            Mono<byte[]> responseBodyBytesAsync = response.sourceResponse().bodyAsByteArray();
            if (returnValueWireType == Base64Url.class) {
                responseBodyBytesAsync = responseBodyBytesAsync
                    .map(base64UrlBytes -> new Base64Url(base64UrlBytes).decodedBytes());
            }
            asyncResult = responseBodyBytesAsync;
        } else if (FluxUtil.isFluxByteBuffer(entityType)) {
            // Hand the caller the raw streaming body without buffering it.
            asyncResult = Mono.just(response.sourceResponse().body());
        } else {
            asyncResult = response.decodedBody();
        }
        return asyncResult;
    }

    protected Object handleHttpResponse(final HttpRequest httpRequest,
        Mono<HttpDecodedResponse> asyncDecodedHttpResponse, SwaggerMethodParser methodParser, Type returnType,
        Context context) {
        return handleRestReturnType(asyncDecodedHttpResponse, methodParser, returnType, context);
    }

    protected Object handleResumeOperation(HttpRequest httpRequest, OperationDescription operationDescription,
        SwaggerMethodParser methodParser, Type returnType, Context context) throws Exception {
        // Subclasses that support long-running-operation resumption override this.
        throw new Exception("The resume operation is not available in the base RestProxy class.");
    }

    /**
     * Handle the provided asynchronous HTTP response and return the deserialized value.
     *
     * @param asyncHttpDecodedResponse the asynchronous HTTP response to the original HTTP request
     * @param methodParser the SwaggerMethodParser that the request originates from
     * @param returnType the type of value that will be returned
     * @param context Additional context that is passed through the Http pipeline during the service call.
     * @return the deserialized result
     */
    public final Object handleRestReturnType(Mono<HttpDecodedResponse> asyncHttpDecodedResponse,
        final SwaggerMethodParser methodParser, final Type returnType, Context context) {
        // doOnEach + subscriberContext route the tracing Context to endTracingSpan per signal.
        final Mono<HttpDecodedResponse> asyncExpectedResponse =
            ensureExpectedStatus(asyncHttpDecodedResponse, methodParser)
                .doOnEach(RestProxy::endTracingSpan)
                .subscriberContext(reactor.util.context.Context.of("TRACING_CONTEXT", context));

        final Object result;
        if (TypeUtil.isTypeOrSubTypeOf(returnType, Mono.class)) {
            final Type monoTypeParam = TypeUtil.getTypeArgument(returnType);
            if (TypeUtil.isTypeOrSubTypeOf(monoTypeParam, Void.class)) {
                result = asyncExpectedResponse.then();
            } else {
                result = asyncExpectedResponse.flatMap(response ->
                    handleRestResponseReturnType(response, methodParser, monoTypeParam));
            }
        } else if (FluxUtil.isFluxByteBuffer(returnType)) {
            result = asyncExpectedResponse.flatMapMany(ar -> ar.sourceResponse().body());
        } else if (TypeUtil.isTypeOrSubTypeOf(returnType, void.class)
            || TypeUtil.isTypeOrSubTypeOf(returnType, Void.class)) {
            // Synchronous void return: block until completion, surface nothing.
            asyncExpectedResponse.block();
            result = null;
        } else {
            // Synchronous non-void return: block for the deserialized value.
            result = asyncExpectedResponse
                .flatMap(httpResponse -> handleRestResponseReturnType(httpResponse, methodParser, returnType))
                .block();
        }
        return result;
    }

    /**
     * Ends the tracing span carried in the subscriber context, recording the response status code
     * (or the status of an HttpResponseException) and any error.
     */
    private static void endTracingSpan(Signal<HttpDecodedResponse> signal) {
        // Only onNext and onError signals carry an outcome worth recording.
        if (signal.isOnComplete() || signal.isOnSubscribe()) {
            return;
        }

        reactor.util.context.Context context = signal.getContext();
        Optional<Context> tracingContext = context.getOrEmpty("TRACING_CONTEXT");
        if (!tracingContext.isPresent()) {
            return;
        }

        int statusCode = 0;
        Throwable throwable = null;
        if (signal.hasValue()) {
            statusCode = signal.get().sourceResponse().statusCode();
        } else if (signal.hasError()) {
            throwable = signal.getThrowable();
            if (throwable instanceof HttpResponseException) {
                HttpResponseException exception = (HttpResponseException) throwable;
                statusCode = exception.response().statusCode();
            }
        }
        TracerProxy.end(statusCode, throwable, tracingContext.get());
    }

    /**
     * Create an instance of the default serializer.
     *
     * @return the default serializer
     */
    private static SerializerAdapter createDefaultSerializer() {
        return JacksonAdapter.createDefaultSerializerAdapter();
    }

    /**
     * Create the default HttpPipeline.
     *
     * @return the default HttpPipeline
     */
    public static HttpPipeline createDefaultPipeline() {
        return createDefaultPipeline((HttpPipelinePolicy) null);
    }

    /**
     * Create the default HttpPipeline.
     *
     * @param credentials the credentials to use to apply authentication to the pipeline
     * @return the default HttpPipeline
     */
    public static HttpPipeline createDefaultPipeline(TokenCredential credentials) {
        return createDefaultPipeline(new BearerTokenAuthenticationPolicy(credentials));
    }

    /**
     * Create the default HttpPipeline.
     *
     * @param credentialsPolicy the credentials policy factory to use to apply authentication to the
     * pipeline
     * @return the default HttpPipeline
     */
    public static HttpPipeline createDefaultPipeline(HttpPipelinePolicy credentialsPolicy) {
        List<HttpPipelinePolicy> policies = new ArrayList<>();
        policies.add(new UserAgentPolicy());
        policies.add(new RetryPolicy());
        policies.add(new CookiePolicy());
        if (credentialsPolicy != null) {
            policies.add(credentialsPolicy);
        }

        return new HttpPipelineBuilder()
            .policies(policies.toArray(new HttpPipelinePolicy[0]))
            .build();
    }

    /**
     * Create a proxy implementation of the provided Swagger interface.
     *
     * @param swaggerInterface the Swagger interface to provide a proxy implementation for
     * @param <A> the type of the Swagger interface
     * @return a proxy implementation of the provided Swagger interface
     */
    public static <A> A create(Class<A> swaggerInterface) {
        return create(swaggerInterface, createDefaultPipeline(), createDefaultSerializer());
    }

    /**
     * Create a proxy implementation of the provided Swagger interface.
     *
     * @param swaggerInterface the Swagger interface to provide a proxy implementation for
     * @param httpPipeline the HttpPipelinePolicy and HttpClient pipeline that will be used to send Http requests
     * @param <A> the type of the Swagger interface
     * @return a proxy implementation of the provided Swagger interface
     */
    public static <A> A create(Class<A> swaggerInterface, HttpPipeline httpPipeline) {
        return create(swaggerInterface, httpPipeline, createDefaultSerializer());
    }

    /**
     * Create a proxy implementation of the provided Swagger interface.
     *
     * @param swaggerInterface the Swagger interface to provide a proxy implementation for
     * @param httpPipeline the HttpPipelinePolicy and HttpClient pipeline that will be used to send Http
     * requests
     * @param serializer the serializer that will be used to convert POJOs to and from request and
     * response bodies
     * @param <A> the type of the Swagger interface.
     * @return a proxy implementation of the provided Swagger interface
     */
    @SuppressWarnings("unchecked")
    public static <A> A create(Class<A> swaggerInterface, HttpPipeline httpPipeline, SerializerAdapter serializer) {
        final SwaggerInterfaceParser interfaceParser = new SwaggerInterfaceParser(swaggerInterface, serializer);
        final RestProxy restProxy = new RestProxy(httpPipeline, serializer, interfaceParser);
        return (A) Proxy.newProxyInstance(swaggerInterface.getClassLoader(), new Class<?>[]{swaggerInterface},
            restProxy);
    }
}
class RestProxy implements InvocationHandler { private final ClientLogger logger = new ClientLogger(RestProxy.class); private final HttpPipeline httpPipeline; private final SerializerAdapter serializer; private final SwaggerInterfaceParser interfaceParser; private final HttpResponseDecoder decoder; /** * Create a RestProxy. * * @param httpPipeline the HttpPipelinePolicy and HttpClient httpPipeline that will be used to send HTTP * requests. * @param serializer the serializer that will be used to convert response bodies to POJOs. * @param interfaceParser the parser that contains information about the interface describing REST API methods * that this RestProxy "implements". */ public RestProxy(HttpPipeline httpPipeline, SerializerAdapter serializer, SwaggerInterfaceParser interfaceParser) { this.httpPipeline = httpPipeline; this.serializer = serializer; this.interfaceParser = interfaceParser; this.decoder = new HttpResponseDecoder(this.serializer); } /** * Get the SwaggerMethodParser for the provided method. The Method must exist on the Swagger * interface that this RestProxy was created to "implement". * * @param method the method to get a SwaggerMethodParser for * @return the SwaggerMethodParser for the provided method */ private SwaggerMethodParser methodParser(Method method) { return interfaceParser.methodParser(method); } /** * Get the SerializerAdapter used by this RestProxy. * * @return The SerializerAdapter used by this RestProxy */ public SerializerAdapter serializer() { return serializer; } /** * Send the provided request asynchronously, applying any request policies provided to the HttpClient instance. 
* * @param request the HTTP request to send * @param contextData the context * @return a {@link Mono} that emits HttpResponse asynchronously */ @Override public Object invoke(Object proxy, final Method method, Object[] args) { try { final SwaggerMethodParser methodParser; final HttpRequest request; if (method.isAnnotationPresent(ResumeOperation.class)) { OperationDescription opDesc = ImplUtils.findFirstOfType(args, OperationDescription.class); Method resumeMethod = determineResumeMethod(method, opDesc.methodName()); methodParser = methodParser(resumeMethod); request = createHttpRequest(opDesc, methodParser, args); final Type returnType = methodParser.returnType(); return handleResumeOperation(request, opDesc, methodParser, returnType, startTracingSpan(resumeMethod, Context.NONE)); } else { methodParser = methodParser(method); request = createHttpRequest(methodParser, args); Context context = methodParser.context(args).addData("caller-method", methodParser.fullyQualifiedMethodName()); context = startTracingSpan(method, context); if (request.body() != null) { request.body(validateLength(request)); } final Mono<HttpResponse> asyncResponse = send(request, context); Mono<HttpDecodedResponse> asyncDecodedResponse = this.decoder.decode(asyncResponse, methodParser); return handleHttpResponse(request, asyncDecodedResponse, methodParser, methodParser.returnType(), context); } } catch (Exception e) { throw logger.logExceptionAsError(Exceptions.propagate(e)); } } private Flux<ByteBuffer> validateLength(final HttpRequest request) { final Flux<ByteBuffer> bbFlux = request.body(); if (bbFlux == null) { return Flux.empty(); } return Flux.defer(() -> { Long expectedLength = Long.valueOf(request.headers().value("Content-Length")); final long[] currentTotalLength = new long[1]; return bbFlux.doOnEach(s -> { if (s.isOnNext()) { ByteBuffer byteBuffer = s.get(); int currentLength = (byteBuffer == null) ? 
0 : byteBuffer.remaining(); currentTotalLength[0] += currentLength; if (currentTotalLength[0] > expectedLength) { throw logger.logExceptionAsError(new UnexpectedLengthException( String.format("Request body emitted %d bytes more than the expected %d bytes.", currentTotalLength[0], expectedLength), currentTotalLength[0], expectedLength)); } } else if (s.isOnComplete()) { if (expectedLength.compareTo(currentTotalLength[0]) != 0) { throw logger.logExceptionAsError(new UnexpectedLengthException( String.format("Request body emitted %d bytes less than the expected %d bytes.", currentTotalLength[0], expectedLength), currentTotalLength[0], expectedLength)); } } }); }); } private Method determineResumeMethod(Method method, String resumeMethodName) { for (Method potentialResumeMethod : method.getDeclaringClass().getMethods()) { if (potentialResumeMethod.getName().equals(resumeMethodName)) { return potentialResumeMethod; } } return null; } /** * Starts the tracing span for the current service call, additionally set metadata attributes on the span by passing * additional context information. * @param method Service method being called. * @param context Context information about the current service call. * @return The updated context containing the span context. */ private Context startTracingSpan(Method method, Context context) { String spanName = String.format("Azure.%s/%s", interfaceParser.serviceName(), method.getName()); context = TracerProxy.setSpanName(spanName, context); return TracerProxy.start(spanName, context); } /** * Create a HttpRequest for the provided Swagger method using the provided arguments. 
* * @param methodParser the Swagger method parser to use * @param args the arguments to use to populate the method's annotation values * @return a HttpRequest * @throws IOException thrown if the body contents cannot be serialized */ private HttpRequest createHttpRequest(SwaggerMethodParser methodParser, Object[] args) throws IOException { UrlBuilder urlBuilder; final String path = methodParser.path(args); final UrlBuilder pathUrlBuilder = UrlBuilder.parse(path); if (pathUrlBuilder.scheme() != null) { urlBuilder = pathUrlBuilder; } else { urlBuilder = new UrlBuilder(); final String scheme = methodParser.scheme(args); urlBuilder.scheme(scheme); final String host = methodParser.host(args); urlBuilder.host(host); if (path != null && !path.isEmpty() && !path.equals("/")) { String hostPath = urlBuilder.path(); if (hostPath == null || hostPath.isEmpty() || hostPath.equals("/")) { urlBuilder.path(path); } else { urlBuilder.path(hostPath + "/" + path); } } } for (final EncodedParameter queryParameter : methodParser.encodedQueryParameters(args)) { urlBuilder.setQueryParameter(queryParameter.name(), queryParameter.encodedValue()); } final URL url = urlBuilder.toURL(); final HttpRequest request = configRequest(new HttpRequest(methodParser.httpMethod(), url), methodParser, args); for (final HttpHeader header : methodParser.headers(args)) { request.header(header.name(), header.value()); } return request; } /** * Create a HttpRequest for the provided Swagger method using the provided arguments. 
* * @param methodParser the Swagger method parser to use * @param args the arguments to use to populate the method's annotation values * @return a HttpRequest * @throws IOException thrown if the body contents cannot be serialized */ private HttpRequest createHttpRequest(OperationDescription operationDescription, SwaggerMethodParser methodParser, Object[] args) throws IOException { final HttpRequest request = configRequest(new HttpRequest(methodParser.httpMethod(), operationDescription.url()), methodParser, args); for (final String headerName : operationDescription.headers().keySet()) { request.header(headerName, operationDescription.headers().get(headerName)); } return request; } @SuppressWarnings("unchecked") private HttpRequest configRequest(HttpRequest request, SwaggerMethodParser methodParser, Object[] args) throws IOException { final Object bodyContentObject = methodParser.body(args); if (bodyContentObject == null) { request.headers().put("Content-Length", "0"); } else { String contentType = methodParser.bodyContentType(); if (contentType == null || contentType.isEmpty()) { if (bodyContentObject instanceof byte[] || bodyContentObject instanceof String) { contentType = ContentType.APPLICATION_OCTET_STREAM; } else { contentType = ContentType.APPLICATION_JSON; } } request.headers().put("Content-Type", contentType); boolean isJson = false; final String[] contentTypeParts = contentType.split(";"); for (String contentTypePart : contentTypeParts) { if (contentTypePart.trim().equalsIgnoreCase(ContentType.APPLICATION_JSON)) { isJson = true; break; } } if (isJson) { final String bodyContentString = serializer.serialize(bodyContentObject, SerializerEncoding.JSON); request.body(bodyContentString); } else if (FluxUtil.isFluxByteBuffer(methodParser.bodyJavaType())) { request.body((Flux<ByteBuffer>) bodyContentObject); } else if (bodyContentObject instanceof byte[]) { request.body((byte[]) bodyContentObject); } else if (bodyContentObject instanceof String) { final String 
bodyContentString = (String) bodyContentObject; if (!bodyContentString.isEmpty()) { request.body(bodyContentString); } } else if (bodyContentObject instanceof ByteBuffer) { request.body(Flux.just((ByteBuffer) bodyContentObject)); } else { final String bodyContentString = serializer.serialize(bodyContentObject, SerializerEncoding.fromHeaders(request.headers())); request.body(bodyContentString); } } return request; } private Mono<HttpDecodedResponse> ensureExpectedStatus(Mono<HttpDecodedResponse> asyncDecodedResponse, final SwaggerMethodParser methodParser) { return asyncDecodedResponse .flatMap(decodedHttpResponse -> ensureExpectedStatus(decodedHttpResponse, methodParser, null)); } private static Exception instantiateUnexpectedException(UnexpectedExceptionInformation exception, HttpResponse httpResponse, String responseContent, Object responseDecodedContent) { final int responseStatusCode = httpResponse.statusCode(); String contentType = httpResponse.headerValue("Content-Type"); String bodyRepresentation; if ("application/octet-stream".equalsIgnoreCase(contentType)) { bodyRepresentation = "(" + httpResponse.headerValue("Content-Length") + "-byte body)"; } else { bodyRepresentation = responseContent.isEmpty() ? "(empty body)" : "\"" + responseContent + "\""; } Exception result; try { final Constructor<? extends HttpResponseException> exceptionConstructor = exception.exceptionType().getConstructor(String.class, HttpResponse.class, exception.exceptionBodyType()); result = exceptionConstructor.newInstance("Status code " + responseStatusCode + ", " + bodyRepresentation, httpResponse, responseDecodedContent); } catch (ReflectiveOperationException e) { String message = "Status code " + responseStatusCode + ", but an instance of " + exception.exceptionType().getCanonicalName() + " cannot be created." 
+ " Response body: " + bodyRepresentation; result = new IOException(message, e); } return result; } /** * Create a publisher that (1) emits error if the provided response {@code decodedResponse} has * 'disallowed status code' OR (2) emits provided response if it's status code ia allowed. * * 'disallowed status code' is one of the status code defined in the provided SwaggerMethodParser * or is in the int[] of additional allowed status codes. * * @param decodedResponse The HttpResponse to check. * @param methodParser The method parser that contains information about the service interface * method that initiated the HTTP request. * @param additionalAllowedStatusCodes Additional allowed status codes that are permitted based * on the context of the HTTP request. * @return An async-version of the provided decodedResponse. */ public Mono<HttpDecodedResponse> ensureExpectedStatus(final HttpDecodedResponse decodedResponse, final SwaggerMethodParser methodParser, int[] additionalAllowedStatusCodes) { final int responseStatusCode = decodedResponse.sourceResponse().statusCode(); final Mono<HttpDecodedResponse> asyncResult; if (!methodParser.isExpectedResponseStatusCode(responseStatusCode, additionalAllowedStatusCodes)) { Mono<String> bodyAsString = decodedResponse.sourceResponse().bodyAsString(); asyncResult = bodyAsString.flatMap((Function<String, Mono<HttpDecodedResponse>>) responseContent -> { Mono<Object> decodedErrorBody = decodedResponse.decodedBody(); return decodedErrorBody.flatMap((Function<Object, Mono<HttpDecodedResponse>>) responseDecodedErrorObject -> { Throwable exception = instantiateUnexpectedException(methodParser.getUnexpectedException(responseStatusCode), decodedResponse.sourceResponse(), responseContent, responseDecodedErrorObject); return Mono.error(exception); }).switchIfEmpty(Mono.defer((Supplier<Mono<HttpDecodedResponse>>) () -> { Throwable exception = instantiateUnexpectedException(methodParser.getUnexpectedException(responseStatusCode), 
decodedResponse.sourceResponse(), responseContent, null); return Mono.error(exception); })); }).switchIfEmpty(Mono.defer((Supplier<Mono<HttpDecodedResponse>>) () -> { Throwable exception = instantiateUnexpectedException(methodParser.getUnexpectedException(responseStatusCode), decodedResponse.sourceResponse(), "", null); return Mono.error(exception); })); } else { asyncResult = Mono.just(decodedResponse); } return asyncResult; } private Mono<?> handleRestResponseReturnType(HttpDecodedResponse response, SwaggerMethodParser methodParser, Type entityType) { Mono<?> asyncResult; if (TypeUtil.isTypeOrSubTypeOf(entityType, Response.class)) { Type bodyType = TypeUtil.getRestResponseBodyType(entityType); if (TypeUtil.isTypeOrSubTypeOf(bodyType, Void.class)) { asyncResult = response.sourceResponse().body().ignoreElements() .then(Mono.just(createResponse(response, entityType, null))); } else { asyncResult = handleBodyReturnType(response, methodParser, bodyType) .map((Function<Object, Response<?>>) bodyAsObject -> createResponse(response, entityType, bodyAsObject)) .switchIfEmpty(Mono.defer((Supplier<Mono<Response<?>>>) () -> Mono.just(createResponse(response, entityType, null)))); } } else { asyncResult = handleBodyReturnType(response, methodParser, entityType); } return asyncResult; } @SuppressWarnings("unchecked") private Response<?> createResponse(HttpDecodedResponse response, Type entityType, Object bodyAsObject) { final HttpResponse httpResponse = response.sourceResponse(); final HttpRequest httpRequest = httpResponse.request(); final int responseStatusCode = httpResponse.statusCode(); final HttpHeaders responseHeaders = httpResponse.headers(); Class<? extends Response<?>> cls = (Class<? extends Response<?>>) TypeUtil.getRawClass(entityType); if (cls.equals(Response.class)) { cls = (Class<? extends Response<?>>) (Object) ResponseBase.class; } else if (cls.equals(PagedResponse.class)) { cls = (Class<? 
extends Response<?>>) (Object) PagedResponseBase.class; if (bodyAsObject != null && !TypeUtil.isTypeOrSubTypeOf(bodyAsObject.getClass(), Page.class)) { throw logger.logExceptionAsError(new RuntimeException("Unable to create PagedResponse<T>. Body must be of a type that implements: " + Page.class)); } } List<Constructor<?>> constructors = Arrays.stream(cls.getDeclaredConstructors()) .filter(constructor -> { int paramCount = constructor.getParameterCount(); return paramCount >= 3 && paramCount <= 5; }) .sorted(Comparator.comparingInt(Constructor::getParameterCount)) .collect(Collectors.toList()); if (constructors.isEmpty()) { throw logger.logExceptionAsError(new RuntimeException("Cannot find suitable constructor for class " + cls)); } for (Constructor<?> constructor : constructors) { final Constructor<? extends Response<?>> ctor = (Constructor<? extends Response<?>>) constructor; try { final int paramCount = constructor.getParameterCount(); switch (paramCount) { case 3: return ctor.newInstance(httpRequest, responseStatusCode, responseHeaders); case 4: return ctor.newInstance(httpRequest, responseStatusCode, responseHeaders, bodyAsObject); case 5: return ctor.newInstance(httpRequest, responseStatusCode, responseHeaders, bodyAsObject, response.decodedHeaders().block()); default: throw logger.logExceptionAsError(new IllegalStateException("Response constructor with expected parameters not found.")); } } catch (IllegalAccessException | InvocationTargetException | InstantiationException e) { throw logger.logExceptionAsError(reactor.core.Exceptions.propagate(e)); } } throw logger.logExceptionAsError(new RuntimeException("Cannot find suitable constructor for class " + cls)); } protected final Mono<?> handleBodyReturnType(final HttpDecodedResponse response, final SwaggerMethodParser methodParser, final Type entityType) { final int responseStatusCode = response.sourceResponse().statusCode(); final HttpMethod httpMethod = methodParser.httpMethod(); final Type 
returnValueWireType = methodParser.returnValueWireType(); final Mono<?> asyncResult; if (httpMethod == HttpMethod.HEAD && (TypeUtil.isTypeOrSubTypeOf(entityType, Boolean.TYPE) || TypeUtil.isTypeOrSubTypeOf(entityType, Boolean.class))) { boolean isSuccess = (responseStatusCode / 100) == 2; asyncResult = Mono.just(isSuccess); } else if (TypeUtil.isTypeOrSubTypeOf(entityType, byte[].class)) { Mono<byte[]> responseBodyBytesAsync = response.sourceResponse().bodyAsByteArray(); if (returnValueWireType == Base64Url.class) { responseBodyBytesAsync = responseBodyBytesAsync.map(base64UrlBytes -> new Base64Url(base64UrlBytes).decodedBytes()); } asyncResult = responseBodyBytesAsync; } else if (FluxUtil.isFluxByteBuffer(entityType)) { asyncResult = Mono.just(response.sourceResponse().body()); } else { asyncResult = response.decodedBody(); } return asyncResult; } protected Object handleHttpResponse(final HttpRequest httpRequest, Mono<HttpDecodedResponse> asyncDecodedHttpResponse, SwaggerMethodParser methodParser, Type returnType, Context context) { return handleRestReturnType(asyncDecodedHttpResponse, methodParser, returnType, context); } protected Object handleResumeOperation(HttpRequest httpRequest, OperationDescription operationDescription, SwaggerMethodParser methodParser, Type returnType, Context context) { throw logger.logExceptionAsError(Exceptions.propagate(new Exception("The resume operation is not available in the base RestProxy class."))); } /** * Handle the provided asynchronous HTTP response and return the deserialized value. * * @param asyncHttpDecodedResponse the asynchronous HTTP response to the original HTTP request * @param methodParser the SwaggerMethodParser that the request originates from * @param returnType the type of value that will be returned * @param context Additional context that is passed through the Http pipeline during the service call. 
* @return the deserialized result */ public final Object handleRestReturnType(Mono<HttpDecodedResponse> asyncHttpDecodedResponse, final SwaggerMethodParser methodParser, final Type returnType, Context context) { final Mono<HttpDecodedResponse> asyncExpectedResponse = ensureExpectedStatus(asyncHttpDecodedResponse, methodParser) .doOnEach(RestProxy::endTracingSpan) .subscriberContext(reactor.util.context.Context.of("TRACING_CONTEXT", context)); final Object result; if (TypeUtil.isTypeOrSubTypeOf(returnType, Mono.class)) { final Type monoTypeParam = TypeUtil.getTypeArgument(returnType); if (TypeUtil.isTypeOrSubTypeOf(monoTypeParam, Void.class)) { result = asyncExpectedResponse.then(); } else { result = asyncExpectedResponse.flatMap(response -> handleRestResponseReturnType(response, methodParser, monoTypeParam)); } } else if (FluxUtil.isFluxByteBuffer(returnType)) { result = asyncExpectedResponse.flatMapMany(ar -> ar.sourceResponse().body()); } else if (TypeUtil.isTypeOrSubTypeOf(returnType, void.class) || TypeUtil.isTypeOrSubTypeOf(returnType, Void.class)) { asyncExpectedResponse.block(); result = null; } else { result = asyncExpectedResponse .flatMap(httpResponse -> handleRestResponseReturnType(httpResponse, methodParser, returnType)) .block(); } return result; } private static void endTracingSpan(Signal<HttpDecodedResponse> signal) { if (signal.isOnComplete() || signal.isOnSubscribe()) { return; } reactor.util.context.Context context = signal.getContext(); Optional<Context> tracingContext = context.getOrEmpty("TRACING_CONTEXT"); if (!tracingContext.isPresent()) { return; } int statusCode = 0; HttpDecodedResponse httpDecodedResponse; Throwable throwable = null; if (signal.hasValue()) { httpDecodedResponse = signal.get(); statusCode = httpDecodedResponse.sourceResponse().statusCode(); } else if (signal.hasError()) { throwable = signal.getThrowable(); if (throwable instanceof HttpResponseException) { HttpResponseException exception = (HttpResponseException) throwable; 
statusCode = exception.response().statusCode(); } } TracerProxy.end(statusCode, throwable, tracingContext.get()); } /** * Create an instance of the default serializer. * * @return the default serializer */ private static SerializerAdapter createDefaultSerializer() { return JacksonAdapter.createDefaultSerializerAdapter(); } /** * Create the default HttpPipeline. * * @return the default HttpPipeline */ public static HttpPipeline createDefaultPipeline() { return createDefaultPipeline((HttpPipelinePolicy) null); } /** * Create the default HttpPipeline. * * @param credentials the credentials to use to apply authentication to the pipeline * @return the default HttpPipeline */ public static HttpPipeline createDefaultPipeline(TokenCredential credentials) { return createDefaultPipeline(new BearerTokenAuthenticationPolicy(credentials)); } /** * Create the default HttpPipeline. * @param credentialsPolicy the credentials policy factory to use to apply authentication to the * pipeline * @return the default HttpPipeline */ public static HttpPipeline createDefaultPipeline(HttpPipelinePolicy credentialsPolicy) { List<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add(new UserAgentPolicy()); policies.add(new RetryPolicy()); policies.add(new CookiePolicy()); if (credentialsPolicy != null) { policies.add(credentialsPolicy); } return new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .build(); } /** * Create a proxy implementation of the provided Swagger interface. * * @param swaggerInterface the Swagger interface to provide a proxy implementation for * @param <A> the type of the Swagger interface * @return a proxy implementation of the provided Swagger interface */ public static <A> A create(Class<A> swaggerInterface) { return create(swaggerInterface, createDefaultPipeline(), createDefaultSerializer()); } /** * Create a proxy implementation of the provided Swagger interface. 
* * @param swaggerInterface the Swagger interface to provide a proxy implementation for * @param httpPipeline the HttpPipelinePolicy and HttpClient pipeline that will be used to send Http requests * @param <A> the type of the Swagger interface * @return a proxy implementation of the provided Swagger interface */ public static <A> A create(Class<A> swaggerInterface, HttpPipeline httpPipeline) { return create(swaggerInterface, httpPipeline, createDefaultSerializer()); } /** * Create a proxy implementation of the provided Swagger interface. * * @param swaggerInterface the Swagger interface to provide a proxy implementation for * @param httpPipeline the HttpPipelinePolicy and HttpClient pipline that will be used to send Http * requests * @param serializer the serializer that will be used to convert POJOs to and from request and * response bodies * @param <A> the type of the Swagger interface. * @return a proxy implementation of the provided Swagger interface */ @SuppressWarnings("unchecked") public static <A> A create(Class<A> swaggerInterface, HttpPipeline httpPipeline, SerializerAdapter serializer) { final SwaggerInterfaceParser interfaceParser = new SwaggerInterfaceParser(swaggerInterface, serializer); final RestProxy restProxy = new RestProxy(httpPipeline, serializer, interfaceParser); return (A) Proxy.newProxyInstance(swaggerInterface.getClassLoader(), new Class<?>[]{swaggerInterface}, restProxy); } }
Extra set of parentheses here.
private HttpRequest configRequest(HttpRequest request, SwaggerMethodParser methodParser, Object[] args) throws IOException { final Object bodyContentObject = methodParser.body(args); if (bodyContentObject == null) { request.headers().put("Content-Length", "0"); } else { String contentType = methodParser.bodyContentType(); if (contentType == null || contentType.isEmpty()) { if (bodyContentObject instanceof byte[] || bodyContentObject instanceof String) { contentType = ContentType.APPLICATION_OCTET_STREAM; } else { contentType = ContentType.APPLICATION_JSON; } } request.headers().put("Content-Type", contentType); boolean isJson = false; final String[] contentTypeParts = contentType.split(";"); for (String contentTypePart : contentTypeParts) { if (contentTypePart.trim().equalsIgnoreCase(ContentType.APPLICATION_JSON)) { isJson = true; break; } } if (isJson) { final String bodyContentString = serializer.serialize(bodyContentObject, SerializerEncoding.JSON); request.body(bodyContentString); } else if (FluxUtil.isFluxByteBuffer(methodParser.bodyJavaType())) { request.body((Flux<ByteBuffer>) bodyContentObject); } else if (bodyContentObject instanceof byte[]) { request.body((byte[]) bodyContentObject); } else if (bodyContentObject instanceof String) { final String bodyContentString = (String) bodyContentObject; if (!bodyContentString.isEmpty()) { request.body(bodyContentString); } } else if(bodyContentObject instanceof ByteBuffer) { request.body(Flux.just(((ByteBuffer) bodyContentObject))); } else { final String bodyContentString = serializer.serialize(bodyContentObject, SerializerEncoding.fromHeaders(request.headers())); request.body(bodyContentString); } } return request; }
request.body(Flux.just(((ByteBuffer) bodyContentObject)));
private HttpRequest configRequest(HttpRequest request, SwaggerMethodParser methodParser, Object[] args) throws IOException { final Object bodyContentObject = methodParser.body(args); if (bodyContentObject == null) { request.headers().put("Content-Length", "0"); } else { String contentType = methodParser.bodyContentType(); if (contentType == null || contentType.isEmpty()) { if (bodyContentObject instanceof byte[] || bodyContentObject instanceof String) { contentType = ContentType.APPLICATION_OCTET_STREAM; } else { contentType = ContentType.APPLICATION_JSON; } } request.headers().put("Content-Type", contentType); boolean isJson = false; final String[] contentTypeParts = contentType.split(";"); for (String contentTypePart : contentTypeParts) { if (contentTypePart.trim().equalsIgnoreCase(ContentType.APPLICATION_JSON)) { isJson = true; break; } } if (isJson) { final String bodyContentString = serializer.serialize(bodyContentObject, SerializerEncoding.JSON); request.body(bodyContentString); } else if (FluxUtil.isFluxByteBuffer(methodParser.bodyJavaType())) { request.body((Flux<ByteBuffer>) bodyContentObject); } else if (bodyContentObject instanceof byte[]) { request.body((byte[]) bodyContentObject); } else if (bodyContentObject instanceof String) { final String bodyContentString = (String) bodyContentObject; if (!bodyContentString.isEmpty()) { request.body(bodyContentString); } } else if (bodyContentObject instanceof ByteBuffer) { request.body(Flux.just((ByteBuffer) bodyContentObject)); } else { final String bodyContentString = serializer.serialize(bodyContentObject, SerializerEncoding.fromHeaders(request.headers())); request.body(bodyContentString); } } return request; }
/**
 * Invocation handler that turns a Swagger-annotated interface into live HTTP calls:
 * it builds an {@code HttpRequest} from the invoked method's annotations and arguments,
 * sends it through an {@code HttpPipeline}, and decodes the response into the method's
 * declared return type.
 */
class RestProxy implements InvocationHandler {
    private final ClientLogger logger = new ClientLogger(RestProxy.class);

    private final HttpPipeline httpPipeline;
    private final SerializerAdapter serializer;
    private final SwaggerInterfaceParser interfaceParser;
    private final HttpResponseDecoder decoder;

    /**
     * Create a RestProxy.
     *
     * @param httpPipeline the HttpPipelinePolicy and HttpClient httpPipeline that will be used to send HTTP
     * requests.
     * @param serializer the serializer that will be used to convert response bodies to POJOs.
     * @param interfaceParser the parser that contains information about the interface describing REST API methods
     * that this RestProxy "implements".
     */
    public RestProxy(HttpPipeline httpPipeline, SerializerAdapter serializer, SwaggerInterfaceParser interfaceParser) {
        this.httpPipeline = httpPipeline;
        this.serializer = serializer;
        this.interfaceParser = interfaceParser;
        this.decoder = new HttpResponseDecoder(this.serializer);
    }

    /**
     * Get the SwaggerMethodParser for the provided method. The Method must exist on the Swagger
     * interface that this RestProxy was created to "implement".
     *
     * @param method the method to get a SwaggerMethodParser for
     * @return the SwaggerMethodParser for the provided method
     */
    private SwaggerMethodParser methodParser(Method method) {
        return interfaceParser.methodParser(method);
    }

    /**
     * Get the SerializerAdapter used by this RestProxy.
     *
     * @return The SerializerAdapter used by this RestProxy
     */
    public SerializerAdapter serializer() {
        return serializer;
    }

    /**
     * Send the provided request asynchronously, applying any request policies provided to the HttpClient instance.
     *
     * @param request the HTTP request to send
     * @param contextData the context
     * @return a {@link Mono} that emits HttpResponse asynchronously
     */
    public Mono<HttpResponse> send(HttpRequest request, Context contextData) {
        // A previous revision built request.body().map(ByteBuffer::reset) here and discarded
        // the result; that operator chain was never subscribed to, so it was a no-op and has
        // been removed.
        return httpPipeline.send(request, contextData);
    }

    @Override
    public Object invoke(Object proxy, final Method method, Object[] args) {
        try {
            final SwaggerMethodParser methodParser;
            final HttpRequest request;
            if (method.isAnnotationPresent(ResumeOperation.class)) {
                // Resuming a long-running operation: rebuild the request from the saved description.
                OperationDescription opDesc = ImplUtils.findFirstOfType(args, OperationDescription.class);
                Method resumeMethod = determineResumeMethod(method, opDesc.methodName());

                methodParser = methodParser(resumeMethod);
                request = createHttpRequest(opDesc, methodParser, args);
                final Type returnType = methodParser.returnType();
                return handleResumeOperation(request, opDesc, methodParser, returnType,
                    startTracingSpan(resumeMethod, Context.NONE));
            } else {
                methodParser = methodParser(method);
                request = createHttpRequest(methodParser, args);
                Context context = methodParser.context(args)
                    .addData("caller-method", methodParser.fullyQualifiedMethodName());
                context = startTracingSpan(method, context);

                if (request.body() != null) {
                    // Guard the outgoing stream so it emits exactly Content-Length bytes.
                    request.body(validateLength(request, request.body()));
                }

                final Mono<HttpResponse> asyncResponse = send(request, context);
                Mono<HttpDecodedResponse> asyncDecodedResponse = this.decoder.decode(asyncResponse, methodParser);
                return handleHttpResponse(request, asyncDecodedResponse, methodParser, methodParser.returnType(),
                    context);
            }
        } catch (Exception e) {
            throw logger.logExceptionAsError(Exceptions.propagate(e));
        }
    }

    /**
     * Wraps the request body so that, on subscription, the number of emitted bytes is checked
     * against the request's {@code Content-Length} header: emitting more bytes fails fast on the
     * offending buffer, and completing with fewer bytes fails on completion.
     *
     * @param request the request whose {@code Content-Length} header defines the expected size
     * @param bbFlux the raw body stream; may be {@code null}
     * @return the validating body stream, or an empty Flux when {@code bbFlux} is {@code null}
     */
    private Flux<ByteBuffer> validateLength(final HttpRequest request, final Flux<ByteBuffer> bbFlux) {
        if (bbFlux == null) {
            return Flux.empty();
        }
        // Defer so the header is re-read and the running total reset on every subscription.
        return Flux.defer(() -> {
            // NOTE(review): assumes the Content-Length header is present and numeric; a missing
            // header fails here with NumberFormatException — confirm all callers set it.
            final long expectedLength = Long.parseLong(request.headers().value("Content-Length"));
            // Running total in a one-element array so the lambda can mutate it. Previously this
            // was a List<Integer> re-summed on every emission: accidental O(n^2) with boxing,
            // and an int sum that could overflow for bodies over 2 GiB.
            final long[] emittedLength = new long[1];
            return bbFlux.doOnEach(signal -> {
                if (signal.isOnNext()) {
                    emittedLength[0] += signal.get().remaining();
                    if (emittedLength[0] > expectedLength) {
                        throw new UnexpectedLengthException(
                            String.format("Request body emitted %d bytes more than the expected %d bytes.",
                                emittedLength[0], expectedLength), emittedLength[0], expectedLength);
                    }
                } else if (signal.isOnComplete()) {
                    if (emittedLength[0] != expectedLength) {
                        throw new UnexpectedLengthException(
                            String.format("Request body emitted %d bytes less than the expected %d bytes.",
                                emittedLength[0], expectedLength), emittedLength[0], expectedLength);
                    }
                } else {
                    // Error signal from the upstream body: log it (validation cannot proceed).
                    logger.logExceptionAsError(new RuntimeException("Error occurs when validating "
                        + "the request body length and the header content length. Error details: "
                        + signal.getThrowable().getMessage()));
                }
            });
        });
    }

    /**
     * Finds, on the declaring class of {@code method}, the method whose name matches the saved
     * resume-method name. Returns {@code null} when no such method exists.
     */
    private Method determineResumeMethod(Method method, String resumeMethodName) {
        for (Method potentialResumeMethod : method.getDeclaringClass().getMethods()) {
            if (potentialResumeMethod.getName().equals(resumeMethodName)) {
                return potentialResumeMethod;
            }
        }
        return null;
    }

    /**
     * Starts the tracing span for the current service call, additionally set metadata attributes on the span by
     * passing additional context information.
     *
     * @param method Service method being called.
     * @param context Context information about the current service call.
     * @return The updated context containing the span context.
     */
    private Context startTracingSpan(Method method, Context context) {
        String spanName = String.format("Azure.%s/%s", interfaceParser.serviceName(), method.getName());
        context = TracerProxy.setSpanName(spanName, context);
        return TracerProxy.start(spanName, context);
    }

    /**
     * Create a HttpRequest for the provided Swagger method using the provided arguments.
     *
     * @param methodParser the Swagger method parser to use
     * @param args the arguments to use to populate the method's annotation values
     * @return a HttpRequest
     * @throws IOException thrown if the body contents cannot be serialized
     */
    private HttpRequest createHttpRequest(SwaggerMethodParser methodParser, Object[] args) throws IOException {
        UrlBuilder urlBuilder;

        // If the given path is an absolute URL it overrides the scheme/host annotations.
        final String path = methodParser.path(args);
        final UrlBuilder pathUrlBuilder = UrlBuilder.parse(path);
        if (pathUrlBuilder.scheme() != null) {
            urlBuilder = pathUrlBuilder;
        } else {
            urlBuilder = new UrlBuilder();

            final String scheme = methodParser.scheme(args);
            urlBuilder.scheme(scheme);

            final String host = methodParser.host(args);
            urlBuilder.host(host);

            // Append the method's relative path to any path already carried by the host.
            if (path != null && !path.isEmpty() && !path.equals("/")) {
                String hostPath = urlBuilder.path();
                if (hostPath == null || hostPath.isEmpty() || hostPath.equals("/")) {
                    urlBuilder.path(path);
                } else {
                    urlBuilder.path(hostPath + "/" + path);
                }
            }
        }

        for (final EncodedParameter queryParameter : methodParser.encodedQueryParameters(args)) {
            urlBuilder.setQueryParameter(queryParameter.name(), queryParameter.encodedValue());
        }

        final URL url = urlBuilder.toURL();
        final HttpRequest request = configRequest(new HttpRequest(methodParser.httpMethod(), url),
            methodParser, args);

        // Per-call headers are applied after the body so they can override body-derived headers.
        for (final HttpHeader header : methodParser.headers(args)) {
            request.header(header.name(), header.value());
        }

        return request;
    }

    /**
     * Create a HttpRequest for the provided Swagger method using the provided arguments, targeting
     * the URL and headers captured in a saved operation description.
     *
     * @param operationDescription the saved state of the operation being resumed
     * @param methodParser the Swagger method parser to use
     * @param args the arguments to use to populate the method's annotation values
     * @return a HttpRequest
     * @throws IOException thrown if the body contents cannot be serialized
     */
    private HttpRequest createHttpRequest(OperationDescription operationDescription,
        SwaggerMethodParser methodParser, Object[] args) throws IOException {
        final HttpRequest request = configRequest(
            new HttpRequest(methodParser.httpMethod(), operationDescription.url()), methodParser, args);

        for (final String headerName : operationDescription.headers().keySet()) {
            request.header(headerName, operationDescription.headers().get(headerName));
        }

        return request;
    }

    // Async adapter over the synchronous status-code check below.
    @SuppressWarnings("unchecked")
    private Mono<HttpDecodedResponse> ensureExpectedStatus(Mono<HttpDecodedResponse> asyncDecodedResponse,
        final SwaggerMethodParser methodParser) {
        return asyncDecodedResponse
            .flatMap(decodedHttpResponse -> ensureExpectedStatus(decodedHttpResponse, methodParser, null));
    }

    /**
     * Builds the exception instance declared for this unexpected status code via reflection; falls
     * back to an {@link IOException} describing the failure when the declared exception type cannot
     * be constructed.
     */
    private static Exception instantiateUnexpectedException(UnexpectedExceptionInformation exception,
        HttpResponse httpResponse, String responseContent, Object responseDecodedContent) {
        final int responseStatusCode = httpResponse.statusCode();
        String contentType = httpResponse.headerValue("Content-Type");
        String bodyRepresentation;
        if ("application/octet-stream".equalsIgnoreCase(contentType)) {
            // Don't dump binary payloads into the message; report their size instead.
            bodyRepresentation = "(" + httpResponse.headerValue("Content-Length") + "-byte body)";
        } else {
            bodyRepresentation = responseContent.isEmpty() ? "(empty body)" : "\"" + responseContent + "\"";
        }

        Exception result;
        try {
            final Constructor<? extends HttpResponseException> exceptionConstructor =
                exception.exceptionType().getConstructor(String.class, HttpResponse.class,
                    exception.exceptionBodyType());
            result = exceptionConstructor.newInstance("Status code " + responseStatusCode + ", "
                + bodyRepresentation, httpResponse, responseDecodedContent);
        } catch (ReflectiveOperationException e) {
            String message = "Status code " + responseStatusCode + ", but an instance of "
                + exception.exceptionType().getCanonicalName() + " cannot be created."
                + " Response body: " + bodyRepresentation;
            result = new IOException(message, e);
        }
        return result;
    }

    /**
     * Create a publisher that (1) emits error if the provided response {@code decodedResponse} has
     * 'disallowed status code' OR (2) emits provided response if it's status code ia allowed.
     *
     * 'disallowed status code' is one of the status code defined in the provided SwaggerMethodParser
     * or is in the int[] of additional allowed status codes.
     *
     * @param decodedResponse The HttpResponse to check.
     * @param methodParser The method parser that contains information about the service interface
     * method that initiated the HTTP request.
     * @param additionalAllowedStatusCodes Additional allowed status codes that are permitted based
     * on the context of the HTTP request.
     * @return An async-version of the provided decodedResponse.
     */
    public Mono<HttpDecodedResponse> ensureExpectedStatus(final HttpDecodedResponse decodedResponse,
        final SwaggerMethodParser methodParser, int[] additionalAllowedStatusCodes) {
        final int responseStatusCode = decodedResponse.sourceResponse().statusCode();
        final Mono<HttpDecodedResponse> asyncResult;
        if (!methodParser.isExpectedResponseStatusCode(responseStatusCode, additionalAllowedStatusCodes)) {
            Mono<String> bodyAsString = decodedResponse.sourceResponse().bodyAsString();
            // The switchIfEmpty fallbacks cover responses with no body and bodies that decode to empty.
            asyncResult = bodyAsString.flatMap((Function<String, Mono<HttpDecodedResponse>>) responseContent -> {
                Mono<Object> decodedErrorBody = decodedResponse.decodedBody();
                return decodedErrorBody
                    .flatMap((Function<Object, Mono<HttpDecodedResponse>>) responseDecodedErrorObject -> {
                        Throwable exception =
                            instantiateUnexpectedException(methodParser.getUnexpectedException(responseStatusCode),
                                decodedResponse.sourceResponse(), responseContent, responseDecodedErrorObject);
                        return Mono.error(exception);
                    })
                    .switchIfEmpty(Mono.defer((Supplier<Mono<HttpDecodedResponse>>) () -> {
                        Throwable exception =
                            instantiateUnexpectedException(methodParser.getUnexpectedException(responseStatusCode),
                                decodedResponse.sourceResponse(), responseContent, null);
                        return Mono.error(exception);
                    }));
            }).switchIfEmpty(Mono.defer((Supplier<Mono<HttpDecodedResponse>>) () -> {
                Throwable exception =
                    instantiateUnexpectedException(methodParser.getUnexpectedException(responseStatusCode),
                        decodedResponse.sourceResponse(), "", null);
                return Mono.error(exception);
            }));
        } else {
            asyncResult = Mono.just(decodedResponse);
        }
        return asyncResult;
    }

    /**
     * Converts the decoded response into either a {@code Response<T>} wrapper (when the entity
     * type is a Response subtype) or the bare body value.
     */
    private Mono<?> handleRestResponseReturnType(HttpDecodedResponse response, SwaggerMethodParser methodParser,
        Type entityType) {
        Mono<?> asyncResult;
        if (TypeUtil.isTypeOrSubTypeOf(entityType, Response.class)) {
            Type bodyType = TypeUtil.getRestResponseBodyType(entityType);
            if (TypeUtil.isTypeOrSubTypeOf(bodyType, Void.class)) {
                // Void body: drain the network stream, then wrap the response with a null body.
                asyncResult = response.sourceResponse().body().ignoreElements()
                    .then(Mono.just(createResponse(response, entityType, null)));
            } else {
                asyncResult = handleBodyReturnType(response, methodParser, bodyType)
                    .map((Function<Object, Response<?>>) bodyAsObject ->
                        createResponse(response, entityType, bodyAsObject))
                    .switchIfEmpty(Mono.defer((Supplier<Mono<Response<?>>>) () ->
                        Mono.just(createResponse(response, entityType, null))));
            }
        } else {
            asyncResult = handleBodyReturnType(response, methodParser, entityType);
        }
        return asyncResult;
    }

    /**
     * Instantiates the concrete Response implementation for {@code entityType} via reflection,
     * choosing the smallest-arity suitable constructor (3-5 parameters).
     */
    @SuppressWarnings("unchecked")
    private Response<?> createResponse(HttpDecodedResponse response, Type entityType, Object bodyAsObject) {
        final HttpResponse httpResponse = response.sourceResponse();
        final HttpRequest httpRequest = httpResponse.request();
        final int responseStatusCode = httpResponse.statusCode();
        final HttpHeaders responseHeaders = httpResponse.headers();

        // Map the declared interface types onto their instantiable base implementations.
        Class<? extends Response<?>> cls = (Class<? extends Response<?>>) TypeUtil.getRawClass(entityType);
        if (cls.equals(Response.class)) {
            cls = (Class<? extends Response<?>>) (Object) ResponseBase.class;
        } else if (cls.equals(PagedResponse.class)) {
            cls = (Class<? extends Response<?>>) (Object) PagedResponseBase.class;
            if (bodyAsObject != null && !TypeUtil.isTypeOrSubTypeOf(bodyAsObject.getClass(), Page.class)) {
                throw logger.logExceptionAsError(new RuntimeException(
                    "Unable to create PagedResponse<T>. Body must be of a type that implements: " + Page.class));
            }
        }

        List<Constructor<?>> constructors = Arrays.stream(cls.getDeclaredConstructors())
            .filter(constructor -> {
                int paramCount = constructor.getParameterCount();
                return paramCount >= 3 && paramCount <= 5;
            })
            .sorted(Comparator.comparingInt(Constructor::getParameterCount))
            .collect(Collectors.toList());

        if (constructors.isEmpty()) {
            throw logger.logExceptionAsError(
                new RuntimeException("Cannot find suitable constructor for class " + cls));
        }

        for (Constructor<?> constructor : constructors) {
            final Constructor<? extends Response<?>> ctor = (Constructor<? extends Response<?>>) constructor;
            try {
                final int paramCount = constructor.getParameterCount();
                switch (paramCount) {
                    case 3:
                        return ctor.newInstance(httpRequest, responseStatusCode, responseHeaders);
                    case 4:
                        return ctor.newInstance(httpRequest, responseStatusCode, responseHeaders, bodyAsObject);
                    case 5:
                        // NOTE(review): decodedHeaders().block() is a blocking call inside response
                        // construction — presumably the headers are already decoded here; confirm.
                        return ctor.newInstance(httpRequest, responseStatusCode, responseHeaders, bodyAsObject,
                            response.decodedHeaders().block());
                    default:
                        // Unreachable: the filter above only admits 3-5 parameter constructors.
                        throw logger.logExceptionAsError(new IllegalStateException(
                            "Response constructor with expected parameters not found."));
                }
            } catch (IllegalAccessException | InvocationTargetException | InstantiationException e) {
                throw logger.logExceptionAsError(reactor.core.Exceptions.propagate(e));
            }
        }

        throw logger.logExceptionAsError(new RuntimeException("Cannot find suitable constructor for class " + cls));
    }

    /**
     * Converts the response body into the declared entity type: HEAD→boolean success flag,
     * byte[] (optionally Base64Url-decoded), a raw {@code Flux<ByteBuffer>}, or the
     * deserialized POJO.
     */
    protected final Mono<?> handleBodyReturnType(final HttpDecodedResponse response,
        final SwaggerMethodParser methodParser, final Type entityType) {
        final int responseStatusCode = response.sourceResponse().statusCode();
        final HttpMethod httpMethod = methodParser.httpMethod();
        final Type returnValueWireType = methodParser.returnValueWireType();

        final Mono<?> asyncResult;
        if (httpMethod == HttpMethod.HEAD
            && (TypeUtil.isTypeOrSubTypeOf(entityType, Boolean.TYPE)
                || TypeUtil.isTypeOrSubTypeOf(entityType, Boolean.class))) {
            // HEAD + boolean return means "did the call succeed (2xx)?".
            boolean isSuccess = (responseStatusCode / 100) == 2;
            asyncResult = Mono.just(isSuccess);
        } else if (TypeUtil.isTypeOrSubTypeOf(entityType, byte[].class)) {
            Mono<byte[]> responseBodyBytesAsync = response.sourceResponse().bodyAsByteArray();
            if (returnValueWireType == Base64Url.class) {
                responseBodyBytesAsync = responseBodyBytesAsync
                    .map(base64UrlBytes -> new Base64Url(base64UrlBytes).decodedBytes());
            }
            asyncResult = responseBodyBytesAsync;
        } else if (FluxUtil.isFluxByteBuffer(entityType)) {
            // Caller wants the raw stream; don't buffer it.
            asyncResult = Mono.just(response.sourceResponse().body());
        } else {
            asyncResult = response.decodedBody();
        }
        return asyncResult;
    }

    // Extension point: subclasses may intercept the decoded response before return-type handling.
    protected Object handleHttpResponse(final HttpRequest httpRequest,
        Mono<HttpDecodedResponse> asyncDecodedHttpResponse, SwaggerMethodParser methodParser, Type returnType,
        Context context) {
        return handleRestReturnType(asyncDecodedHttpResponse, methodParser, returnType, context);
    }

    // Extension point: only subclasses that support resumable operations implement this.
    protected Object handleResumeOperation(HttpRequest httpRequest, OperationDescription operationDescription,
        SwaggerMethodParser methodParser, Type returnType, Context context) throws Exception {
        throw new Exception("The resume operation is not available in the base RestProxy class.");
    }

    /**
     * Handle the provided asynchronous HTTP response and return the deserialized value.
     *
     * @param asyncHttpDecodedResponse the asynchronous HTTP response to the original HTTP request
     * @param methodParser the SwaggerMethodParser that the request originates from
     * @param returnType the type of value that will be returned
     * @param context Additional context that is passed through the Http pipeline during the service call.
     * @return the deserialized result
     */
    public final Object handleRestReturnType(Mono<HttpDecodedResponse> asyncHttpDecodedResponse,
        final SwaggerMethodParser methodParser, final Type returnType, Context context) {
        // End the tracing span on every terminal signal; the span context travels in the
        // subscriber context under "TRACING_CONTEXT".
        final Mono<HttpDecodedResponse> asyncExpectedResponse =
            ensureExpectedStatus(asyncHttpDecodedResponse, methodParser)
                .doOnEach(RestProxy::endTracingSpan)
                .subscriberContext(reactor.util.context.Context.of("TRACING_CONTEXT", context));

        final Object result;
        if (TypeUtil.isTypeOrSubTypeOf(returnType, Mono.class)) {
            final Type monoTypeParam = TypeUtil.getTypeArgument(returnType);
            if (TypeUtil.isTypeOrSubTypeOf(monoTypeParam, Void.class)) {
                result = asyncExpectedResponse.then();
            } else {
                result = asyncExpectedResponse.flatMap(response ->
                    handleRestResponseReturnType(response, methodParser, monoTypeParam));
            }
        } else if (FluxUtil.isFluxByteBuffer(returnType)) {
            result = asyncExpectedResponse.flatMapMany(ar -> ar.sourceResponse().body());
        } else if (TypeUtil.isTypeOrSubTypeOf(returnType, void.class)
            || TypeUtil.isTypeOrSubTypeOf(returnType, Void.class)) {
            // Synchronous void return: block until the call completes.
            asyncExpectedResponse.block();
            result = null;
        } else {
            // Synchronous non-void return: block for the deserialized value.
            result = asyncExpectedResponse
                .flatMap(httpResponse -> handleRestResponseReturnType(httpResponse, methodParser, returnType))
                .block();
        }
        return result;
    }

    /**
     * Terminates the tracing span stashed in the subscriber context, recording the response status
     * code (or the error) on it. No-op for onComplete/onSubscribe signals or when no span exists.
     */
    private static void endTracingSpan(Signal<HttpDecodedResponse> signal) {
        if (signal.isOnComplete() || signal.isOnSubscribe()) {
            return;
        }

        reactor.util.context.Context context = signal.getContext();
        Optional<Context> tracingContext = context.getOrEmpty("TRACING_CONTEXT");
        if (!tracingContext.isPresent()) {
            return;
        }

        int statusCode = 0;
        HttpDecodedResponse httpDecodedResponse;
        Throwable throwable = null;

        if (signal.hasValue()) {
            httpDecodedResponse = signal.get();
            statusCode = httpDecodedResponse.sourceResponse().statusCode();
        } else if (signal.hasError()) {
            throwable = signal.getThrowable();
            if (throwable instanceof HttpResponseException) {
                HttpResponseException exception = (HttpResponseException) throwable;
                statusCode = exception.response().statusCode();
            }
        }
        TracerProxy.end(statusCode, throwable, tracingContext.get());
    }

    /**
     * Create an instance of the default serializer.
     *
     * @return the default serializer
     */
    private static SerializerAdapter createDefaultSerializer() {
        return JacksonAdapter.createDefaultSerializerAdapter();
    }

    /**
     * Create the default HttpPipeline.
     *
     * @return the default HttpPipeline
     */
    public static HttpPipeline createDefaultPipeline() {
        return createDefaultPipeline((HttpPipelinePolicy) null);
    }

    /**
     * Create the default HttpPipeline.
     *
     * @param credentials the credentials to use to apply authentication to the pipeline
     * @return the default HttpPipeline
     */
    public static HttpPipeline createDefaultPipeline(TokenCredential credentials) {
        return createDefaultPipeline(new BearerTokenAuthenticationPolicy(credentials));
    }

    /**
     * Create the default HttpPipeline.
     *
     * @param credentialsPolicy the credentials policy factory to use to apply authentication to the
     * pipeline
     * @return the default HttpPipeline
     */
    public static HttpPipeline createDefaultPipeline(HttpPipelinePolicy credentialsPolicy) {
        List<HttpPipelinePolicy> policies = new ArrayList<>();
        policies.add(new UserAgentPolicy());
        policies.add(new RetryPolicy());
        policies.add(new CookiePolicy());
        if (credentialsPolicy != null) {
            policies.add(credentialsPolicy);
        }

        return new HttpPipelineBuilder()
            .policies(policies.toArray(new HttpPipelinePolicy[0]))
            .build();
    }

    /**
     * Create a proxy implementation of the provided Swagger interface.
     *
     * @param swaggerInterface the Swagger interface to provide a proxy implementation for
     * @param <A> the type of the Swagger interface
     * @return a proxy implementation of the provided Swagger interface
     */
    public static <A> A create(Class<A> swaggerInterface) {
        return create(swaggerInterface, createDefaultPipeline(), createDefaultSerializer());
    }

    /**
     * Create a proxy implementation of the provided Swagger interface.
     *
     * @param swaggerInterface the Swagger interface to provide a proxy implementation for
     * @param httpPipeline the HttpPipelinePolicy and HttpClient pipeline that will be used to send Http requests
     * @param <A> the type of the Swagger interface
     * @return a proxy implementation of the provided Swagger interface
     */
    public static <A> A create(Class<A> swaggerInterface, HttpPipeline httpPipeline) {
        return create(swaggerInterface, httpPipeline, createDefaultSerializer());
    }

    /**
     * Create a proxy implementation of the provided Swagger interface.
     *
     * @param swaggerInterface the Swagger interface to provide a proxy implementation for
     * @param httpPipeline the HttpPipelinePolicy and HttpClient pipline that will be used to send Http
     * requests
     * @param serializer the serializer that will be used to convert POJOs to and from request and
     * response bodies
     * @param <A> the type of the Swagger interface.
     * @return a proxy implementation of the provided Swagger interface
     */
    @SuppressWarnings("unchecked")
    public static <A> A create(Class<A> swaggerInterface, HttpPipeline httpPipeline, SerializerAdapter serializer) {
        final SwaggerInterfaceParser interfaceParser = new SwaggerInterfaceParser(swaggerInterface, serializer);
        final RestProxy restProxy = new RestProxy(httpPipeline, serializer, interfaceParser);
        return (A) Proxy.newProxyInstance(swaggerInterface.getClassLoader(), new Class<?>[]{swaggerInterface},
            restProxy);
    }
}
class RestProxy implements InvocationHandler { private final ClientLogger logger = new ClientLogger(RestProxy.class); private final HttpPipeline httpPipeline; private final SerializerAdapter serializer; private final SwaggerInterfaceParser interfaceParser; private final HttpResponseDecoder decoder; /** * Create a RestProxy. * * @param httpPipeline the HttpPipelinePolicy and HttpClient httpPipeline that will be used to send HTTP * requests. * @param serializer the serializer that will be used to convert response bodies to POJOs. * @param interfaceParser the parser that contains information about the interface describing REST API methods * that this RestProxy "implements". */ public RestProxy(HttpPipeline httpPipeline, SerializerAdapter serializer, SwaggerInterfaceParser interfaceParser) { this.httpPipeline = httpPipeline; this.serializer = serializer; this.interfaceParser = interfaceParser; this.decoder = new HttpResponseDecoder(this.serializer); } /** * Get the SwaggerMethodParser for the provided method. The Method must exist on the Swagger * interface that this RestProxy was created to "implement". * * @param method the method to get a SwaggerMethodParser for * @return the SwaggerMethodParser for the provided method */ private SwaggerMethodParser methodParser(Method method) { return interfaceParser.methodParser(method); } /** * Get the SerializerAdapter used by this RestProxy. * * @return The SerializerAdapter used by this RestProxy */ public SerializerAdapter serializer() { return serializer; } /** * Send the provided request asynchronously, applying any request policies provided to the HttpClient instance. 
* * @param request the HTTP request to send * @param contextData the context * @return a {@link Mono} that emits HttpResponse asynchronously */ public Mono<HttpResponse> send(HttpRequest request, Context contextData) { return httpPipeline.send(request, contextData); } @Override public Object invoke(Object proxy, final Method method, Object[] args) { try { final SwaggerMethodParser methodParser; final HttpRequest request; if (method.isAnnotationPresent(ResumeOperation.class)) { OperationDescription opDesc = ImplUtils.findFirstOfType(args, OperationDescription.class); Method resumeMethod = determineResumeMethod(method, opDesc.methodName()); methodParser = methodParser(resumeMethod); request = createHttpRequest(opDesc, methodParser, args); final Type returnType = methodParser.returnType(); return handleResumeOperation(request, opDesc, methodParser, returnType, startTracingSpan(resumeMethod, Context.NONE)); } else { methodParser = methodParser(method); request = createHttpRequest(methodParser, args); Context context = methodParser.context(args).addData("caller-method", methodParser.fullyQualifiedMethodName()); context = startTracingSpan(method, context); if (request.body() != null) { request.body(validateLength(request)); } final Mono<HttpResponse> asyncResponse = send(request, context); Mono<HttpDecodedResponse> asyncDecodedResponse = this.decoder.decode(asyncResponse, methodParser); return handleHttpResponse(request, asyncDecodedResponse, methodParser, methodParser.returnType(), context); } } catch (Exception e) { throw logger.logExceptionAsError(Exceptions.propagate(e)); } } private Flux<ByteBuffer> validateLength(final HttpRequest request) { final Flux<ByteBuffer> bbFlux = request.body(); if (bbFlux == null) { return Flux.empty(); } return Flux.defer(() -> { Long expectedLength = Long.valueOf(request.headers().value("Content-Length")); final long[] currentTotalLength = new long[1]; return bbFlux.doOnEach(s -> { if (s.isOnNext()) { ByteBuffer byteBuffer = s.get(); 
int currentLength = (byteBuffer == null) ? 0 : byteBuffer.remaining(); currentTotalLength[0] += currentLength; if (currentTotalLength[0] > expectedLength) { throw logger.logExceptionAsError(new UnexpectedLengthException( String.format("Request body emitted %d bytes more than the expected %d bytes.", currentTotalLength[0], expectedLength), currentTotalLength[0], expectedLength)); } } else if (s.isOnComplete()) { if (expectedLength.compareTo(currentTotalLength[0]) != 0) { throw logger.logExceptionAsError(new UnexpectedLengthException( String.format("Request body emitted %d bytes less than the expected %d bytes.", currentTotalLength[0], expectedLength), currentTotalLength[0], expectedLength)); } } }); }); } private Method determineResumeMethod(Method method, String resumeMethodName) { for (Method potentialResumeMethod : method.getDeclaringClass().getMethods()) { if (potentialResumeMethod.getName().equals(resumeMethodName)) { return potentialResumeMethod; } } return null; } /** * Starts the tracing span for the current service call, additionally set metadata attributes on the span by passing * additional context information. * @param method Service method being called. * @param context Context information about the current service call. * @return The updated context containing the span context. */ private Context startTracingSpan(Method method, Context context) { String spanName = String.format("Azure.%s/%s", interfaceParser.serviceName(), method.getName()); context = TracerProxy.setSpanName(spanName, context); return TracerProxy.start(spanName, context); } /** * Create a HttpRequest for the provided Swagger method using the provided arguments. 
* * @param methodParser the Swagger method parser to use * @param args the arguments to use to populate the method's annotation values * @return a HttpRequest * @throws IOException thrown if the body contents cannot be serialized */ private HttpRequest createHttpRequest(SwaggerMethodParser methodParser, Object[] args) throws IOException { UrlBuilder urlBuilder; final String path = methodParser.path(args); final UrlBuilder pathUrlBuilder = UrlBuilder.parse(path); if (pathUrlBuilder.scheme() != null) { urlBuilder = pathUrlBuilder; } else { urlBuilder = new UrlBuilder(); final String scheme = methodParser.scheme(args); urlBuilder.scheme(scheme); final String host = methodParser.host(args); urlBuilder.host(host); if (path != null && !path.isEmpty() && !path.equals("/")) { String hostPath = urlBuilder.path(); if (hostPath == null || hostPath.isEmpty() || hostPath.equals("/")) { urlBuilder.path(path); } else { urlBuilder.path(hostPath + "/" + path); } } } for (final EncodedParameter queryParameter : methodParser.encodedQueryParameters(args)) { urlBuilder.setQueryParameter(queryParameter.name(), queryParameter.encodedValue()); } final URL url = urlBuilder.toURL(); final HttpRequest request = configRequest(new HttpRequest(methodParser.httpMethod(), url), methodParser, args); for (final HttpHeader header : methodParser.headers(args)) { request.header(header.name(), header.value()); } return request; } /** * Create a HttpRequest for the provided Swagger method using the provided arguments. 
* * @param methodParser the Swagger method parser to use * @param args the arguments to use to populate the method's annotation values * @return a HttpRequest * @throws IOException thrown if the body contents cannot be serialized */ private HttpRequest createHttpRequest(OperationDescription operationDescription, SwaggerMethodParser methodParser, Object[] args) throws IOException { final HttpRequest request = configRequest(new HttpRequest(methodParser.httpMethod(), operationDescription.url()), methodParser, args); for (final String headerName : operationDescription.headers().keySet()) { request.header(headerName, operationDescription.headers().get(headerName)); } return request; } @SuppressWarnings("unchecked") private Mono<HttpDecodedResponse> ensureExpectedStatus(Mono<HttpDecodedResponse> asyncDecodedResponse, final SwaggerMethodParser methodParser) { return asyncDecodedResponse .flatMap(decodedHttpResponse -> ensureExpectedStatus(decodedHttpResponse, methodParser, null)); } private static Exception instantiateUnexpectedException(UnexpectedExceptionInformation exception, HttpResponse httpResponse, String responseContent, Object responseDecodedContent) { final int responseStatusCode = httpResponse.statusCode(); String contentType = httpResponse.headerValue("Content-Type"); String bodyRepresentation; if ("application/octet-stream".equalsIgnoreCase(contentType)) { bodyRepresentation = "(" + httpResponse.headerValue("Content-Length") + "-byte body)"; } else { bodyRepresentation = responseContent.isEmpty() ? "(empty body)" : "\"" + responseContent + "\""; } Exception result; try { final Constructor<? 
extends HttpResponseException> exceptionConstructor = exception.exceptionType().getConstructor(String.class, HttpResponse.class, exception.exceptionBodyType()); result = exceptionConstructor.newInstance("Status code " + responseStatusCode + ", " + bodyRepresentation, httpResponse, responseDecodedContent); } catch (ReflectiveOperationException e) { String message = "Status code " + responseStatusCode + ", but an instance of " + exception.exceptionType().getCanonicalName() + " cannot be created." + " Response body: " + bodyRepresentation; result = new IOException(message, e); } return result; } /** * Create a publisher that (1) emits error if the provided response {@code decodedResponse} has * 'disallowed status code' OR (2) emits provided response if it's status code ia allowed. * * 'disallowed status code' is one of the status code defined in the provided SwaggerMethodParser * or is in the int[] of additional allowed status codes. * * @param decodedResponse The HttpResponse to check. * @param methodParser The method parser that contains information about the service interface * method that initiated the HTTP request. * @param additionalAllowedStatusCodes Additional allowed status codes that are permitted based * on the context of the HTTP request. * @return An async-version of the provided decodedResponse. 
*/ public Mono<HttpDecodedResponse> ensureExpectedStatus(final HttpDecodedResponse decodedResponse, final SwaggerMethodParser methodParser, int[] additionalAllowedStatusCodes) { final int responseStatusCode = decodedResponse.sourceResponse().statusCode(); final Mono<HttpDecodedResponse> asyncResult; if (!methodParser.isExpectedResponseStatusCode(responseStatusCode, additionalAllowedStatusCodes)) { Mono<String> bodyAsString = decodedResponse.sourceResponse().bodyAsString(); asyncResult = bodyAsString.flatMap((Function<String, Mono<HttpDecodedResponse>>) responseContent -> { Mono<Object> decodedErrorBody = decodedResponse.decodedBody(); return decodedErrorBody.flatMap((Function<Object, Mono<HttpDecodedResponse>>) responseDecodedErrorObject -> { Throwable exception = instantiateUnexpectedException(methodParser.getUnexpectedException(responseStatusCode), decodedResponse.sourceResponse(), responseContent, responseDecodedErrorObject); return Mono.error(exception); }).switchIfEmpty(Mono.defer((Supplier<Mono<HttpDecodedResponse>>) () -> { Throwable exception = instantiateUnexpectedException(methodParser.getUnexpectedException(responseStatusCode), decodedResponse.sourceResponse(), responseContent, null); return Mono.error(exception); })); }).switchIfEmpty(Mono.defer((Supplier<Mono<HttpDecodedResponse>>) () -> { Throwable exception = instantiateUnexpectedException(methodParser.getUnexpectedException(responseStatusCode), decodedResponse.sourceResponse(), "", null); return Mono.error(exception); })); } else { asyncResult = Mono.just(decodedResponse); } return asyncResult; } private Mono<?> handleRestResponseReturnType(HttpDecodedResponse response, SwaggerMethodParser methodParser, Type entityType) { Mono<?> asyncResult; if (TypeUtil.isTypeOrSubTypeOf(entityType, Response.class)) { Type bodyType = TypeUtil.getRestResponseBodyType(entityType); if (TypeUtil.isTypeOrSubTypeOf(bodyType, Void.class)) { asyncResult = response.sourceResponse().body().ignoreElements() 
.then(Mono.just(createResponse(response, entityType, null))); } else { asyncResult = handleBodyReturnType(response, methodParser, bodyType) .map((Function<Object, Response<?>>) bodyAsObject -> createResponse(response, entityType, bodyAsObject)) .switchIfEmpty(Mono.defer((Supplier<Mono<Response<?>>>) () -> Mono.just(createResponse(response, entityType, null)))); } } else { asyncResult = handleBodyReturnType(response, methodParser, entityType); } return asyncResult; } @SuppressWarnings("unchecked") private Response<?> createResponse(HttpDecodedResponse response, Type entityType, Object bodyAsObject) { final HttpResponse httpResponse = response.sourceResponse(); final HttpRequest httpRequest = httpResponse.request(); final int responseStatusCode = httpResponse.statusCode(); final HttpHeaders responseHeaders = httpResponse.headers(); Class<? extends Response<?>> cls = (Class<? extends Response<?>>) TypeUtil.getRawClass(entityType); if (cls.equals(Response.class)) { cls = (Class<? extends Response<?>>) (Object) ResponseBase.class; } else if (cls.equals(PagedResponse.class)) { cls = (Class<? extends Response<?>>) (Object) PagedResponseBase.class; if (bodyAsObject != null && !TypeUtil.isTypeOrSubTypeOf(bodyAsObject.getClass(), Page.class)) { throw logger.logExceptionAsError(new RuntimeException("Unable to create PagedResponse<T>. Body must be of a type that implements: " + Page.class)); } } List<Constructor<?>> constructors = Arrays.stream(cls.getDeclaredConstructors()) .filter(constructor -> { int paramCount = constructor.getParameterCount(); return paramCount >= 3 && paramCount <= 5; }) .sorted(Comparator.comparingInt(Constructor::getParameterCount)) .collect(Collectors.toList()); if (constructors.isEmpty()) { throw logger.logExceptionAsError(new RuntimeException("Cannot find suitable constructor for class " + cls)); } for (Constructor<?> constructor : constructors) { final Constructor<? extends Response<?>> ctor = (Constructor<? 
extends Response<?>>) constructor; try { final int paramCount = constructor.getParameterCount(); switch (paramCount) { case 3: return ctor.newInstance(httpRequest, responseStatusCode, responseHeaders); case 4: return ctor.newInstance(httpRequest, responseStatusCode, responseHeaders, bodyAsObject); case 5: return ctor.newInstance(httpRequest, responseStatusCode, responseHeaders, bodyAsObject, response.decodedHeaders().block()); default: throw logger.logExceptionAsError(new IllegalStateException("Response constructor with expected parameters not found.")); } } catch (IllegalAccessException | InvocationTargetException | InstantiationException e) { throw logger.logExceptionAsError(reactor.core.Exceptions.propagate(e)); } } throw logger.logExceptionAsError(new RuntimeException("Cannot find suitable constructor for class " + cls)); } protected final Mono<?> handleBodyReturnType(final HttpDecodedResponse response, final SwaggerMethodParser methodParser, final Type entityType) { final int responseStatusCode = response.sourceResponse().statusCode(); final HttpMethod httpMethod = methodParser.httpMethod(); final Type returnValueWireType = methodParser.returnValueWireType(); final Mono<?> asyncResult; if (httpMethod == HttpMethod.HEAD && (TypeUtil.isTypeOrSubTypeOf(entityType, Boolean.TYPE) || TypeUtil.isTypeOrSubTypeOf(entityType, Boolean.class))) { boolean isSuccess = (responseStatusCode / 100) == 2; asyncResult = Mono.just(isSuccess); } else if (TypeUtil.isTypeOrSubTypeOf(entityType, byte[].class)) { Mono<byte[]> responseBodyBytesAsync = response.sourceResponse().bodyAsByteArray(); if (returnValueWireType == Base64Url.class) { responseBodyBytesAsync = responseBodyBytesAsync.map(base64UrlBytes -> new Base64Url(base64UrlBytes).decodedBytes()); } asyncResult = responseBodyBytesAsync; } else if (FluxUtil.isFluxByteBuffer(entityType)) { asyncResult = Mono.just(response.sourceResponse().body()); } else { asyncResult = response.decodedBody(); } return asyncResult; } protected 
Object handleHttpResponse(final HttpRequest httpRequest, Mono<HttpDecodedResponse> asyncDecodedHttpResponse, SwaggerMethodParser methodParser, Type returnType, Context context) { return handleRestReturnType(asyncDecodedHttpResponse, methodParser, returnType, context); } protected Object handleResumeOperation(HttpRequest httpRequest, OperationDescription operationDescription, SwaggerMethodParser methodParser, Type returnType, Context context) { throw logger.logExceptionAsError(Exceptions.propagate(new Exception("The resume operation is not available in the base RestProxy class."))); } /** * Handle the provided asynchronous HTTP response and return the deserialized value. * * @param asyncHttpDecodedResponse the asynchronous HTTP response to the original HTTP request * @param methodParser the SwaggerMethodParser that the request originates from * @param returnType the type of value that will be returned * @param context Additional context that is passed through the Http pipeline during the service call. 
* @return the deserialized result */ public final Object handleRestReturnType(Mono<HttpDecodedResponse> asyncHttpDecodedResponse, final SwaggerMethodParser methodParser, final Type returnType, Context context) { final Mono<HttpDecodedResponse> asyncExpectedResponse = ensureExpectedStatus(asyncHttpDecodedResponse, methodParser) .doOnEach(RestProxy::endTracingSpan) .subscriberContext(reactor.util.context.Context.of("TRACING_CONTEXT", context)); final Object result; if (TypeUtil.isTypeOrSubTypeOf(returnType, Mono.class)) { final Type monoTypeParam = TypeUtil.getTypeArgument(returnType); if (TypeUtil.isTypeOrSubTypeOf(monoTypeParam, Void.class)) { result = asyncExpectedResponse.then(); } else { result = asyncExpectedResponse.flatMap(response -> handleRestResponseReturnType(response, methodParser, monoTypeParam)); } } else if (FluxUtil.isFluxByteBuffer(returnType)) { result = asyncExpectedResponse.flatMapMany(ar -> ar.sourceResponse().body()); } else if (TypeUtil.isTypeOrSubTypeOf(returnType, void.class) || TypeUtil.isTypeOrSubTypeOf(returnType, Void.class)) { asyncExpectedResponse.block(); result = null; } else { result = asyncExpectedResponse .flatMap(httpResponse -> handleRestResponseReturnType(httpResponse, methodParser, returnType)) .block(); } return result; } private static void endTracingSpan(Signal<HttpDecodedResponse> signal) { if (signal.isOnComplete() || signal.isOnSubscribe()) { return; } reactor.util.context.Context context = signal.getContext(); Optional<Context> tracingContext = context.getOrEmpty("TRACING_CONTEXT"); if (!tracingContext.isPresent()) { return; } int statusCode = 0; HttpDecodedResponse httpDecodedResponse; Throwable throwable = null; if (signal.hasValue()) { httpDecodedResponse = signal.get(); statusCode = httpDecodedResponse.sourceResponse().statusCode(); } else if (signal.hasError()) { throwable = signal.getThrowable(); if (throwable instanceof HttpResponseException) { HttpResponseException exception = (HttpResponseException) throwable; 
statusCode = exception.response().statusCode(); } } TracerProxy.end(statusCode, throwable, tracingContext.get()); } /** * Create an instance of the default serializer. * * @return the default serializer */ private static SerializerAdapter createDefaultSerializer() { return JacksonAdapter.createDefaultSerializerAdapter(); } /** * Create the default HttpPipeline. * * @return the default HttpPipeline */ public static HttpPipeline createDefaultPipeline() { return createDefaultPipeline((HttpPipelinePolicy) null); } /** * Create the default HttpPipeline. * * @param credentials the credentials to use to apply authentication to the pipeline * @return the default HttpPipeline */ public static HttpPipeline createDefaultPipeline(TokenCredential credentials) { return createDefaultPipeline(new BearerTokenAuthenticationPolicy(credentials)); } /** * Create the default HttpPipeline. * @param credentialsPolicy the credentials policy factory to use to apply authentication to the * pipeline * @return the default HttpPipeline */ public static HttpPipeline createDefaultPipeline(HttpPipelinePolicy credentialsPolicy) { List<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add(new UserAgentPolicy()); policies.add(new RetryPolicy()); policies.add(new CookiePolicy()); if (credentialsPolicy != null) { policies.add(credentialsPolicy); } return new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .build(); } /** * Create a proxy implementation of the provided Swagger interface. * * @param swaggerInterface the Swagger interface to provide a proxy implementation for * @param <A> the type of the Swagger interface * @return a proxy implementation of the provided Swagger interface */ public static <A> A create(Class<A> swaggerInterface) { return create(swaggerInterface, createDefaultPipeline(), createDefaultSerializer()); } /** * Create a proxy implementation of the provided Swagger interface. 
* * @param swaggerInterface the Swagger interface to provide a proxy implementation for * @param httpPipeline the HttpPipelinePolicy and HttpClient pipeline that will be used to send Http requests * @param <A> the type of the Swagger interface * @return a proxy implementation of the provided Swagger interface */ public static <A> A create(Class<A> swaggerInterface, HttpPipeline httpPipeline) { return create(swaggerInterface, httpPipeline, createDefaultSerializer()); } /** * Create a proxy implementation of the provided Swagger interface. * * @param swaggerInterface the Swagger interface to provide a proxy implementation for * @param httpPipeline the HttpPipelinePolicy and HttpClient pipline that will be used to send Http * requests * @param serializer the serializer that will be used to convert POJOs to and from request and * response bodies * @param <A> the type of the Swagger interface. * @return a proxy implementation of the provided Swagger interface */ @SuppressWarnings("unchecked") public static <A> A create(Class<A> swaggerInterface, HttpPipeline httpPipeline, SerializerAdapter serializer) { final SwaggerInterfaceParser interfaceParser = new SwaggerInterfaceParser(swaggerInterface, serializer); final RestProxy restProxy = new RestProxy(httpPipeline, serializer, interfaceParser); return (A) Proxy.newProxyInstance(swaggerInterface.getClassLoader(), new Class<?>[]{swaggerInterface}, restProxy); } }
Done in the [PR](https://github.com/Azure/azure-sdk-for-java/pull/5222), added similar message for all null checks.
public OkHttpAsyncHttpClientBuilder(okhttp3.OkHttpClient okHttpClient) { this.okHttpClient = Objects.requireNonNull(okHttpClient, "okHttpClient == null"); }
this.okHttpClient = Objects.requireNonNull(okHttpClient, "okHttpClient == null");
public OkHttpAsyncHttpClientBuilder(okhttp3.OkHttpClient okHttpClient) { this.okHttpClient = Objects.requireNonNull(okHttpClient, "okHttpClient == null"); }
class OkHttpAsyncHttpClientBuilder { private final ClientLogger logger = new ClientLogger(OkHttpAsyncHttpClientBuilder.class); private final okhttp3.OkHttpClient okHttpClient; private final static Duration DEFAULT_READ_TIMEOUT = Duration.ofSeconds(120); private final static Duration DEFAULT_CONNECT_TIMEOUT = Duration.ofSeconds(60); private List<Interceptor> networkInterceptors = new ArrayList<>(); private Duration readTimeout; private Duration connectionTimeout; private ConnectionPool connectionPool; private Dispatcher dispatcher; private java.net.Proxy proxy; private Authenticator proxyAuthenticator; /** * Creates OkHttpAsyncHttpClientBuilder. */ public OkHttpAsyncHttpClientBuilder() { this.okHttpClient = null; } /** * Creates OkHttpAsyncHttpClientBuilder from the builder of an existing OkHttpClient. * * @param okHttpClient the httpclient */ /** * Add a network layer interceptor to Http request pipeline. * * @param networkInterceptor the interceptor to add * @return the builder */ public OkHttpAsyncHttpClientBuilder networkInterceptor(Interceptor networkInterceptor) { Objects.requireNonNull(networkInterceptor); this.networkInterceptors.add(networkInterceptor); return this; } /** * Add network layer interceptors to Http request pipeline. * * This replaces all previously-set interceptors. * * @param networkInterceptors the interceptors to add * @return the builder */ public OkHttpAsyncHttpClientBuilder networkInterceptors(List<Interceptor> networkInterceptors) { this.networkInterceptors = Objects.requireNonNull(networkInterceptors); return this; } /** * Sets the read timeout. * * The default read timeout is 120 seconds. * * @param readTimeout the timeout * @return the builder */ public OkHttpAsyncHttpClientBuilder readTimeout(Duration readTimeout) { this.readTimeout = readTimeout; return this; } /** * Sets the connection timeout. * * The default read timeout is 60 seconds. 
* * @param connectionTimeout the timeout * @return the builder */ public OkHttpAsyncHttpClientBuilder connectionTimeout(Duration connectionTimeout) { this.connectionTimeout = connectionTimeout; return this; } /** * Sets the Http connection pool. * * @param connectionPool the OkHttp connection pool to use * @return the builder */ public OkHttpAsyncHttpClientBuilder connectionPool(ConnectionPool connectionPool) { this.connectionPool = Objects.requireNonNull(connectionPool, "connectionPool == null"); return this; } /** * Sets the dispatcher that also composes the thread pool for executing HTTP requests. * * @param dispatcher the dispatcher to use * @return the builder */ public OkHttpAsyncHttpClientBuilder dispatcher(Dispatcher dispatcher) { this.dispatcher = Objects.requireNonNull(dispatcher, "dispatcher == null"); return this; } /** * Sets the proxy. * * @param proxy the proxy * @return the builder */ public OkHttpAsyncHttpClientBuilder proxy(java.net.Proxy proxy) { this.proxy = proxy; return this; } /** * Sets the proxy authenticator. * * @param proxyAuthenticator the proxy authenticator * @return the builder */ public OkHttpAsyncHttpClientBuilder proxyAuthenticator(Authenticator proxyAuthenticator) { this.proxyAuthenticator = Objects.requireNonNull(proxyAuthenticator, "proxyAuthenticator == null"); return this; } /** * Build a HttpClient with current configurations. * * @return a {@link HttpClient}. */ public HttpClient build() { OkHttpClient.Builder httpClientBuilder = this.okHttpClient == null ? 
new OkHttpClient.Builder() : this.okHttpClient.newBuilder(); for (Interceptor interceptor : this.networkInterceptors) { httpClientBuilder = httpClientBuilder.addNetworkInterceptor(interceptor); } if (this.readTimeout != null) { httpClientBuilder = httpClientBuilder.readTimeout(this.readTimeout); } else { httpClientBuilder = httpClientBuilder.readTimeout(DEFAULT_READ_TIMEOUT); } if (this.connectionTimeout != null) { httpClientBuilder = httpClientBuilder.connectTimeout(this.connectionTimeout); } else { httpClientBuilder = httpClientBuilder.connectTimeout(DEFAULT_CONNECT_TIMEOUT); } if (this.connectionPool != null) { httpClientBuilder = httpClientBuilder.connectionPool(connectionPool); } if (this.dispatcher != null) { httpClientBuilder = httpClientBuilder.dispatcher(dispatcher); } httpClientBuilder = httpClientBuilder.proxy(this.proxy); if (this.proxyAuthenticator != null) { httpClientBuilder = httpClientBuilder.authenticator(this.proxyAuthenticator); } return new OkHttpAsyncHttpClient(httpClientBuilder.build()); } }
class OkHttpAsyncHttpClientBuilder { private final ClientLogger logger = new ClientLogger(OkHttpAsyncHttpClientBuilder.class); private final okhttp3.OkHttpClient okHttpClient; private final static Duration DEFAULT_READ_TIMEOUT = Duration.ofSeconds(120); private final static Duration DEFAULT_CONNECT_TIMEOUT = Duration.ofSeconds(60); private List<Interceptor> networkInterceptors = new ArrayList<>(); private Duration readTimeout; private Duration connectionTimeout; private ConnectionPool connectionPool; private Dispatcher dispatcher; private java.net.Proxy proxy; private Authenticator proxyAuthenticator; /** * Creates OkHttpAsyncHttpClientBuilder. */ public OkHttpAsyncHttpClientBuilder() { this.okHttpClient = null; } /** * Creates OkHttpAsyncHttpClientBuilder from the builder of an existing OkHttpClient. * * @param okHttpClient the httpclient */ /** * Add a network layer interceptor to Http request pipeline. * * @param networkInterceptor the interceptor to add * @return the builder */ public OkHttpAsyncHttpClientBuilder networkInterceptor(Interceptor networkInterceptor) { Objects.requireNonNull(networkInterceptor); this.networkInterceptors.add(networkInterceptor); return this; } /** * Add network layer interceptors to Http request pipeline. * * This replaces all previously-set interceptors. * * @param networkInterceptors the interceptors to add * @return the builder */ public OkHttpAsyncHttpClientBuilder networkInterceptors(List<Interceptor> networkInterceptors) { this.networkInterceptors = Objects.requireNonNull(networkInterceptors); return this; } /** * Sets the read timeout. * * The default read timeout is 120 seconds. * * @param readTimeout the timeout * @return the builder */ public OkHttpAsyncHttpClientBuilder readTimeout(Duration readTimeout) { this.readTimeout = readTimeout; return this; } /** * Sets the connection timeout. * * The default read timeout is 60 seconds. 
* * @param connectionTimeout the timeout * @return the builder */ public OkHttpAsyncHttpClientBuilder connectionTimeout(Duration connectionTimeout) { this.connectionTimeout = connectionTimeout; return this; } /** * Sets the Http connection pool. * * @param connectionPool the OkHttp connection pool to use * @return the builder */ public OkHttpAsyncHttpClientBuilder connectionPool(ConnectionPool connectionPool) { this.connectionPool = Objects.requireNonNull(connectionPool, "connectionPool == null"); return this; } /** * Sets the dispatcher that also composes the thread pool for executing HTTP requests. * * @param dispatcher the dispatcher to use * @return the builder */ public OkHttpAsyncHttpClientBuilder dispatcher(Dispatcher dispatcher) { this.dispatcher = Objects.requireNonNull(dispatcher, "dispatcher == null"); return this; } /** * Sets the proxy. * * @param proxy the proxy * @return the builder */ public OkHttpAsyncHttpClientBuilder proxy(java.net.Proxy proxy) { this.proxy = proxy; return this; } /** * Sets the proxy authenticator. * * @param proxyAuthenticator the proxy authenticator * @return the builder */ public OkHttpAsyncHttpClientBuilder proxyAuthenticator(Authenticator proxyAuthenticator) { this.proxyAuthenticator = Objects.requireNonNull(proxyAuthenticator, "proxyAuthenticator == null"); return this; } /** * Build a HttpClient with current configurations. * * @return a {@link HttpClient}. */ public HttpClient build() { OkHttpClient.Builder httpClientBuilder = this.okHttpClient == null ? 
new OkHttpClient.Builder() : this.okHttpClient.newBuilder(); for (Interceptor interceptor : this.networkInterceptors) { httpClientBuilder = httpClientBuilder.addNetworkInterceptor(interceptor); } if (this.readTimeout != null) { httpClientBuilder = httpClientBuilder.readTimeout(this.readTimeout); } else { httpClientBuilder = httpClientBuilder.readTimeout(DEFAULT_READ_TIMEOUT); } if (this.connectionTimeout != null) { httpClientBuilder = httpClientBuilder.connectTimeout(this.connectionTimeout); } else { httpClientBuilder = httpClientBuilder.connectTimeout(DEFAULT_CONNECT_TIMEOUT); } if (this.connectionPool != null) { httpClientBuilder = httpClientBuilder.connectionPool(connectionPool); } if (this.dispatcher != null) { httpClientBuilder = httpClientBuilder.dispatcher(dispatcher); } httpClientBuilder = httpClientBuilder.proxy(this.proxy); if (this.proxyAuthenticator != null) { httpClientBuilder = httpClientBuilder.authenticator(this.proxyAuthenticator); } return new OkHttpAsyncHttpClient(httpClientBuilder.build()); } }
Good question @srnagar :) Q1: – *if there's a difference between a null responseBody ..*: `okhttp3.Response::body()` getter will not return null for server returned responses. It can be null only if we build response manually with null body (e.g. for mocking) or for the cases described here [ref](https://square.github.io/okhttp/4.x/okhttp/okhttp3/-response/body/). Those null cases are not applicable for OkHttp HttpClient impl. I added the null check as a defensive check, those might never hit. Q2: - *using Mono.just(content) [where content=new byte[0]] vs Mono.empty()* If content part is absent in a http wire response then content getters in `okhttp3.ResponseBody` instance uses Java empty semantic. i.e. `okhttp3.ResponseBody::bytes()` getter return `new byte[0]`, `okhttp3.ResponseBody::string()` returns "". Similarly Netty uses empty `ByteBuf` i.e. `ByteBuf` of size 0 if content part is absent. In reactive, the pattern of using `Flux::empty()/Mono::empty()` for absence of value is common. If we look at reactor-netty as an example, it translate an empty `ByteBuf` from Netty to `Flux::empty()`. Our reactor-netty azure-core HttpClient plugin follows the same pattern. Any implementation of azure-core HttpClient is expected to follow this pattern, hence okHttp HttpClient plugin. --- It looks like I need to clarify above points in the code to make it readable for others. I've opened a [PR](https://github.com/Azure/azure-sdk-for-java/pull/5222) same behavior but better readability. It also centralize resource management. Thanks for raising this question.
public Mono<byte[]> bodyAsByteArray() { if (this.responseBody() == null) { return Mono.empty(); } else { return Mono.using(() -> this.responseBody(), rb -> { try { byte[] content = rb.bytes(); return content.length == 0 ? Mono.empty() : Mono.just(content); } catch (IOException ioe) { throw Exceptions.propagate(ioe); } }, rb -> rb.close()); } }
return content.length == 0 ? Mono.empty() : Mono.just(content);
public Mono<byte[]> bodyAsByteArray() { if (this.responseBody() == null) { return Mono.empty(); } else { return Mono.using(() -> this.responseBody(), rb -> { try { byte[] content = rb.bytes(); return content.length == 0 ? Mono.empty() : Mono.just(content); } catch (IOException ioe) { throw Exceptions.propagate(ioe); } }, rb -> rb.close()); } }
class OkHttpResponse extends HttpResponse { private final okhttp3.Response inner; private final HttpHeaders headers; private final static int BYTE_BUFFER_CHUNK_SIZE = 1024; public OkHttpResponse(okhttp3.Response inner, HttpRequest request) { this.inner = inner; this.headers = fromOkHttpHeaders(this.inner.headers()); super.request(request); } @Override public int statusCode() { return this.inner.code(); } @Override public String headerValue(String name) { return this.headers.value(name); } @Override public HttpHeaders headers() { return this.headers; } @Override public Flux<ByteBuffer> body() { return this.responseBody() != null ? toFluxByteBuffer(this.responseBody().byteStream()) : Flux.empty(); } @Override @Override public Mono<String> bodyAsString() { if (this.responseBody() == null) { return Mono.empty(); } else { return Mono.using(() -> this.responseBody(), rb -> { try { String content = rb.string(); return content.length() == 0 ? Mono.empty() : Mono.just(content); } catch (IOException ioe) { throw Exceptions.propagate(ioe); } }, rb -> rb.close()); } } @Override public Mono<String> bodyAsString(Charset charset) { return bodyAsByteArray() .map(bytes -> new String(bytes, charset)); } @Override public void close() { if (this.inner.body() != null) { this.inner.body().close(); } } private okhttp3.ResponseBody responseBody() { return this.inner.body(); } /** * Creates azure-core HttpHeaders from okhttp headers. * * @param headers okhttp headers * @return azure-core HttpHeaders */ private static HttpHeaders fromOkHttpHeaders(okhttp3.Headers headers) { HttpHeaders httpHeaders = new HttpHeaders(); for (String headerName : headers.names()) { httpHeaders.put(headerName, headers.get(headerName)); } return httpHeaders; } /** * Creates a Flux of ByteBuffer, with each ByteBuffer wrapping bytes read from the given * InputStream. 
* * @param inputStream InputStream to back the Flux * @return Flux of ByteBuffer backed by the InputStream */ private static Flux<ByteBuffer> toFluxByteBuffer(InputStream inputStream) { Pair pair = new Pair(); return Flux.using(() -> inputStream, is -> Flux.just(true) .repeat() .map(ignore -> { byte[] buffer = new byte[BYTE_BUFFER_CHUNK_SIZE]; try { int numBytes = is.read(buffer); if (numBytes > 0) { return pair.buffer(ByteBuffer.wrap(buffer, 0, numBytes)).readBytes(numBytes); } else { return pair.buffer(null).readBytes(numBytes); } } catch (IOException ioe) { throw Exceptions.propagate(ioe); } }) .takeUntil(p -> p.readBytes() == -1) .filter(p -> p.readBytes() > 0) .map(p -> p.buffer()), is -> { try { is.close(); } catch (IOException ioe) { throw Exceptions.propagate(ioe); } } ); } private static class Pair { private ByteBuffer byteBuffer; private int readBytes; ByteBuffer buffer() { return this.byteBuffer; } int readBytes() { return this.readBytes; } Pair buffer(ByteBuffer byteBuffer) { this.byteBuffer = byteBuffer; return this; } Pair readBytes(int cnt) { this.readBytes = cnt; return this; } } }
class OkHttpResponse extends HttpResponse { private final okhttp3.Response inner; private final HttpHeaders headers; private final static int BYTE_BUFFER_CHUNK_SIZE = 1024; public OkHttpResponse(okhttp3.Response inner, HttpRequest request) { this.inner = inner; this.headers = fromOkHttpHeaders(this.inner.headers()); super.request(request); } @Override public int statusCode() { return this.inner.code(); } @Override public String headerValue(String name) { return this.headers.value(name); } @Override public HttpHeaders headers() { return this.headers; } @Override public Flux<ByteBuffer> body() { return this.responseBody() != null ? toFluxByteBuffer(this.responseBody().byteStream()) : Flux.empty(); } @Override @Override public Mono<String> bodyAsString() { if (this.responseBody() == null) { return Mono.empty(); } else { return Mono.using(() -> this.responseBody(), rb -> { try { String content = rb.string(); return content.length() == 0 ? Mono.empty() : Mono.just(content); } catch (IOException ioe) { throw Exceptions.propagate(ioe); } }, rb -> rb.close()); } } @Override public Mono<String> bodyAsString(Charset charset) { return bodyAsByteArray() .map(bytes -> new String(bytes, charset)); } @Override public void close() { if (this.inner.body() != null) { this.inner.body().close(); } } private okhttp3.ResponseBody responseBody() { return this.inner.body(); } /** * Creates azure-core HttpHeaders from okhttp headers. * * @param headers okhttp headers * @return azure-core HttpHeaders */ private static HttpHeaders fromOkHttpHeaders(okhttp3.Headers headers) { HttpHeaders httpHeaders = new HttpHeaders(); for (String headerName : headers.names()) { httpHeaders.put(headerName, headers.get(headerName)); } return httpHeaders; } /** * Creates a Flux of ByteBuffer, with each ByteBuffer wrapping bytes read from the given * InputStream. 
* * @param inputStream InputStream to back the Flux * @return Flux of ByteBuffer backed by the InputStream */ private static Flux<ByteBuffer> toFluxByteBuffer(InputStream inputStream) { Pair pair = new Pair(); return Flux.using(() -> inputStream, is -> Flux.just(true) .repeat() .map(ignore -> { byte[] buffer = new byte[BYTE_BUFFER_CHUNK_SIZE]; try { int numBytes = is.read(buffer); if (numBytes > 0) { return pair.buffer(ByteBuffer.wrap(buffer, 0, numBytes)).readBytes(numBytes); } else { return pair.buffer(null).readBytes(numBytes); } } catch (IOException ioe) { throw Exceptions.propagate(ioe); } }) .takeUntil(p -> p.readBytes() == -1) .filter(p -> p.readBytes() > 0) .map(p -> p.buffer()), is -> { try { is.close(); } catch (IOException ioe) { throw Exceptions.propagate(ioe); } } ); } private static class Pair { private ByteBuffer byteBuffer; private int readBytes; ByteBuffer buffer() { return this.byteBuffer; } int readBytes() { return this.readBytes; } Pair buffer(ByteBuffer byteBuffer) { this.byteBuffer = byteBuffer; return this; } Pair readBytes(int cnt) { this.readBytes = cnt; return this; } } }
See the comment below.
public Mono<String> bodyAsString() { if (this.responseBody() == null) { return Mono.empty(); } else { return Mono.using(() -> this.responseBody(), rb -> { try { String content = rb.string(); return content.length() == 0 ? Mono.empty() : Mono.just(content); } catch (IOException ioe) { throw Exceptions.propagate(ioe); } }, rb -> rb.close()); } }
return content.length() == 0 ? Mono.empty() : Mono.just(content);
public Mono<String> bodyAsString() { if (this.responseBody() == null) { return Mono.empty(); } else { return Mono.using(() -> this.responseBody(), rb -> { try { String content = rb.string(); return content.length() == 0 ? Mono.empty() : Mono.just(content); } catch (IOException ioe) { throw Exceptions.propagate(ioe); } }, rb -> rb.close()); } }
class OkHttpResponse extends HttpResponse { private final okhttp3.Response inner; private final HttpHeaders headers; private final static int BYTE_BUFFER_CHUNK_SIZE = 1024; public OkHttpResponse(okhttp3.Response inner, HttpRequest request) { this.inner = inner; this.headers = fromOkHttpHeaders(this.inner.headers()); super.request(request); } @Override public int statusCode() { return this.inner.code(); } @Override public String headerValue(String name) { return this.headers.value(name); } @Override public HttpHeaders headers() { return this.headers; } @Override public Flux<ByteBuffer> body() { return this.responseBody() != null ? toFluxByteBuffer(this.responseBody().byteStream()) : Flux.empty(); } @Override public Mono<byte[]> bodyAsByteArray() { if (this.responseBody() == null) { return Mono.empty(); } else { return Mono.using(() -> this.responseBody(), rb -> { try { byte[] content = rb.bytes(); return content.length == 0 ? Mono.empty() : Mono.just(content); } catch (IOException ioe) { throw Exceptions.propagate(ioe); } }, rb -> rb.close()); } } @Override @Override public Mono<String> bodyAsString(Charset charset) { return bodyAsByteArray() .map(bytes -> new String(bytes, charset)); } @Override public void close() { if (this.inner.body() != null) { this.inner.body().close(); } } private okhttp3.ResponseBody responseBody() { return this.inner.body(); } /** * Creates azure-core HttpHeaders from okhttp headers. * * @param headers okhttp headers * @return azure-core HttpHeaders */ private static HttpHeaders fromOkHttpHeaders(okhttp3.Headers headers) { HttpHeaders httpHeaders = new HttpHeaders(); for (String headerName : headers.names()) { httpHeaders.put(headerName, headers.get(headerName)); } return httpHeaders; } /** * Creates a Flux of ByteBuffer, with each ByteBuffer wrapping bytes read from the given * InputStream. 
* * @param inputStream InputStream to back the Flux * @return Flux of ByteBuffer backed by the InputStream */ private static Flux<ByteBuffer> toFluxByteBuffer(InputStream inputStream) { Pair pair = new Pair(); return Flux.using(() -> inputStream, is -> Flux.just(true) .repeat() .map(ignore -> { byte[] buffer = new byte[BYTE_BUFFER_CHUNK_SIZE]; try { int numBytes = is.read(buffer); if (numBytes > 0) { return pair.buffer(ByteBuffer.wrap(buffer, 0, numBytes)).readBytes(numBytes); } else { return pair.buffer(null).readBytes(numBytes); } } catch (IOException ioe) { throw Exceptions.propagate(ioe); } }) .takeUntil(p -> p.readBytes() == -1) .filter(p -> p.readBytes() > 0) .map(p -> p.buffer()), is -> { try { is.close(); } catch (IOException ioe) { throw Exceptions.propagate(ioe); } } ); } private static class Pair { private ByteBuffer byteBuffer; private int readBytes; ByteBuffer buffer() { return this.byteBuffer; } int readBytes() { return this.readBytes; } Pair buffer(ByteBuffer byteBuffer) { this.byteBuffer = byteBuffer; return this; } Pair readBytes(int cnt) { this.readBytes = cnt; return this; } } }
class OkHttpResponse extends HttpResponse { private final okhttp3.Response inner; private final HttpHeaders headers; private final static int BYTE_BUFFER_CHUNK_SIZE = 1024; public OkHttpResponse(okhttp3.Response inner, HttpRequest request) { this.inner = inner; this.headers = fromOkHttpHeaders(this.inner.headers()); super.request(request); } @Override public int statusCode() { return this.inner.code(); } @Override public String headerValue(String name) { return this.headers.value(name); } @Override public HttpHeaders headers() { return this.headers; } @Override public Flux<ByteBuffer> body() { return this.responseBody() != null ? toFluxByteBuffer(this.responseBody().byteStream()) : Flux.empty(); } @Override public Mono<byte[]> bodyAsByteArray() { if (this.responseBody() == null) { return Mono.empty(); } else { return Mono.using(() -> this.responseBody(), rb -> { try { byte[] content = rb.bytes(); return content.length == 0 ? Mono.empty() : Mono.just(content); } catch (IOException ioe) { throw Exceptions.propagate(ioe); } }, rb -> rb.close()); } } @Override @Override public Mono<String> bodyAsString(Charset charset) { return bodyAsByteArray() .map(bytes -> new String(bytes, charset)); } @Override public void close() { if (this.inner.body() != null) { this.inner.body().close(); } } private okhttp3.ResponseBody responseBody() { return this.inner.body(); } /** * Creates azure-core HttpHeaders from okhttp headers. * * @param headers okhttp headers * @return azure-core HttpHeaders */ private static HttpHeaders fromOkHttpHeaders(okhttp3.Headers headers) { HttpHeaders httpHeaders = new HttpHeaders(); for (String headerName : headers.names()) { httpHeaders.put(headerName, headers.get(headerName)); } return httpHeaders; } /** * Creates a Flux of ByteBuffer, with each ByteBuffer wrapping bytes read from the given * InputStream. 
* * @param inputStream InputStream to back the Flux * @return Flux of ByteBuffer backed by the InputStream */ private static Flux<ByteBuffer> toFluxByteBuffer(InputStream inputStream) { Pair pair = new Pair(); return Flux.using(() -> inputStream, is -> Flux.just(true) .repeat() .map(ignore -> { byte[] buffer = new byte[BYTE_BUFFER_CHUNK_SIZE]; try { int numBytes = is.read(buffer); if (numBytes > 0) { return pair.buffer(ByteBuffer.wrap(buffer, 0, numBytes)).readBytes(numBytes); } else { return pair.buffer(null).readBytes(numBytes); } } catch (IOException ioe) { throw Exceptions.propagate(ioe); } }) .takeUntil(p -> p.readBytes() == -1) .filter(p -> p.readBytes() > 0) .map(p -> p.buffer()), is -> { try { is.close(); } catch (IOException ioe) { throw Exceptions.propagate(ioe); } } ); } private static class Pair { private ByteBuffer byteBuffer; private int readBytes; ByteBuffer buffer() { return this.byteBuffer; } int readBytes() { return this.readBytes; } Pair buffer(ByteBuffer byteBuffer) { this.byteBuffer = byteBuffer; return this; } Pair readBytes(int cnt) { this.readBytes = cnt; return this; } } }
No need to change this, but it's probably a good idea to start keeping the 120-character maximum line length in mind.
public Mono<Void> uploadFromFile(String uploadFilePath) { AsynchronousFileChannel channel = channelSetup(uploadFilePath); return Flux.fromIterable(sliceFile(uploadFilePath)) .flatMap(chunk -> upload(FluxUtil.readFile(channel, chunk.start(), chunk.end() - chunk.start() + 1), chunk.end() - chunk.start() + 1, chunk.start()) .timeout(Duration.ofSeconds(DOWNLOAD_UPLOAD_CHUNK_TIMEOUT)) .retry(3, throwable -> throwable instanceof IOException || throwable instanceof TimeoutException)) .then() .doOnTerminate(() -> channelCleanUp(channel)); }
.flatMap(chunk -> upload(FluxUtil.readFile(channel, chunk.start(), chunk.end() - chunk.start() + 1), chunk.end() - chunk.start() + 1, chunk.start())
public Mono<Void> uploadFromFile(String uploadFilePath) { return Mono.using(() -> channelSetup(uploadFilePath, StandardOpenOption.READ), channel -> Flux.fromIterable(sliceFile(uploadFilePath)).flatMap(chunk -> upload(FluxUtil.readFile(channel, chunk.start(), chunk.end() - chunk.start() + 1), chunk.end() - chunk.start() + 1, chunk.start()) .timeout(Duration.ofSeconds(DOWNLOAD_UPLOAD_CHUNK_TIMEOUT)) .retry(3, throwable -> throwable instanceof IOException || throwable instanceof TimeoutException)) .then(), this::channelCleanUp); }
class FileAsyncClient { private final ClientLogger logger = new ClientLogger(FileAsyncClient.class); private static final long FILE_DEFAULT_BLOCK_SIZE = 4 * 1024 * 1024L; private static final long DOWNLOAD_UPLOAD_CHUNK_TIMEOUT = 300; private final AzureFileStorageImpl azureFileStorageClient; private final String shareName; private final String filePath; private final String snapshot; /** * Creates a FileAsyncClient that sends requests to the storage file at {@link AzureFileStorageImpl * endpoint}. Each service call goes through the {@link HttpPipeline pipeline} in the {@code client}. * * @param azureFileStorageClient Client that interacts with the service interfaces * @param shareName Name of the share * @param filePath Path to the file * @param snapshot The snapshot of the share */ FileAsyncClient(AzureFileStorageImpl azureFileStorageClient, String shareName, String filePath, String snapshot) { Objects.requireNonNull(shareName); Objects.requireNonNull(filePath); this.shareName = shareName; this.filePath = filePath; this.snapshot = snapshot; this.azureFileStorageClient = azureFileStorageClient; } /** * Creates a FileAsyncClient that sends requests to the storage account at {@code endpoint}. Each service call goes * through the {@code httpPipeline}. * * @param endpoint URL for the Storage File service * @param httpPipeline HttpPipeline that HTTP requests and response flow through * @param shareName Name of the share * @param filePath Path to the file * @param snapshot Optional snapshot of the share */ FileAsyncClient(URL endpoint, HttpPipeline httpPipeline, String shareName, String filePath, String snapshot) { Objects.requireNonNull(shareName); Objects.requireNonNull(filePath); this.shareName = shareName; this.filePath = filePath; this.snapshot = snapshot; this.azureFileStorageClient = new AzureFileStorageBuilder().pipeline(httpPipeline) .url(endpoint.toString()) .build(); } /** * Get the url of the storage file client. 
* * @return the URL of the storage file client * @throws RuntimeException If the file is using a malformed URL. */ public URL getFileUrl() { try { return new URL(azureFileStorageClient.getUrl()); } catch (MalformedURLException e) { throw logger.logExceptionAsError(new RuntimeException(String.format("Invalid URL on %s: %s" + getClass().getSimpleName(), azureFileStorageClient.getUrl()), e)); } } /** * Creates a file in the storage account and returns a response of {@link FileInfo} to interact with it. * * <p><strong>Code Samples</strong></p> * * <p>Create the file with size 1KB.</p> * * {@codesnippet com.azure.storage.file.fileClient.create} * * <p>For more information, see the * <a href="https: * * @param maxSize The maximum size in bytes for the file, up to 1 TiB. * @return A response containing the file info and the status of creating the file. * @throws StorageException If the file has already existed, the parent directory does not exist or fileName is * an invalid resource name. */ public Mono<FileInfo> create(long maxSize) { return createWithResponse(maxSize, null, null).flatMap(FluxUtil::toMono); } /** * Creates a file in the storage account and returns a response of FileInfo to interact with it. * * <p><strong>Code Samples</strong></p> * * <p>Create the file with length of 1024 bytes, some headers and metadata.</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.createWithResponse * * <p>For more information, see the * <a href="https: * * @param maxSize The maximum size in bytes for the file, up to 1 TiB. * @param httpHeaders Additional parameters for the operation. * @param metadata Optional name-value pairs associated with the file as metadata. Metadata names must adhere to the naming rules. * @return A response containing the {@link FileInfo file info} and the status of creating the file. * @throws StorageException If the directory has already existed, the parent directory does not exist or directory is an invalid resource name. 
*/ public Mono<Response<FileInfo>> createWithResponse(long maxSize, FileHTTPHeaders httpHeaders, Map<String, String> metadata) { return withContext(context -> createWithResponse(maxSize, httpHeaders, metadata, context)); } Mono<Response<FileInfo>> createWithResponse(long maxSize, FileHTTPHeaders httpHeaders, Map<String, String> metadata, Context context) { String fileAttributes = "None"; String filePermission = "inherit"; String fileCreationTime = "now"; String fileLastWriteTime = "now"; return postProcessResponse(azureFileStorageClient.files() .createWithRestResponseAsync(shareName, filePath, maxSize, fileAttributes, fileCreationTime, fileLastWriteTime, null, metadata, filePermission, null, httpHeaders, context)) .map(this::createFileInfoResponse); } /** * Copies a blob or file to a destination file within the storage account. * * <p><strong>Code Samples</strong></p> * * <p>Copy file from source url to the {@code filePath} </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.startCopy * * <p>For more information, see the * <a href="https: * * @param sourceUrl Specifies the URL of the source file or blob, up to 2 KB in length. * @param metadata Optional name-value pairs associated with the file as metadata. Metadata names must adhere to the naming rules. * @return The {@link FileCopyInfo file copy info}. * @see <a href="https: */ public Mono<FileCopyInfo> startCopy(String sourceUrl, Map<String, String> metadata) { return startCopyWithResponse(sourceUrl, metadata).flatMap(FluxUtil::toMono); } /** * Copies a blob or file to a destination file within the storage account. * * <p><strong>Code Samples</strong></p> * * <p>Copy file from source url to the {@code filePath} </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.startCopyWithResponse * * <p>For more information, see the * <a href="https: * * @param sourceUrl Specifies the URL of the source file or blob, up to 2 KB in length. 
* @param metadata Optional name-value pairs associated with the file as metadata. Metadata names must adhere to the naming rules. * @return A response containing the {@link FileCopyInfo file copy info} and the status of copying the file. * @see <a href="https: */ public Mono<Response<FileCopyInfo>> startCopyWithResponse(String sourceUrl, Map<String, String> metadata) { return withContext(context -> startCopyWithResponse(sourceUrl, metadata, context)); } Mono<Response<FileCopyInfo>> startCopyWithResponse(String sourceUrl, Map<String, String> metadata, Context context) { return postProcessResponse(azureFileStorageClient.files() .startCopyWithRestResponseAsync(shareName, filePath, sourceUrl, null, metadata, context)) .map(this::startCopyResponse); } /** * Aborts a pending Copy File operation, and leaves a destination file with zero length and full metadata. * * <p><strong>Code Samples</strong></p> * * <p>Abort copy file from copy id("someCopyId") </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.abortCopy * * <p>For more information, see the * <a href="https: * * @param copyId Specifies the copy id which has copying pending status associate with it. * @return An empty response. */ public Mono<Void> abortCopy(String copyId) { return abortCopyWithResponse(copyId).flatMap(FluxUtil::toMono); } /** * Aborts a pending Copy File operation, and leaves a destination file with zero length and full metadata. * * <p><strong>Code Samples</strong></p> * * <p>Abort copy file from copy id("someCopyId") </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.abortCopyWithResponse * * <p>For more information, see the * <a href="https: * * @param copyId Specifies the copy id which has copying pending status associate with it. * @return A response containing the status of aborting copy the file. 
*/ public Mono<VoidResponse> abortCopyWithResponse(String copyId) { return withContext(context -> abortCopyWithResponse(copyId, context)); } Mono<VoidResponse> abortCopyWithResponse(String copyId, Context context) { return postProcessResponse(azureFileStorageClient.files() .abortCopyWithRestResponseAsync(shareName, filePath, copyId, context)) .map(VoidResponse::new); } /** * Downloads a file from the system, including its metadata and properties * * <p><strong>Code Samples</strong></p> * * <p>Download the file to current folder. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.downloadToFile * * <p>For more information, see the * <a href="https: * * @param downloadFilePath The path where store the downloaded file * @return An empty response. */ public Mono<Void> downloadToFile(String downloadFilePath) { return downloadToFile(downloadFilePath, null); } /** * Downloads a file from the system, including its metadata and properties * * <p><strong>Code Samples</strong></p> * * <p>Download the file from 1024 to 2048 bytes to current folder. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.downloadToFile * * <p>For more information, see the * <a href="https: * * @param downloadFilePath The path where store the downloaded file * @param range Optional byte range which returns file data only from the specified range. * @return An empty response. */ public Mono<Void> downloadToFile(String downloadFilePath, FileRange range) { AsynchronousFileChannel channel = channelSetup(downloadFilePath); return sliceFileRange(range) .flatMap(chunk -> downloadWithPropertiesWithResponse(chunk, false) .map(dar -> dar.value().body()) .subscribeOn(Schedulers.elastic()) .flatMap(fbb -> FluxUtil.writeFile(fbb, channel, chunk.start() - (range == null ? 
0 : range.start())) .subscribeOn(Schedulers.elastic()) .timeout(Duration.ofSeconds(DOWNLOAD_UPLOAD_CHUNK_TIMEOUT)) .retry(3, throwable -> throwable instanceof IOException || throwable instanceof TimeoutException))) .then() .doOnTerminate(() -> channelCleanUp(channel)); } private AsynchronousFileChannel channelSetup(String filePath) { try { return AsynchronousFileChannel.open(Paths.get(filePath), StandardOpenOption.READ, StandardOpenOption.WRITE); } catch (IOException e) { throw logger.logExceptionAsError(new UncheckedIOException(e)); } } private void channelCleanUp(AsynchronousFileChannel channel) { try { channel.close(); } catch (IOException e) { throw logger.logExceptionAsError(new UncheckedIOException(e)); } } private Flux<FileRange> sliceFileRange(FileRange fileRange) { long offset = fileRange == null ? 0L : fileRange.start(); Mono<Long> end; if (fileRange != null) { end = Mono.just(fileRange.end()); } else { end = Mono.empty(); } end = end.switchIfEmpty(getProperties().map(FileProperties::contentLength)); return end .map(e -> { List<FileRange> chunks = new ArrayList<>(); for (long pos = offset; pos < e; pos += FILE_DEFAULT_BLOCK_SIZE) { long count = FILE_DEFAULT_BLOCK_SIZE; if (pos + count > e) { count = e - pos; } chunks.add(new FileRange(pos, pos + count - 1)); } return chunks; }) .flatMapMany(Flux::fromIterable); } /** * Downloads a file from the system, including its metadata and properties * * <p><strong>Code Samples</strong></p> * * <p>Download the file with its metadata and properties. 
</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.downloadWithProperties} * * <p>For more information, see the * <a href="https: * * @return The {@link FileDownloadInfo file download Info} */ public Mono<FileDownloadInfo> downloadWithProperties() { return downloadWithPropertiesWithResponse(null, null).flatMap(FluxUtil::toMono); } /** * Downloads a file from the system, including its metadata and properties * * <p><strong>Code Samples</strong></p> * * <p>Download the file from 1024 to 2048 bytes with its metadata and properties and without the contentMD5. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.downloadWithPropertiesWithResponse * * <p>For more information, see the * <a href="https: * * @param range Optional byte range which returns file data only from the specified range. * @param rangeGetContentMD5 Optional boolean which the service returns the MD5 hash for the range when it sets * to true, as long as the range is less than or equal to 4 MB in size. * @return A response containing the {@link FileDownloadInfo file download Info} with headers and response status code */ public Mono<Response<FileDownloadInfo>> downloadWithPropertiesWithResponse(FileRange range, Boolean rangeGetContentMD5) { return withContext(context -> downloadWithPropertiesWithResponse(range, rangeGetContentMD5, context)); } Mono<Response<FileDownloadInfo>> downloadWithPropertiesWithResponse(FileRange range, Boolean rangeGetContentMD5, Context context) { String rangeString = range == null ? null : range.toString(); return postProcessResponse(azureFileStorageClient.files() .downloadWithRestResponseAsync(shareName, filePath, null, rangeString, rangeGetContentMD5, context)) .map(this::downloadWithPropertiesResponse); } /** * Deletes the file associate with the client. 
* * <p><strong>Code Samples</strong></p> * * <p>Delete the file</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.delete} * * <p>For more information, see the * <a href="https: * @return An empty response * @throws StorageException If the directory doesn't exist or the file doesn't exist. */ public Mono<Void> delete() { return deleteWithResponse(null).flatMap(FluxUtil::toMono); } /** * Deletes the file associate with the client. * * <p><strong>Code Samples</strong></p> * * <p>Delete the file</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.deleteWithResponse} * * <p>For more information, see the * <a href="https: * * @return A response that only contains headers and response status code * @throws StorageException If the directory doesn't exist or the file doesn't exist. */ public Mono<VoidResponse> deleteWithResponse() { return withContext(this::deleteWithResponse); } Mono<VoidResponse> deleteWithResponse(Context context) { return postProcessResponse(azureFileStorageClient.files() .deleteWithRestResponseAsync(shareName, filePath, context)) .map(VoidResponse::new); } /** * Retrieves the properties of the storage account's file. The properties includes file metadata, last modified * date, is server encrypted, and eTag. * * <p><strong>Code Samples</strong></p> * * <p>Retrieve file properties</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.getProperties} * * <p>For more information, see the * <a href="https: * * @return {@link FileProperties Storage file properties} */ public Mono<FileProperties> getProperties() { return getPropertiesWithResponse().flatMap(FluxUtil::toMono); } /** * Retrieves the properties of the storage account's file. * The properties includes file metadata, last modified date, is server encrypted, and eTag. 
* <p><strong>Code Samples</strong></p>
*
* <p>Retrieve file properties</p>
*
* {@codesnippet com.azure.storage.file.fileAsyncClient.getPropertiesWithResponse}
*
* <p>For more information, see the Azure Docs.</p>
*
* @return A response containing the {@link FileProperties storage file properties} and response status code
*/
public Mono<Response<FileProperties>> getPropertiesWithResponse() {
    return withContext(this::getPropertiesWithResponse);
}

// Package-private overload that carries the pipeline Context supplied by withContext.
Mono<Response<FileProperties>> getPropertiesWithResponse(Context context) {
    return postProcessResponse(azureFileStorageClient.files()
        .getPropertiesWithRestResponseAsync(shareName, filePath, snapshot, null, context))
        .map(this::getPropertiesResponse);
}

/**
 * Sets the user-defined httpHeaders to associate to the file.
 *
 * <p>If {@code null} is passed for the httpHeaders it will clear the httpHeaders associated to the file.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.file.fileAsyncClient.setHttpHeaders}
 *
 * @param newFileSize New file size of the file. If the specified byte value is less than the current size of
 * the file, then all ranges above the specified byte value are cleared.
 * @param httpHeaders The user-settable HTTP headers; {@code null} clears the headers currently set on the file.
 * @return The {@link FileInfo file info}
 * @throws IllegalArgumentException thrown if parameters fail the validation.
 */
public Mono<FileInfo> setHttpHeaders(long newFileSize, FileHTTPHeaders httpHeaders) {
    return setHttpHeadersWithResponse(newFileSize, httpHeaders).flatMap(FluxUtil::toMono);
}

/**
 * Sets the user-defined httpHeaders to associate to the file, returning the full response.
 *
 * <p>If {@code null} is passed for the httpHeaders it will clear the httpHeaders associated to the file.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.file.fileAsyncClient.setHttpHeadersWithResponse}
 *
 * @param newFileSize New file size of the file. If the specified byte value is less than the current size of
 * the file, then all ranges above the specified byte value are cleared.
 * @param httpHeaders The user-settable HTTP headers; {@code null} clears the headers currently set on the file.
 * @return Response containing the {@link FileInfo file info} and response status code
 * @throws IllegalArgumentException thrown if parameters fail the validation.
 */
public Mono<Response<FileInfo>> setHttpHeadersWithResponse(long newFileSize, FileHTTPHeaders httpHeaders) {
    return withContext(context -> setHttpHeadersWithResponse(newFileSize, httpHeaders, context));
}

Mono<Response<FileInfo>> setHttpHeadersWithResponse(long newFileSize, FileHTTPHeaders httpHeaders, Context context) {
    // SMB properties are not user-settable through this API: attributes are reset to "None",
    // the permission is inherited, and creation/last-write times are preserved (literal sentinel
    // values understood by the service).
    String fileAttributes = "None";
    String filePermission = "inherit";
    String fileCreationTime = "preserve";
    String fileLastWriteTime = "preserve";
    return postProcessResponse(azureFileStorageClient.files()
        .setHTTPHeadersWithRestResponseAsync(shareName, filePath, fileAttributes, fileCreationTime,
            fileLastWriteTime, null, newFileSize, filePermission, null, httpHeaders, context))
        .map(this::setHttpHeadersResponse);
}

/**
 * Sets the user-defined metadata to associate to the file.
 *
 * <p>If {@code null} is passed for the metadata it will clear the metadata associated to the file.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.file.fileAsyncClient.setMetadata}
 *
 * @param metadata Metadata to set on the file; if null is passed the metadata for the file is cleared
 * @return {@link FileMetadataInfo file meta info}
 * @throws StorageException If the file doesn't exist or the metadata contains invalid keys
 */
public Mono<FileMetadataInfo> setMetadata(Map<String, String> metadata) {
    return setMetadataWithResponse(metadata).flatMap(FluxUtil::toMono);
}

/**
 * Sets the user-defined metadata to associate to the file, returning the full response.
 *
 * <p>If {@code null} is passed for the metadata it will clear the metadata associated to the file.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.file.fileAsyncClient.setMetadataWithResponse}
 *
 * @param metadata Metadata to set on the file; if null is passed the metadata for the file is cleared
 * @return A response containing the {@link FileMetadataInfo file meta info} and status code
 * @throws StorageException If the file doesn't exist or the metadata contains invalid keys
 */
public Mono<Response<FileMetadataInfo>> setMetadataWithResponse(Map<String, String> metadata) {
    return withContext(context -> setMetadataWithResponse(metadata, context));
}

Mono<Response<FileMetadataInfo>> setMetadataWithResponse(Map<String, String> metadata, Context context) {
    return postProcessResponse(azureFileStorageClient.files()
        .setMetadataWithRestResponseAsync(shareName, filePath, null, metadata, context))
        .map(this::setMetadataResponse);
}

/**
 * Uploads a range of bytes to the beginning of a file in storage file service. Upload operations performs an
 * in-place write on the specified file.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.file.fileAsyncClient.upload}
 *
 * @param data The data which will upload to the storage file.
 * @param length Specifies the number of bytes being transmitted in the request body.
 * @return The {@link FileUploadInfo file upload info}
 */
public Mono<FileUploadInfo> upload(Flux<ByteBuffer> data, long length) {
    return uploadWithResponse(data, length).flatMap(FluxUtil::toMono);
}

/**
 * Uploads a range of bytes to the beginning of a file in storage file service, returning the full response.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.file.fileAsyncClient.uploadWithResponse}
 *
 * @param data The data which will upload to the storage file.
 * @param length Specifies the number of bytes being transmitted in the request body.
 * @return A response containing the {@link FileUploadInfo file upload info} with headers and response status code
 * @throws StorageException If you attempt to upload a range that is larger than 4 MB, the service returns status
 * code 413 (Request Entity Too Large)
 */
public Mono<Response<FileUploadInfo>> uploadWithResponse(Flux<ByteBuffer> data, long length) {
    return withContext(context -> uploadWithResponse(data, length, context));
}

Mono<Response<FileUploadInfo>> uploadWithResponse(Flux<ByteBuffer> data, long length, Context context) {
    // Range is inclusive: [0, length - 1].
    FileRange range = new FileRange(0, length - 1);
    return postProcessResponse(azureFileStorageClient.files()
        .uploadRangeWithRestResponseAsync(shareName, filePath, range.toString(), FileRangeWriteType.UPDATE,
            length, data, null, null, context))
        .map(this::uploadResponse);
}

/**
 * Uploads a range of bytes to a specific offset of a file in storage file service. Upload operations performs
 * an in-place write on the specified file.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.file.fileAsyncClient.upload}
 *
 * @param data The data which will upload to the storage file.
 * @param length Specifies the number of bytes being transmitted in the request body.
 * @param offset Starting point of the upload range.
 * @return The {@link FileUploadInfo file upload info}
 * @throws StorageException If you attempt to upload a range that is larger than 4 MB, the service returns status
 * code 413 (Request Entity Too Large)
 */
public Mono<FileUploadInfo> upload(Flux<ByteBuffer> data, long length, long offset) {
    return uploadWithResponse(data, length, offset).flatMap(FluxUtil::toMono);
}

/**
 * Uploads a range of bytes to a specific offset of a file in storage file service, returning the full response.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.file.fileAsyncClient.uploadWithResponse}
 *
 * @param data The data which will upload to the storage file.
 * @param length Specifies the number of bytes being transmitted in the request body.
 * @param offset Starting point of the upload range.
 * @return A response containing the {@link FileUploadInfo file upload info} with headers and response status code
 * @throws StorageException If you attempt to upload a range that is larger than 4 MB, the service returns status
 * code 413 (Request Entity Too Large)
 */
public Mono<Response<FileUploadInfo>> uploadWithResponse(Flux<ByteBuffer> data, long length, long offset) {
    return withContext(context -> uploadWithResponse(data, length, offset, context));
}

Mono<Response<FileUploadInfo>> uploadWithResponse(Flux<ByteBuffer> data, long length, long offset, Context context) {
    // Range is inclusive: [offset, offset + length - 1].
    FileRange range = new FileRange(offset, offset + length - 1);
    return postProcessResponse(azureFileStorageClient.files()
        .uploadRangeWithRestResponseAsync(shareName, filePath, range.toString(), FileRangeWriteType.UPDATE,
            length, data, null, null, context))
        .map(this::uploadResponse);
}

/**
 * Clears a range of bytes of a file in storage file service. Clear operations performs an in-place write on
 * the specified file, starting at offset 0.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.file.fileAsyncClient.clearRange}
 *
 * @param length Specifies the number of bytes being cleared.
 * @return The {@link FileUploadInfo file upload info}
 */
public Mono<FileUploadInfo> clearRange(long length) {
    return clearRangeWithResponse(length, 0).flatMap(FluxUtil::toMono);
}

/**
 * Clears a range of bytes of a file in storage file service, returning the full response.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.file.fileAsyncClient.clearRange}
 *
 * @param length Specifies the number of bytes being cleared in the request body.
 * @param offset Starting point of the range to clear.
 * @return A response of {@link FileUploadInfo file upload info} that only contains headers and response status code
 */
public Mono<Response<FileUploadInfo>> clearRangeWithResponse(long length, long offset) {
    return withContext(context -> clearRangeWithResponse(length, offset, context));
}

Mono<Response<FileUploadInfo>> clearRangeWithResponse(long length, long offset, Context context) {
    FileRange range = new FileRange(offset, offset + length - 1);
    // CLEAR write type sends no body, hence the 0L content length and null data.
    return postProcessResponse(azureFileStorageClient.files()
        .uploadRangeWithRestResponseAsync(shareName, filePath, range.toString(), FileRangeWriteType.CLEAR,
            0L, null, null, null, context))
        .map(this::uploadResponse);
}

/**
 * Uploads file to storage file service.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * <p>Upload the file from the source file path.</p>
 *
 * {@codesnippet com.azure.storage.file.fileAsyncClient.uploadFromFile
class FileAsyncClient {
    private final ClientLogger logger = new ClientLogger(FileAsyncClient.class);
    // Chunk size used when slicing a download into ranged requests: 4 MiB.
    private static final long FILE_DEFAULT_BLOCK_SIZE = 4 * 1024 * 1024L;
    // Per-chunk timeout, in seconds, applied to each download/upload chunk write.
    private static final long DOWNLOAD_UPLOAD_CHUNK_TIMEOUT = 300;

    private final AzureFileStorageImpl azureFileStorageClient;
    private final String shareName;
    private final String filePath;
    private final String snapshot;

    /**
     * Creates a FileAsyncClient that sends requests to the storage file at {@link AzureFileStorageImpl
     * endpoint}. Each service call goes through the {@link HttpPipeline pipeline} in the {@code client}.
     *
     * @param azureFileStorageClient Client that interacts with the service interfaces
     * @param shareName Name of the share
     * @param filePath Path to the file
     * @param snapshot The snapshot of the share
     */
    FileAsyncClient(AzureFileStorageImpl azureFileStorageClient, String shareName, String filePath, String snapshot) {
        Objects.requireNonNull(shareName);
        Objects.requireNonNull(filePath);
        this.shareName = shareName;
        this.filePath = filePath;
        this.snapshot = snapshot;
        this.azureFileStorageClient = azureFileStorageClient;
    }

    /**
     * Creates a FileAsyncClient that sends requests to the storage account at {@code endpoint}. Each service call
     * goes through the {@code httpPipeline}.
     *
     * @param endpoint URL for the Storage File service
     * @param httpPipeline HttpPipeline that HTTP requests and response flow through
     * @param shareName Name of the share
     * @param filePath Path to the file
     * @param snapshot Optional snapshot of the share
     */
    FileAsyncClient(URL endpoint, HttpPipeline httpPipeline, String shareName, String filePath, String snapshot) {
        Objects.requireNonNull(shareName);
        Objects.requireNonNull(filePath);
        this.shareName = shareName;
        this.filePath = filePath;
        this.snapshot = snapshot;
        this.azureFileStorageClient = new AzureFileStorageBuilder().pipeline(httpPipeline)
            .url(endpoint.toString())
            .build();
    }

    /**
     * Get the url of the storage file client.
     *
     * @return the URL of the storage file client
     * @throws RuntimeException If the file is using a malformed URL.
     */
    public URL getFileUrl() {
        try {
            return new URL(azureFileStorageClient.getUrl());
        } catch (MalformedURLException e) {
            // BUGFIX: the format string previously concatenated the class name INTO the pattern
            // ("...%s: %s" + getClass().getSimpleName()), leaving the second %s unmatched and
            // throwing MissingFormatArgumentException at runtime. The class name is now passed
            // as the first format argument instead.
            throw logger.logExceptionAsError(new RuntimeException(String.format("Invalid URL on %s: %s",
                getClass().getSimpleName(), azureFileStorageClient.getUrl()), e));
        }
    }

    /**
     * Creates a file in the storage account and returns a response of {@link FileInfo} to interact with it.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * <p>Create the file with size 1KB.</p>
     *
     * {@codesnippet com.azure.storage.file.fileAsyncClient.create}
     *
     * @param maxSize The maximum size in bytes for the file, up to 1 TiB.
     * @return A response containing the file info and the status of creating the file.
     * @throws StorageException If the file has already existed, the parent directory does not exist or fileName
     * is an invalid resource name.
     */
    public Mono<FileInfo> create(long maxSize) {
        return createWithResponse(maxSize, null, null).flatMap(FluxUtil::toMono);
    }

    /**
     * Creates a file in the storage account and returns a response of FileInfo to interact with it.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.file.fileAsyncClient.createWithResponse}
     *
     * @param maxSize The maximum size in bytes for the file, up to 1 TiB.
     * @param httpHeaders Optional HTTP headers to set on the file.
     * @param metadata Optional name-value pairs associated with the file as metadata. Metadata names must adhere
     * to the naming rules.
     * @return A response containing the {@link FileInfo file info} and the status of creating the file.
     * @throws StorageException If the file has already existed, the parent directory does not exist or the file
     * name is an invalid resource name.
     */
    public Mono<Response<FileInfo>> createWithResponse(long maxSize, FileHTTPHeaders httpHeaders,
        Map<String, String> metadata) {
        return withContext(context -> createWithResponse(maxSize, httpHeaders, metadata, context));
    }

    Mono<Response<FileInfo>> createWithResponse(long maxSize, FileHTTPHeaders httpHeaders,
        Map<String, String> metadata, Context context) {
        // SMB properties are defaulted on create: no attributes, inherited permission, times set to "now"
        // (literal sentinel values understood by the service).
        String fileAttributes = "None";
        String filePermission = "inherit";
        String fileCreationTime = "now";
        String fileLastWriteTime = "now";
        return postProcessResponse(azureFileStorageClient.files()
            .createWithRestResponseAsync(shareName, filePath, maxSize, fileAttributes, fileCreationTime,
                fileLastWriteTime, null, metadata, filePermission, null, httpHeaders, context))
            .map(this::createFileInfoResponse);
    }

    /**
     * Copies a blob or file to a destination file within the storage account.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.file.fileAsyncClient.startCopy}
     *
     * @param sourceUrl Specifies the URL of the source file or blob, up to 2 KB in length.
     * @param metadata Optional name-value pairs associated with the file as metadata. Metadata names must adhere
     * to the naming rules.
     * @return The {@link FileCopyInfo file copy info}.
     */
    public Mono<FileCopyInfo> startCopy(String sourceUrl, Map<String, String> metadata) {
        return startCopyWithResponse(sourceUrl, metadata).flatMap(FluxUtil::toMono);
    }

    /**
     * Copies a blob or file to a destination file within the storage account, returning the full response.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.file.fileAsyncClient.startCopyWithResponse}
     *
     * @param sourceUrl Specifies the URL of the source file or blob, up to 2 KB in length.
     * @param metadata Optional name-value pairs associated with the file as metadata. Metadata names must adhere
     * to the naming rules.
     * @return A response containing the {@link FileCopyInfo file copy info} and the status of copying the file.
     */
    public Mono<Response<FileCopyInfo>> startCopyWithResponse(String sourceUrl, Map<String, String> metadata) {
        return withContext(context -> startCopyWithResponse(sourceUrl, metadata, context));
    }

    Mono<Response<FileCopyInfo>> startCopyWithResponse(String sourceUrl, Map<String, String> metadata,
        Context context) {
        return postProcessResponse(azureFileStorageClient.files()
            .startCopyWithRestResponseAsync(shareName, filePath, sourceUrl, null, metadata, context))
            .map(this::startCopyResponse);
    }

    /**
     * Aborts a pending Copy File operation, and leaves a destination file with zero length and full metadata.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.file.fileAsyncClient.abortCopy}
     *
     * @param copyId Specifies the copy id which has copying pending status associate with it.
     * @return An empty response.
     */
    public Mono<Void> abortCopy(String copyId) {
        return abortCopyWithResponse(copyId).flatMap(FluxUtil::toMono);
    }

    /**
     * Aborts a pending Copy File operation, and leaves a destination file with zero length and full metadata.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.file.fileAsyncClient.abortCopyWithResponse}
     *
     * @param copyId Specifies the copy id which has copying pending status associate with it.
     * @return A response containing the status of aborting copy the file.
     */
    public Mono<VoidResponse> abortCopyWithResponse(String copyId) {
        return withContext(context -> abortCopyWithResponse(copyId, context));
    }

    Mono<VoidResponse> abortCopyWithResponse(String copyId, Context context) {
        return postProcessResponse(azureFileStorageClient.files()
            .abortCopyWithRestResponseAsync(shareName, filePath, copyId, context))
            .map(VoidResponse::new);
    }

    /**
     * Downloads a file from the system, including its metadata and properties, into a file specified by the path.
     *
     * <p>The file will be created and must not exist; if the file already exists a
     * {@link FileAlreadyExistsException} will be thrown.</p>
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.file.fileAsyncClient.downloadToFile}
     *
     * @param downloadFilePath The path where store the downloaded file
     * @return An empty response.
     */
    public Mono<Void> downloadToFile(String downloadFilePath) {
        return downloadToFile(downloadFilePath, null);
    }

    /**
     * Downloads a range of a file from the system, including its metadata and properties, into a file specified
     * by the path.
     *
     * <p>The file will be created and must not exist; if the file already exists a
     * {@link FileAlreadyExistsException} will be thrown.</p>
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.file.fileAsyncClient.downloadToFile}
     *
     * @param downloadFilePath The path where store the downloaded file
     * @param range Optional byte range which returns file data only from the specified range.
     * @return An empty response.
     */
    public Mono<Void> downloadToFile(String downloadFilePath, FileRange range) {
        // Mono.using ties the channel's lifetime to the pipeline so it is closed on success and error.
        // The download is split into chunks (see sliceFileRange); each chunk is written at its offset
        // relative to the requested range, with a per-chunk timeout and up to 3 retries on
        // IOException/TimeoutException.
        return Mono.using(() -> channelSetup(downloadFilePath, StandardOpenOption.WRITE,
            StandardOpenOption.CREATE_NEW),
            channel -> sliceFileRange(range)
                .flatMap(chunk -> downloadWithPropertiesWithResponse(chunk, false)
                    .map(dar -> dar.value().body())
                    .subscribeOn(Schedulers.elastic())
                    .flatMap(fbb -> FluxUtil.writeFile(fbb, channel,
                        chunk.start() - (range == null ? 0 : range.start()))
                        .subscribeOn(Schedulers.elastic())
                        .timeout(Duration.ofSeconds(DOWNLOAD_UPLOAD_CHUNK_TIMEOUT))
                        .retry(3, throwable -> throwable instanceof IOException
                            || throwable instanceof TimeoutException)))
                .then(), this::channelCleanUp);
    }

    // Opens an AsynchronousFileChannel for the given path, converting IOException to unchecked.
    private AsynchronousFileChannel channelSetup(String filePath, OpenOption... options) {
        try {
            return AsynchronousFileChannel.open(Paths.get(filePath), options);
        } catch (IOException e) {
            throw logger.logExceptionAsError(new UncheckedIOException(e));
        }
    }

    // Closes the channel, converting IOException to unchecked.
    private void channelCleanUp(AsynchronousFileChannel channel) {
        try {
            channel.close();
        } catch (IOException e) {
            throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(e)));
        }
    }

    // Splits the requested range (or, when null, the whole file based on its content length) into
    // inclusive FILE_DEFAULT_BLOCK_SIZE-sized chunks.
    private Flux<FileRange> sliceFileRange(FileRange fileRange) {
        long offset = fileRange == null ? 0L : fileRange.start();
        Mono<Long> end;
        if (fileRange != null) {
            end = Mono.just(fileRange.end());
        } else {
            end = Mono.empty();
        }
        end = end.switchIfEmpty(getProperties().map(FileProperties::contentLength));
        return end
            .map(e -> {
                List<FileRange> chunks = new ArrayList<>();
                for (long pos = offset; pos < e; pos += FILE_DEFAULT_BLOCK_SIZE) {
                    long count = FILE_DEFAULT_BLOCK_SIZE;
                    if (pos + count > e) {
                        count = e - pos;
                    }
                    chunks.add(new FileRange(pos, pos + count - 1));
                }
                return chunks;
            })
            .flatMapMany(Flux::fromIterable);
    }

    /**
     * Downloads a file from the system, including its metadata and properties.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.file.fileAsyncClient.downloadWithProperties}
     *
     * @return The {@link FileDownloadInfo file download Info}
     */
    public Mono<FileDownloadInfo> downloadWithProperties() {
        return downloadWithPropertiesWithResponse(null, null).flatMap(FluxUtil::toMono);
    }

    /**
     * Downloads a file from the system, including its metadata and properties, returning the full response.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.file.fileAsyncClient.downloadWithPropertiesWithResponse}
     *
     * @param range Optional byte range which returns file data only from the specified range.
     * @param rangeGetContentMD5 Optional boolean which the service returns the MD5 hash for the range when it
     * sets to true, as long as the range is less than or equal to 4 MB in size.
     * @return A response containing the {@link FileDownloadInfo file download Info} with headers and response
     * status code
     */
    public Mono<Response<FileDownloadInfo>> downloadWithPropertiesWithResponse(FileRange range,
        Boolean rangeGetContentMD5) {
        return withContext(context -> downloadWithPropertiesWithResponse(range, rangeGetContentMD5, context));
    }

    Mono<Response<FileDownloadInfo>> downloadWithPropertiesWithResponse(FileRange range, Boolean rangeGetContentMD5,
        Context context) {
        String rangeString = range == null ? null : range.toString();
        return postProcessResponse(azureFileStorageClient.files()
            .downloadWithRestResponseAsync(shareName, filePath, null, rangeString, rangeGetContentMD5, context))
            .map(this::downloadWithPropertiesResponse);
    }

    /**
     * Deletes the file associate with the client.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.file.fileAsyncClient.delete}
     *
     * @return An empty response
     * @throws StorageException If the directory doesn't exist or the file doesn't exist.
     */
    public Mono<Void> delete() {
        // CONSISTENCY FIX: route through the public overload so the reactor Context is picked up via
        // withContext, matching every other simple overload in this class (previously this passed a raw
        // null Context straight to the package-private overload).
        return deleteWithResponse().flatMap(FluxUtil::toMono);
    }

    /**
     * Deletes the file associate with the client, returning the full response.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.file.fileAsyncClient.deleteWithResponse}
     *
     * @return A response that only contains headers and response status code
     * @throws StorageException If the directory doesn't exist or the file doesn't exist.
     */
    public Mono<VoidResponse> deleteWithResponse() {
        return withContext(this::deleteWithResponse);
    }

    Mono<VoidResponse> deleteWithResponse(Context context) {
        return postProcessResponse(azureFileStorageClient.files()
            .deleteWithRestResponseAsync(shareName, filePath, context))
            .map(VoidResponse::new);
    }

    /**
     * Retrieves the properties of the storage account's file. The properties include file metadata, last
     * modified date, is server encrypted, and eTag.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.file.fileAsyncClient.getProperties}
     *
     * @return {@link FileProperties Storage file properties}
     */
    public Mono<FileProperties> getProperties() {
        return getPropertiesWithResponse().flatMap(FluxUtil::toMono);
    }

    /**
     * Retrieves the properties of the storage account's file, returning the full response. The properties include
     * file metadata, last modified date, is server encrypted, and eTag.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.file.fileAsyncClient.getPropertiesWithResponse}
     *
     * @return A response containing the {@link FileProperties storage file properties} and response status code
     */
    public Mono<Response<FileProperties>> getPropertiesWithResponse() {
        return withContext(this::getPropertiesWithResponse);
    }

    Mono<Response<FileProperties>> getPropertiesWithResponse(Context context) {
        return postProcessResponse(azureFileStorageClient.files()
            .getPropertiesWithRestResponseAsync(shareName, filePath, snapshot, null, context))
            .map(this::getPropertiesResponse);
    }

    /**
     * Sets the user-defined httpHeaders to associate to the file.
     *
     * <p>If {@code null} is passed for the httpHeaders it will clear the httpHeaders associated to the file.</p>
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.file.fileAsyncClient.setHttpHeaders}
     *
     * @param newFileSize New file size of the file. If the specified byte value is less than the current size of
     * the file, then all ranges above the specified byte value are cleared.
     * @param httpHeaders The user-settable HTTP headers; {@code null} clears the headers currently set on the
     * file.
     * @return The {@link FileInfo file info}
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     */
    public Mono<FileInfo> setHttpHeaders(long newFileSize, FileHTTPHeaders httpHeaders) {
        return setHttpHeadersWithResponse(newFileSize, httpHeaders).flatMap(FluxUtil::toMono);
    }

    /**
     * Sets the user-defined httpHeaders to associate to the file, returning the full response.
     *
     * <p>If {@code null} is passed for the httpHeaders it will clear the httpHeaders associated to the file.</p>
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.file.fileAsyncClient.setHttpHeadersWithResponse}
     *
     * @param newFileSize New file size of the file. If the specified byte value is less than the current size of
     * the file, then all ranges above the specified byte value are cleared.
     * @param httpHeaders The user-settable HTTP headers; {@code null} clears the headers currently set on the
     * file.
     * @return Response containing the {@link FileInfo file info} and response status code
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     */
    public Mono<Response<FileInfo>> setHttpHeadersWithResponse(long newFileSize, FileHTTPHeaders httpHeaders) {
        return withContext(context -> setHttpHeadersWithResponse(newFileSize, httpHeaders, context));
    }

    Mono<Response<FileInfo>> setHttpHeadersWithResponse(long newFileSize, FileHTTPHeaders httpHeaders,
        Context context) {
        // SMB properties are not user-settable here: attributes reset to "None", permission inherited,
        // creation/last-write times preserved (literal sentinel values understood by the service).
        String fileAttributes = "None";
        String filePermission = "inherit";
        String fileCreationTime = "preserve";
        String fileLastWriteTime = "preserve";
        return postProcessResponse(azureFileStorageClient.files()
            .setHTTPHeadersWithRestResponseAsync(shareName, filePath, fileAttributes, fileCreationTime,
                fileLastWriteTime, null, newFileSize, filePermission, null, httpHeaders, context))
            .map(this::setHttpHeadersResponse);
    }

    /**
     * Sets the user-defined metadata to associate to the file.
     *
     * <p>If {@code null} is passed for the metadata it will clear the metadata associated to the file.</p>
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.file.fileAsyncClient.setMetadata}
     *
     * @param metadata Metadata to set on the file; if null is passed the metadata for the file is cleared
     * @return {@link FileMetadataInfo file meta info}
     * @throws StorageException If the file doesn't exist or the metadata contains invalid keys
     */
    public Mono<FileMetadataInfo> setMetadata(Map<String, String> metadata) {
        return setMetadataWithResponse(metadata).flatMap(FluxUtil::toMono);
    }

    /**
     * Sets the user-defined metadata to associate to the file, returning the full response.
     *
     * <p>If {@code null} is passed for the metadata it will clear the metadata associated to the file.</p>
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.file.fileAsyncClient.setMetadataWithResponse}
     *
     * @param metadata Metadata to set on the file; if null is passed the metadata for the file is cleared
     * @return A response containing the {@link FileMetadataInfo file meta info} and status code
     * @throws StorageException If the file doesn't exist or the metadata contains invalid keys
     */
    public Mono<Response<FileMetadataInfo>> setMetadataWithResponse(Map<String, String> metadata) {
        return withContext(context -> setMetadataWithResponse(metadata, context));
    }

    Mono<Response<FileMetadataInfo>> setMetadataWithResponse(Map<String, String> metadata, Context context) {
        return postProcessResponse(azureFileStorageClient.files()
            .setMetadataWithRestResponseAsync(shareName, filePath, null, metadata, context))
            .map(this::setMetadataResponse);
    }

    /**
     * Uploads a range of bytes to the beginning of a file in storage file service. Upload operations performs an
     * in-place write on the specified file.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.file.fileAsyncClient.upload}
     *
     * @param data The data which will upload to the storage file.
     * @param length Specifies the number of bytes being transmitted in the request body.
     * @return The {@link FileUploadInfo file upload info}
     */
    public Mono<FileUploadInfo> upload(Flux<ByteBuffer> data, long length) {
        return uploadWithResponse(data, length).flatMap(FluxUtil::toMono);
    }

    /**
     * Uploads a range of bytes to the beginning of a file in storage file service, returning the full response.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.file.fileAsyncClient.uploadWithResponse}
     *
     * @param data The data which will upload to the storage file.
     * @param length Specifies the number of bytes being transmitted in the request body.
     * @return A response containing the {@link FileUploadInfo file upload info} with headers and response status
     * code
     * @throws StorageException If you attempt to upload a range that is larger than 4 MB, the service returns
     * status code 413 (Request Entity Too Large)
     */
    public Mono<Response<FileUploadInfo>> uploadWithResponse(Flux<ByteBuffer> data, long length) {
        return withContext(context -> uploadWithResponse(data, length, context));
    }

    Mono<Response<FileUploadInfo>> uploadWithResponse(Flux<ByteBuffer> data, long length, Context context) {
        // Range is inclusive: [0, length - 1].
        FileRange range = new FileRange(0, length - 1);
        return postProcessResponse(azureFileStorageClient.files()
            .uploadRangeWithRestResponseAsync(shareName, filePath, range.toString(), FileRangeWriteType.UPDATE,
                length, data, null, null, context))
            .map(this::uploadResponse);
    }

    /**
     * Uploads a range of bytes to a specific offset of a file in storage file service. Upload operations performs
     * an in-place write on the specified file.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.file.fileAsyncClient.upload}
     *
     * @param data The data which will upload to the storage file.
     * @param length Specifies the number of bytes being transmitted in the request body.
     * @param offset Starting point of the upload range.
     * @return The {@link FileUploadInfo file upload info}
     * @throws StorageException If you attempt to upload a range that is larger than 4 MB, the service returns
     * status code 413 (Request Entity Too Large)
     */
    public Mono<FileUploadInfo> upload(Flux<ByteBuffer> data, long length, long offset) {
        return uploadWithResponse(data, length, offset).flatMap(FluxUtil::toMono);
    }

    /**
     * Uploads a range of bytes to a specific offset of a file in storage file service, returning the full
     * response.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.file.fileAsyncClient.uploadWithResponse}
     *
     * @param data The data which will upload to the storage file.
     * @param length Specifies the number of bytes being transmitted in the request body.
     * @param offset Starting point of the upload range.
     * @return A response containing the {@link FileUploadInfo file upload info} with headers and response status
     * code
     * @throws StorageException If you attempt to upload a range that is larger than 4 MB, the service returns
     * status code 413 (Request Entity Too Large)
     */
    public Mono<Response<FileUploadInfo>> uploadWithResponse(Flux<ByteBuffer> data, long length, long offset) {
        return withContext(context -> uploadWithResponse(data, length, offset, context));
    }

    Mono<Response<FileUploadInfo>> uploadWithResponse(Flux<ByteBuffer> data, long length, long offset,
        Context context) {
        // Range is inclusive: [offset, offset + length - 1].
        FileRange range = new FileRange(offset, offset + length - 1);
        return postProcessResponse(azureFileStorageClient.files()
            .uploadRangeWithRestResponseAsync(shareName, filePath, range.toString(), FileRangeWriteType.UPDATE,
                length, data, null, null, context))
            .map(this::uploadResponse);
    }

    /**
     * Clears a range of bytes of a file in storage file service. Clear operations performs an in-place write on
     * the specified file, starting at offset 0.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.file.fileAsyncClient.clearRange}
     *
     * @param length Specifies the number of bytes being cleared.
     * @return The {@link FileUploadInfo file upload info}
     */
    public Mono<FileUploadInfo> clearRange(long length) {
        return clearRangeWithResponse(length, 0).flatMap(FluxUtil::toMono);
    }

    /**
     * Clears a range of bytes of a file in storage file service, returning the full response.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.file.fileAsyncClient.clearRange}
     *
     * @param length Specifies the number of bytes being cleared in the request body.
     * @param offset Starting point of the range to clear.
     * @return A response of {@link FileUploadInfo file upload info} that only contains headers and response
     * status code
     */
    public Mono<Response<FileUploadInfo>> clearRangeWithResponse(long length, long offset) {
        return withContext(context -> clearRangeWithResponse(length, offset, context));
    }

    Mono<Response<FileUploadInfo>> clearRangeWithResponse(long length, long offset, Context context) {
        FileRange range = new FileRange(offset, offset + length - 1);
        // CLEAR write type sends no body, hence the 0L content length and null data.
        return postProcessResponse(azureFileStorageClient.files()
            .uploadRangeWithRestResponseAsync(shareName, filePath, range.toString(), FileRangeWriteType.CLEAR,
                0L, null, null, null, context))
            .map(this::uploadResponse);
    }

    /**
     * Uploads file to storage file service.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * <p>Upload the file from the source file path.</p>
     *
     * {@codesnippet com.azure.storage.file.fileAsyncClient.uploadFromFile
We should consider making `30 * 1000` (the 30-second connect timeout) a named constant.
/**
 * Configures the underlying reactor-netty TCP client: routes through the
 * configured HTTP proxy (if any), applies the configured SSL context,
 * enables wire-level logging when trace is on, and sets the connect timeout.
 */
private void configureChannelPipelineHandlers() {
    // Named per review feedback; ideally hoisted to a private static final field.
    final int connectTimeoutMillis = 30 * 1000;
    this.httpClient = this.httpClient.tcpConfiguration(tcpClient -> {
        if (this.httpClientConfig.getProxy() != null) {
            // Route through the configured HTTP proxy when one is set.
            tcpClient = tcpClient.proxy(typeSpec ->
                typeSpec.type(ProxyProvider.Proxy.HTTP).address(this.httpClientConfig.getProxy()));
        }
        tcpClient = tcpClient.secure(sslContextSpec ->
            sslContextSpec.sslContext(this.httpClientConfig.getConfigs().getSslContext()));
        if (LoggerFactory.getLogger(REACTOR_NETWORK_LOG_CATEGORY).isTraceEnabled()) {
            // Wire-level logging is expensive; only enable it under trace.
            tcpClient = tcpClient.wiretap(REACTOR_NETWORK_LOG_CATEGORY, LogLevel.INFO);
        }
        tcpClient = tcpClient.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, connectTimeoutMillis);
        return tcpClient;
    });
}
tcpClient = tcpClient.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, 30 * 1000);
/**
 * Applies proxy, TLS, optional wiretap logging and the configured
 * connection-acquire timeout to the underlying reactor-netty TCP client.
 */
private void configureChannelPipelineHandlers() {
    this.httpClient = this.httpClient.tcpConfiguration(client -> {
        if (this.httpClientConfig.getProxy() != null) {
            client = client.proxy(spec ->
                spec.type(ProxyProvider.Proxy.HTTP).address(this.httpClientConfig.getProxy()));
        }
        client = client.secure(ssl ->
            ssl.sslContext(this.httpClientConfig.getConfigs().getSslContext()));
        if (LoggerFactory.getLogger(REACTOR_NETWORK_LOG_CATEGORY).isTraceEnabled()) {
            client = client.wiretap(REACTOR_NETWORK_LOG_CATEGORY, LogLevel.INFO);
        }
        client = client.option(ChannelOption.CONNECT_TIMEOUT_MILLIS,
            httpClientConfig.getConfigs().getConnectionAcquireTimeoutInMillis());
        return client;
    });
}
class ReactorNettyClient implements HttpClient { private static final Logger logger = LoggerFactory.getLogger(ReactorNettyClient.class.getSimpleName()); private HttpClientConfig httpClientConfig; private reactor.netty.http.client.HttpClient httpClient; private ConnectionProvider connectionProvider; private ReactorNettyClient() {} /** * Creates ReactorNettyClient with un-pooled connection. */ public static ReactorNettyClient create(HttpClientConfig httpClientConfig) { ReactorNettyClient reactorNettyClient = new ReactorNettyClient(); reactorNettyClient.httpClientConfig = httpClientConfig; reactorNettyClient.httpClient = reactor.netty.http.client.HttpClient.newConnection(); reactorNettyClient.configureChannelPipelineHandlers(); return reactorNettyClient; } /** * Creates ReactorNettyClient with {@link ConnectionProvider}. */ public static ReactorNettyClient createWithConnectionProvider(ConnectionProvider connectionProvider, HttpClientConfig httpClientConfig) { ReactorNettyClient reactorNettyClient = new ReactorNettyClient(); reactorNettyClient.connectionProvider = connectionProvider; reactorNettyClient.httpClientConfig = httpClientConfig; reactorNettyClient.httpClient = reactor.netty.http.client.HttpClient.create(connectionProvider); reactorNettyClient.configureChannelPipelineHandlers(); return reactorNettyClient; } @Override public Mono<HttpResponse> send(final HttpRequest request) { Objects.requireNonNull(request.httpMethod()); Objects.requireNonNull(request.uri()); Objects.requireNonNull(this.httpClientConfig); return this.httpClient .keepAlive(this.httpClientConfig.isConnectionKeepAlive()) .port(request.port()) .request(HttpMethod.valueOf(request.httpMethod().toString())) .uri(request.uri().toString()) .send(bodySendDelegate(request)) .responseConnection(responseDelegate(request)) .single(); } /** * Delegate to send the request content. 
* * @param restRequest the Rest request contains the body to be sent * @return a delegate upon invocation sets the request body in reactor-netty outbound object */ private static BiFunction<HttpClientRequest, NettyOutbound, Publisher<Void>> bodySendDelegate(final HttpRequest restRequest) { return (reactorNettyRequest, reactorNettyOutbound) -> { for (HttpHeader header : restRequest.headers()) { reactorNettyRequest.header(header.name(), header.value()); } if (restRequest.body() != null) { Flux<ByteBuf> nettyByteBufFlux = restRequest.body().map(Unpooled::wrappedBuffer); return reactorNettyOutbound.send(nettyByteBufFlux); } else { return reactorNettyOutbound; } }; } /** * Delegate to receive response. * * @param restRequest the Rest request whose response this delegate handles * @return a delegate upon invocation setup Rest response object */ private static BiFunction<HttpClientResponse, Connection, Publisher<HttpResponse>> responseDelegate(final HttpRequest restRequest) { return (reactorNettyResponse, reactorNettyConnection) -> Mono.just(new ReactorNettyHttpResponse(reactorNettyResponse, reactorNettyConnection).withRequest(restRequest)); } @Override public void shutdown() { if (this.connectionProvider != null) { this.connectionProvider.dispose(); } } private static class ReactorNettyHttpResponse extends HttpResponse { private final HttpClientResponse reactorNettyResponse; private final Connection reactorNettyConnection; ReactorNettyHttpResponse(HttpClientResponse reactorNettyResponse, Connection reactorNettyConnection) { this.reactorNettyResponse = reactorNettyResponse; this.reactorNettyConnection = reactorNettyConnection; } @Override public int statusCode() { return reactorNettyResponse.status().code(); } @Override public String headerValue(String name) { return reactorNettyResponse.responseHeaders().get(name); } @Override public HttpHeaders headers() { HttpHeaders headers = new HttpHeaders(reactorNettyResponse.responseHeaders().size()); 
reactorNettyResponse.responseHeaders().forEach(e -> headers.set(e.getKey(), e.getValue())); return headers; } @Override public Flux<ByteBuf> body() { return bodyIntern().doFinally(s -> this.close()); } @Override public Flux<InputStream> bodyAsInputStream() { return bodyIntern().asInputStream().doFinally(s -> this.close()); } @Override public Mono<byte[]> bodyAsByteArray() { return bodyIntern().aggregate().asByteArray().doFinally(s -> this.close()); } @Override public Mono<String> bodyAsString() { return bodyIntern().aggregate().asString().doFinally(s -> this.close()); } @Override public Mono<String> bodyAsString(Charset charset) { return bodyIntern().aggregate().asString(charset).doFinally(s -> this.close()); } @Override public void close() { if (reactorNettyConnection.channel().eventLoop().inEventLoop()) { reactorNettyConnection.dispose(); } else { reactorNettyConnection.channel().eventLoop().execute(reactorNettyConnection::dispose); } } private ByteBufFlux bodyIntern() { return reactorNettyConnection.inbound().receive(); } @Override Connection internConnection() { return reactorNettyConnection; } } }
class ReactorNettyClient implements HttpClient { private static final Logger logger = LoggerFactory.getLogger(ReactorNettyClient.class.getSimpleName()); private HttpClientConfig httpClientConfig; private reactor.netty.http.client.HttpClient httpClient; private ConnectionProvider connectionProvider; private ReactorNettyClient() {} /** * Creates ReactorNettyClient with un-pooled connection. */ public static ReactorNettyClient create(HttpClientConfig httpClientConfig) { ReactorNettyClient reactorNettyClient = new ReactorNettyClient(); reactorNettyClient.httpClientConfig = httpClientConfig; reactorNettyClient.httpClient = reactor.netty.http.client.HttpClient.newConnection(); reactorNettyClient.configureChannelPipelineHandlers(); return reactorNettyClient; } /** * Creates ReactorNettyClient with {@link ConnectionProvider}. */ public static ReactorNettyClient createWithConnectionProvider(ConnectionProvider connectionProvider, HttpClientConfig httpClientConfig) { ReactorNettyClient reactorNettyClient = new ReactorNettyClient(); reactorNettyClient.connectionProvider = connectionProvider; reactorNettyClient.httpClientConfig = httpClientConfig; reactorNettyClient.httpClient = reactor.netty.http.client.HttpClient.create(connectionProvider); reactorNettyClient.configureChannelPipelineHandlers(); return reactorNettyClient; } @Override public Mono<HttpResponse> send(final HttpRequest request) { Objects.requireNonNull(request.httpMethod()); Objects.requireNonNull(request.uri()); Objects.requireNonNull(this.httpClientConfig); return this.httpClient .keepAlive(this.httpClientConfig.isConnectionKeepAlive()) .port(request.port()) .request(HttpMethod.valueOf(request.httpMethod().toString())) .uri(request.uri().toString()) .send(bodySendDelegate(request)) .responseConnection(responseDelegate(request)) .single(); } /** * Delegate to send the request content. 
* * @param restRequest the Rest request contains the body to be sent * @return a delegate upon invocation sets the request body in reactor-netty outbound object */ private static BiFunction<HttpClientRequest, NettyOutbound, Publisher<Void>> bodySendDelegate(final HttpRequest restRequest) { return (reactorNettyRequest, reactorNettyOutbound) -> { for (HttpHeader header : restRequest.headers()) { reactorNettyRequest.header(header.name(), header.value()); } if (restRequest.body() != null) { Flux<ByteBuf> nettyByteBufFlux = restRequest.body().map(Unpooled::wrappedBuffer); return reactorNettyOutbound.send(nettyByteBufFlux); } else { return reactorNettyOutbound; } }; } /** * Delegate to receive response. * * @param restRequest the Rest request whose response this delegate handles * @return a delegate upon invocation setup Rest response object */ private static BiFunction<HttpClientResponse, Connection, Publisher<HttpResponse>> responseDelegate(final HttpRequest restRequest) { return (reactorNettyResponse, reactorNettyConnection) -> Mono.just(new ReactorNettyHttpResponse(reactorNettyResponse, reactorNettyConnection).withRequest(restRequest)); } @Override public void shutdown() { if (this.connectionProvider != null) { this.connectionProvider.dispose(); } } private static class ReactorNettyHttpResponse extends HttpResponse { private final HttpClientResponse reactorNettyResponse; private final Connection reactorNettyConnection; ReactorNettyHttpResponse(HttpClientResponse reactorNettyResponse, Connection reactorNettyConnection) { this.reactorNettyResponse = reactorNettyResponse; this.reactorNettyConnection = reactorNettyConnection; } @Override public int statusCode() { return reactorNettyResponse.status().code(); } @Override public String headerValue(String name) { return reactorNettyResponse.responseHeaders().get(name); } @Override public HttpHeaders headers() { HttpHeaders headers = new HttpHeaders(reactorNettyResponse.responseHeaders().size()); 
reactorNettyResponse.responseHeaders().forEach(e -> headers.set(e.getKey(), e.getValue())); return headers; } @Override public Flux<ByteBuf> body() { return bodyIntern().doFinally(s -> this.close()); } @Override public Mono<byte[]> bodyAsByteArray() { return bodyIntern().aggregate().asByteArray().doFinally(s -> this.close()); } @Override public Mono<String> bodyAsString() { return bodyIntern().aggregate().asString().doFinally(s -> this.close()); } @Override public Mono<String> bodyAsString(Charset charset) { return bodyIntern().aggregate().asString(charset).doFinally(s -> this.close()); } @Override public void close() { if (reactorNettyConnection.channel().eventLoop().inEventLoop()) { reactorNettyConnection.dispose(); } else { reactorNettyConnection.channel().eventLoop().execute(reactorNettyConnection::dispose); } } private ByteBufFlux bodyIntern() { return reactorNettyConnection.inbound().receive(); } @Override Connection internConnection() { return reactorNettyConnection; } } }
Why are we adding this sort? This is already a segment of a paged response — what's the point of spending cycles sorting every 1000-file subset of a larger list?
/**
 * Converts one list-files-and-directories segment into {@code FileRef}s —
 * directories first flagged as such, then files with their properties —
 * and returns the combined entries sorted lexically by name.
 */
private List<FileRef> convertResponseAndGetNumOfResults(DirectorysListFilesAndDirectoriesSegmentResponse response) {
    final List<FileRef> refs = new ArrayList<>();
    if (response.value().segment() != null) {
        response.value().segment().directoryItems()
            .forEach(dir -> refs.add(new FileRef(dir.name(), true, null)));
        response.value().segment().fileItems()
            .forEach(file -> refs.add(new FileRef(file.name(), false, file.properties())));
    }
    refs.sort(Comparator.comparing(FileRef::name));
    return refs;
}
fileRefs.sort(Comparator.comparing(FileRef::name));
/**
 * Converts one list-files-and-directories segment into {@code FileRef}s.
 * A {@code TreeSet} keyed on name keeps directory and file entries
 * intermingled in lexical order, matching the service's documented ordering.
 * NOTE(review): set semantics silently drop entries with equal names —
 * presumably a directory and a file cannot share a name in one listing; verify.
 */
private List<FileRef> convertResponseAndGetNumOfResults(DirectorysListFilesAndDirectoriesSegmentResponse response) {
    Set<FileRef> fileRefs = new TreeSet<>(Comparator.comparing(FileRef::name));
    if (response.value().segment() != null) {
        // Directories carry no file properties, hence the null.
        response.value().segment().directoryItems()
            .forEach(directoryItem -> fileRefs.add(new FileRef(directoryItem.name(), true, null)));
        response.value().segment().fileItems()
            .forEach(fileItem -> fileRefs.add(new FileRef(fileItem.name(), false, fileItem.properties())));
    }
    return new ArrayList<>(fileRefs);
}
class DirectoryAsyncClient { private final ClientLogger logger = new ClientLogger(DirectoryAsyncClient.class); private final AzureFileStorageImpl azureFileStorageClient; private final String shareName; private final String directoryPath; private final String snapshot; /** * Creates a DirectoryAsyncClient that sends requests to the storage directory at {@link AzureFileStorageImpl * Each service call goes through the {@link HttpPipeline pipeline} in the {@code client}. * @param azureFileStorageClient Client that interacts with the service interfaces * @param shareName Name of the share * @param directoryPath Name of the directory * @param snapshot The snapshot of the share */ DirectoryAsyncClient(AzureFileStorageImpl azureFileStorageClient, String shareName, String directoryPath, String snapshot) { Objects.requireNonNull(shareName); Objects.requireNonNull(directoryPath); this.shareName = shareName; this.directoryPath = directoryPath; this.snapshot = snapshot; this.azureFileStorageClient = azureFileStorageClient; } /** * Get the url of the storage directory client. * @return the URL of the storage directory client * @throws RuntimeException If the directory is using a malformed URL. */ public URL getDirectoryUrl() { try { return new URL(azureFileStorageClient.getUrl()); } catch (MalformedURLException e) { throw logger.logExceptionAsError(new RuntimeException(String.format("Invalid URL on %s: %s" + getClass().getSimpleName(), azureFileStorageClient.getUrl()), e)); } } /** * Constructs a FileAsyncClient that interacts with the specified file. * * <p>If the file doesn't exist in this directory {@link FileAsyncClient
class DirectoryAsyncClient { private final ClientLogger logger = new ClientLogger(DirectoryAsyncClient.class); private final AzureFileStorageImpl azureFileStorageClient; private final String shareName; private final String directoryPath; private final String snapshot; /** * Creates a DirectoryAsyncClient that sends requests to the storage directory at {@link AzureFileStorageImpl * Each service call goes through the {@link HttpPipeline pipeline} in the {@code client}. * @param azureFileStorageClient Client that interacts with the service interfaces * @param shareName Name of the share * @param directoryPath Name of the directory * @param snapshot The snapshot of the share */ DirectoryAsyncClient(AzureFileStorageImpl azureFileStorageClient, String shareName, String directoryPath, String snapshot) { Objects.requireNonNull(shareName); Objects.requireNonNull(directoryPath); this.shareName = shareName; this.directoryPath = directoryPath; this.snapshot = snapshot; this.azureFileStorageClient = azureFileStorageClient; } /** * Get the url of the storage directory client. * @return the URL of the storage directory client * @throws RuntimeException If the directory is using a malformed URL. */ public URL getDirectoryUrl() { try { return new URL(azureFileStorageClient.getUrl()); } catch (MalformedURLException e) { throw logger.logExceptionAsError(new RuntimeException(String.format("Invalid URL on %s: %s" + getClass().getSimpleName(), azureFileStorageClient.getUrl()), e)); } } /** * Constructs a FileAsyncClient that interacts with the specified file. * * <p>If the file doesn't exist in this directory {@link FileAsyncClient
The REST spec states that the response is lexically sorted with directories and files intermingled and I wanted to align on this. https://docs.microsoft.com/en-us/rest/api/storageservices/list-directories-and-files#remarks
/**
 * Flattens one list-files-and-directories segment into {@code FileRef}
 * entries (directories flagged, files with properties), returned in
 * lexical name order.
 */
private List<FileRef> convertResponseAndGetNumOfResults(DirectorysListFilesAndDirectoriesSegmentResponse response) {
    final List<FileRef> results = new ArrayList<>();
    if (response.value().segment() != null) {
        response.value().segment().directoryItems()
            .forEach(item -> results.add(new FileRef(item.name(), true, null)));
        response.value().segment().fileItems()
            .forEach(item -> results.add(new FileRef(item.name(), false, item.properties())));
    }
    results.sort(Comparator.comparing(FileRef::name));
    return results;
}
fileRefs.sort(Comparator.comparing(FileRef::name));
/**
 * Converts one list-files-and-directories segment into {@code FileRef}s.
 * The {@code TreeSet} comparator on name keeps the combined directory and
 * file entries lexically sorted, aligning with the REST spec's ordering.
 * NOTE(review): equal-named entries would be silently deduplicated by the
 * set — confirm the service never returns a directory and file of one name.
 */
private List<FileRef> convertResponseAndGetNumOfResults(DirectorysListFilesAndDirectoriesSegmentResponse response) {
    Set<FileRef> fileRefs = new TreeSet<>(Comparator.comparing(FileRef::name));
    if (response.value().segment() != null) {
        // Directories have no file properties, hence the null.
        response.value().segment().directoryItems()
            .forEach(directoryItem -> fileRefs.add(new FileRef(directoryItem.name(), true, null)));
        response.value().segment().fileItems()
            .forEach(fileItem -> fileRefs.add(new FileRef(fileItem.name(), false, fileItem.properties())));
    }
    return new ArrayList<>(fileRefs);
}
class DirectoryAsyncClient { private final ClientLogger logger = new ClientLogger(DirectoryAsyncClient.class); private final AzureFileStorageImpl azureFileStorageClient; private final String shareName; private final String directoryPath; private final String snapshot; /** * Creates a DirectoryAsyncClient that sends requests to the storage directory at {@link AzureFileStorageImpl * Each service call goes through the {@link HttpPipeline pipeline} in the {@code client}. * @param azureFileStorageClient Client that interacts with the service interfaces * @param shareName Name of the share * @param directoryPath Name of the directory * @param snapshot The snapshot of the share */ DirectoryAsyncClient(AzureFileStorageImpl azureFileStorageClient, String shareName, String directoryPath, String snapshot) { Objects.requireNonNull(shareName); Objects.requireNonNull(directoryPath); this.shareName = shareName; this.directoryPath = directoryPath; this.snapshot = snapshot; this.azureFileStorageClient = azureFileStorageClient; } /** * Get the url of the storage directory client. * @return the URL of the storage directory client * @throws RuntimeException If the directory is using a malformed URL. */ public URL getDirectoryUrl() { try { return new URL(azureFileStorageClient.getUrl()); } catch (MalformedURLException e) { throw logger.logExceptionAsError(new RuntimeException(String.format("Invalid URL on %s: %s" + getClass().getSimpleName(), azureFileStorageClient.getUrl()), e)); } } /** * Constructs a FileAsyncClient that interacts with the specified file. * * <p>If the file doesn't exist in this directory {@link FileAsyncClient
class DirectoryAsyncClient { private final ClientLogger logger = new ClientLogger(DirectoryAsyncClient.class); private final AzureFileStorageImpl azureFileStorageClient; private final String shareName; private final String directoryPath; private final String snapshot; /** * Creates a DirectoryAsyncClient that sends requests to the storage directory at {@link AzureFileStorageImpl * Each service call goes through the {@link HttpPipeline pipeline} in the {@code client}. * @param azureFileStorageClient Client that interacts with the service interfaces * @param shareName Name of the share * @param directoryPath Name of the directory * @param snapshot The snapshot of the share */ DirectoryAsyncClient(AzureFileStorageImpl azureFileStorageClient, String shareName, String directoryPath, String snapshot) { Objects.requireNonNull(shareName); Objects.requireNonNull(directoryPath); this.shareName = shareName; this.directoryPath = directoryPath; this.snapshot = snapshot; this.azureFileStorageClient = azureFileStorageClient; } /** * Get the url of the storage directory client. * @return the URL of the storage directory client * @throws RuntimeException If the directory is using a malformed URL. */ public URL getDirectoryUrl() { try { return new URL(azureFileStorageClient.getUrl()); } catch (MalformedURLException e) { throw logger.logExceptionAsError(new RuntimeException(String.format("Invalid URL on %s: %s" + getClass().getSimpleName(), azureFileStorageClient.getUrl()), e)); } } /** * Constructs a FileAsyncClient that interacts with the specified file. * * <p>If the file doesn't exist in this directory {@link FileAsyncClient
If the rest spec says it's sorted, wouldn't it already be sorted?
/**
 * Maps one list-files-and-directories segment to {@code FileRef}s —
 * directories marked as such, files carrying their properties — sorted
 * by name before returning.
 */
private List<FileRef> convertResponseAndGetNumOfResults(DirectorysListFilesAndDirectoriesSegmentResponse response) {
    final List<FileRef> entries = new ArrayList<>();
    if (response.value().segment() != null) {
        response.value().segment().directoryItems()
            .forEach(d -> entries.add(new FileRef(d.name(), true, null)));
        response.value().segment().fileItems()
            .forEach(f -> entries.add(new FileRef(f.name(), false, f.properties())));
    }
    entries.sort(Comparator.comparing(FileRef::name));
    return entries;
}
fileRefs.sort(Comparator.comparing(FileRef::name));
/**
 * Converts one list-files-and-directories segment into {@code FileRef}s.
 * A name-keyed {@code TreeSet} interleaves directories and files in
 * lexical order, matching the service's documented response ordering.
 * NOTE(review): the set drops entries with equal names — presumably a
 * directory and file cannot collide on name within one listing; verify.
 */
private List<FileRef> convertResponseAndGetNumOfResults(DirectorysListFilesAndDirectoriesSegmentResponse response) {
    Set<FileRef> fileRefs = new TreeSet<>(Comparator.comparing(FileRef::name));
    if (response.value().segment() != null) {
        // null: directories have no file properties.
        response.value().segment().directoryItems()
            .forEach(directoryItem -> fileRefs.add(new FileRef(directoryItem.name(), true, null)));
        response.value().segment().fileItems()
            .forEach(fileItem -> fileRefs.add(new FileRef(fileItem.name(), false, fileItem.properties())));
    }
    return new ArrayList<>(fileRefs);
}
class DirectoryAsyncClient { private final ClientLogger logger = new ClientLogger(DirectoryAsyncClient.class); private final AzureFileStorageImpl azureFileStorageClient; private final String shareName; private final String directoryPath; private final String snapshot; /** * Creates a DirectoryAsyncClient that sends requests to the storage directory at {@link AzureFileStorageImpl * Each service call goes through the {@link HttpPipeline pipeline} in the {@code client}. * @param azureFileStorageClient Client that interacts with the service interfaces * @param shareName Name of the share * @param directoryPath Name of the directory * @param snapshot The snapshot of the share */ DirectoryAsyncClient(AzureFileStorageImpl azureFileStorageClient, String shareName, String directoryPath, String snapshot) { Objects.requireNonNull(shareName); Objects.requireNonNull(directoryPath); this.shareName = shareName; this.directoryPath = directoryPath; this.snapshot = snapshot; this.azureFileStorageClient = azureFileStorageClient; } /** * Get the url of the storage directory client. * @return the URL of the storage directory client * @throws RuntimeException If the directory is using a malformed URL. */ public URL getDirectoryUrl() { try { return new URL(azureFileStorageClient.getUrl()); } catch (MalformedURLException e) { throw logger.logExceptionAsError(new RuntimeException(String.format("Invalid URL on %s: %s" + getClass().getSimpleName(), azureFileStorageClient.getUrl()), e)); } } /** * Constructs a FileAsyncClient that interacts with the specified file. * * <p>If the file doesn't exist in this directory {@link FileAsyncClient
class DirectoryAsyncClient { private final ClientLogger logger = new ClientLogger(DirectoryAsyncClient.class); private final AzureFileStorageImpl azureFileStorageClient; private final String shareName; private final String directoryPath; private final String snapshot; /** * Creates a DirectoryAsyncClient that sends requests to the storage directory at {@link AzureFileStorageImpl * Each service call goes through the {@link HttpPipeline pipeline} in the {@code client}. * @param azureFileStorageClient Client that interacts with the service interfaces * @param shareName Name of the share * @param directoryPath Name of the directory * @param snapshot The snapshot of the share */ DirectoryAsyncClient(AzureFileStorageImpl azureFileStorageClient, String shareName, String directoryPath, String snapshot) { Objects.requireNonNull(shareName); Objects.requireNonNull(directoryPath); this.shareName = shareName; this.directoryPath = directoryPath; this.snapshot = snapshot; this.azureFileStorageClient = azureFileStorageClient; } /** * Get the url of the storage directory client. * @return the URL of the storage directory client * @throws RuntimeException If the directory is using a malformed URL. */ public URL getDirectoryUrl() { try { return new URL(azureFileStorageClient.getUrl()); } catch (MalformedURLException e) { throw logger.logExceptionAsError(new RuntimeException(String.format("Invalid URL on %s: %s" + getClass().getSimpleName(), azureFileStorageClient.getUrl()), e)); } } /** * Constructs a FileAsyncClient that interacts with the specified file. * * <p>If the file doesn't exist in this directory {@link FileAsyncClient
We should sort within our return type, {@code FileRef}. The line here sorts by {@code FileRef} name so directories and files are interleaved, instead of being sorted separately as directories and files.
/**
 * Builds the {@code FileRef} view of one list-files-and-directories
 * segment (directories flagged true, files with their properties) and
 * sorts the combined list by name.
 */
private List<FileRef> convertResponseAndGetNumOfResults(DirectorysListFilesAndDirectoriesSegmentResponse response) {
    final List<FileRef> combined = new ArrayList<>();
    if (response.value().segment() != null) {
        response.value().segment().directoryItems()
            .forEach(dirItem -> combined.add(new FileRef(dirItem.name(), true, null)));
        response.value().segment().fileItems()
            .forEach(fileEntry -> combined.add(new FileRef(fileEntry.name(), false, fileEntry.properties())));
    }
    combined.sort(Comparator.comparing(FileRef::name));
    return combined;
}
fileRefs.sort(Comparator.comparing(FileRef::name));
/**
 * Converts one list-files-and-directories segment into {@code FileRef}s.
 * Using a {@code TreeSet} ordered on name keeps directories and files
 * intermingled lexically, per the service's documented ordering.
 * NOTE(review): equal-named entries are silently collapsed by the set —
 * confirm name collisions between a directory and a file are impossible.
 */
private List<FileRef> convertResponseAndGetNumOfResults(DirectorysListFilesAndDirectoriesSegmentResponse response) {
    Set<FileRef> fileRefs = new TreeSet<>(Comparator.comparing(FileRef::name));
    if (response.value().segment() != null) {
        // Directories carry no properties payload, hence null.
        response.value().segment().directoryItems()
            .forEach(directoryItem -> fileRefs.add(new FileRef(directoryItem.name(), true, null)));
        response.value().segment().fileItems()
            .forEach(fileItem -> fileRefs.add(new FileRef(fileItem.name(), false, fileItem.properties())));
    }
    return new ArrayList<>(fileRefs);
}
class DirectoryAsyncClient { private final ClientLogger logger = new ClientLogger(DirectoryAsyncClient.class); private final AzureFileStorageImpl azureFileStorageClient; private final String shareName; private final String directoryPath; private final String snapshot; /** * Creates a DirectoryAsyncClient that sends requests to the storage directory at {@link AzureFileStorageImpl * Each service call goes through the {@link HttpPipeline pipeline} in the {@code client}. * @param azureFileStorageClient Client that interacts with the service interfaces * @param shareName Name of the share * @param directoryPath Name of the directory * @param snapshot The snapshot of the share */ DirectoryAsyncClient(AzureFileStorageImpl azureFileStorageClient, String shareName, String directoryPath, String snapshot) { Objects.requireNonNull(shareName); Objects.requireNonNull(directoryPath); this.shareName = shareName; this.directoryPath = directoryPath; this.snapshot = snapshot; this.azureFileStorageClient = azureFileStorageClient; } /** * Get the url of the storage directory client. * @return the URL of the storage directory client * @throws RuntimeException If the directory is using a malformed URL. */ public URL getDirectoryUrl() { try { return new URL(azureFileStorageClient.getUrl()); } catch (MalformedURLException e) { throw logger.logExceptionAsError(new RuntimeException(String.format("Invalid URL on %s: %s" + getClass().getSimpleName(), azureFileStorageClient.getUrl()), e)); } } /** * Constructs a FileAsyncClient that interacts with the specified file. * * <p>If the file doesn't exist in this directory {@link FileAsyncClient
class DirectoryAsyncClient { private final ClientLogger logger = new ClientLogger(DirectoryAsyncClient.class); private final AzureFileStorageImpl azureFileStorageClient; private final String shareName; private final String directoryPath; private final String snapshot; /** * Creates a DirectoryAsyncClient that sends requests to the storage directory at {@link AzureFileStorageImpl * Each service call goes through the {@link HttpPipeline pipeline} in the {@code client}. * @param azureFileStorageClient Client that interacts with the service interfaces * @param shareName Name of the share * @param directoryPath Name of the directory * @param snapshot The snapshot of the share */ DirectoryAsyncClient(AzureFileStorageImpl azureFileStorageClient, String shareName, String directoryPath, String snapshot) { Objects.requireNonNull(shareName); Objects.requireNonNull(directoryPath); this.shareName = shareName; this.directoryPath = directoryPath; this.snapshot = snapshot; this.azureFileStorageClient = azureFileStorageClient; } /** * Get the url of the storage directory client. * @return the URL of the storage directory client * @throws RuntimeException If the directory is using a malformed URL. */ public URL getDirectoryUrl() { try { return new URL(azureFileStorageClient.getUrl()); } catch (MalformedURLException e) { throw logger.logExceptionAsError(new RuntimeException(String.format("Invalid URL on %s: %s" + getClass().getSimpleName(), azureFileStorageClient.getUrl()), e)); } } /** * Constructs a FileAsyncClient that interacts with the specified file. * * <p>If the file doesn't exist in this directory {@link FileAsyncClient
Typo, missing the 'c' in context
public void createWithResponse() { DirectoryClient directoryClient = createClientWithSASToken(); Response<DirectoryInfo> response = directoryClient.createWithResponse( Collections.singletonMap("directory", "metadata"), Duration.ofSeconds(1), new Context(key1, value1)); System.out.println("Completed creating the directory with status code: " + response.statusCode()); }
public void createWithResponse() { DirectoryClient directoryClient = createClientWithSASToken(); FileSmbProperties smbProperties = new FileSmbProperties(); String filePermission = "filePermission"; Response<DirectoryInfo> response = directoryClient.createWithResponse(smbProperties, filePermission, Collections.singletonMap("directory", "metadata"), Duration.ofSeconds(1), new Context(key1, value1)); System.out.println("Completed creating the directory with status code: " + response.statusCode()); }
class DirectoryJavaDocCodeSamples { private String key1 = "key1"; private String value1 = "val1"; /** * Generates code sample for {@link DirectoryClient} instantiation. */ public void initialization() { DirectoryClient client = new FileClientBuilder() .connectionString("${connectionString}") .endpoint("${endpoint}") .buildDirectoryClient(); } /** * Generates code sample for creating a {@link DirectoryClient} with {@link SASTokenCredential} * @return An instance of {@link DirectoryClient} */ public DirectoryClient createClientWithSASToken() { DirectoryClient directoryClient = new FileClientBuilder() .endpoint("https: .shareName("myshare") .resourcePath("mydirectory") .buildDirectoryClient(); return directoryClient; } /** * Generates code sample for creating a {@link DirectoryClient} with {@link SASTokenCredential} * @return An instance of {@link DirectoryClient} */ public DirectoryClient createClientWithCredential() { DirectoryClient directoryClient = new FileClientBuilder() .endpoint("https: .credential(SASTokenCredential.fromQueryParameters(Utility.parseQueryString("${SASTokenQueryParams}"))) .shareName("myshare") .resourcePath("mydirectory") .buildDirectoryClient(); return directoryClient; } /** * Generates code sample for creating a {@link DirectoryClient} with {@code connectionString} which turns into {@link SharedKeyCredential} * @return An instance of {@link DirectoryClient} */ public DirectoryClient createClientWithConnectionString() { String connectionString = "DefaultEndpointsProtocol=https;AccountName={name};AccountKey={key}" + ";EndpointSuffix={core.windows.net}"; DirectoryClient directoryClient = new FileClientBuilder() .connectionString(connectionString) .shareName("myshare") .resourcePath("mydirectory") .buildDirectoryClient(); return directoryClient; } /** * Generates a code sample for using {@link DirectoryClient */ public void createDirectory() { DirectoryClient directoryClient = createClientWithSASToken(); directoryClient.create(); 
System.out.println("Completed creating the directory. "); } /** * Generates a code sample for using {@link DirectoryClient */ /** * Generates a code sample for using {@link DirectoryClient */ public void createSubDirectory() { DirectoryClient directoryClient = createClientWithSASToken(); directoryClient.createSubDirectory("subdir"); System.out.println("Completed creating the subdirectory."); } /** * Generates a code sample for using {@link DirectoryClient * Duration, Context)} */ public void createSubDirectoryMaxOverload() { DirectoryClient directoryClient = createClientWithSASToken(); Response<DirectoryClient> response = directoryClient.createSubDirectoryWithResponse("subdir", Collections.singletonMap("directory", "metadata"), Duration.ofSeconds(1), new Context(key1, value1)); System.out.printf("Creating the sub directory completed with status code %d", response.statusCode()); } /** * Generates a code sample for using {@link DirectoryClient */ public void createFile() { DirectoryClient directoryClient = createClientWithSASToken(); FileClient response = directoryClient.createFile("myfile", 1024); System.out.println("Completed creating the file: " + response); } /** * Generates a code sample for using {@link DirectoryClient * Map, Duration, Context)} */ public void createFileMaxOverload() { DirectoryClient directoryClient = createClientWithSASToken(); FileHTTPHeaders httpHeaders = new FileHTTPHeaders().fileContentType("text/plain"); Response<FileClient> response = directoryClient.createFileWithResponse("myFile", 1024, httpHeaders, Collections.singletonMap("directory", "metadata"), Duration.ofSeconds(1), new Context(key1, value1)); System.out.println("Completed creating the file with status code: " + response.statusCode()); } /** * Generates a code sample for using {@link DirectoryClient */ public void listDirectoriesAndFiles() { DirectoryClient directoryClient = createClientWithSASToken(); directoryClient.listFilesAndDirectories().forEach( fileRef -> 
System.out.printf("Is the resource a directory? %b. The resource name is: %s.", fileRef.isDirectory(), fileRef.name()) ); } /** * Generates a code sample for using {@link DirectoryClient */ public void listDirectoriesAndFilesMaxOverload() { DirectoryClient directoryClient = createClientWithSASToken(); directoryClient.listFilesAndDirectories("subdir", 10, Duration.ofSeconds(1)).forEach( fileRef -> System.out.printf("Is the resource a directory? %b. The resource name is: %s.", fileRef.isDirectory(), fileRef.name()) ); } /** * Generates a code sample for using {@link DirectoryClient
class DirectoryJavaDocCodeSamples { private String key1 = "key1"; private String value1 = "val1"; /** * Generates code sample for {@link DirectoryClient} instantiation. */ public void initialization() { DirectoryClient client = new FileClientBuilder() .connectionString("${connectionString}") .endpoint("${endpoint}") .buildDirectoryClient(); } /** * Generates code sample for creating a {@link DirectoryClient} with {@link SASTokenCredential} * @return An instance of {@link DirectoryClient} */ public DirectoryClient createClientWithSASToken() { DirectoryClient directoryClient = new FileClientBuilder() .endpoint("https: .shareName("myshare") .resourcePath("mydirectory") .buildDirectoryClient(); return directoryClient; } /** * Generates code sample for creating a {@link DirectoryClient} with {@link SASTokenCredential} * @return An instance of {@link DirectoryClient} */ public DirectoryClient createClientWithCredential() { DirectoryClient directoryClient = new FileClientBuilder() .endpoint("https: .credential(SASTokenCredential.fromQueryParameters(Utility.parseQueryString("${SASTokenQueryParams}"))) .shareName("myshare") .resourcePath("mydirectory") .buildDirectoryClient(); return directoryClient; } /** * Generates code sample for creating a {@link DirectoryClient} with {@code connectionString} which turns into {@link SharedKeyCredential} * @return An instance of {@link DirectoryClient} */ public DirectoryClient createClientWithConnectionString() { String connectionString = "DefaultEndpointsProtocol=https;AccountName={name};AccountKey={key}" + ";EndpointSuffix={core.windows.net}"; DirectoryClient directoryClient = new FileClientBuilder() .connectionString(connectionString) .shareName("myshare") .resourcePath("mydirectory") .buildDirectoryClient(); return directoryClient; } /** * Generates a code sample for using {@link DirectoryClient */ public void createDirectory() { DirectoryClient directoryClient = createClientWithSASToken(); directoryClient.create(); 
System.out.println("Completed creating the directory. "); } /** * Generates a code sample for using {@link DirectoryClient * Duration, Context)} */ /** * Generates a code sample for using {@link DirectoryClient */ public void createSubDirectory() { DirectoryClient directoryClient = createClientWithSASToken(); directoryClient.createSubDirectory("subdir"); System.out.println("Completed creating the subdirectory."); } /** * Generates a code sample for using {@link DirectoryClient */ public void createSubDirectoryMaxOverload() { DirectoryClient directoryClient = createClientWithSASToken(); FileSmbProperties smbProperties = new FileSmbProperties(); String filePermission = "filePermission"; Response<DirectoryClient> response = directoryClient.createSubDirectoryWithResponse("subdir", smbProperties, filePermission, Collections.singletonMap("directory", "metadata"), Duration.ofSeconds(1), new Context(key1, value1)); System.out.printf("Creating the sub directory completed with status code %d", response.statusCode()); } /** * Generates a code sample for using {@link DirectoryClient */ public void createFile() { DirectoryClient directoryClient = createClientWithSASToken(); FileClient response = directoryClient.createFile("myfile", 1024); System.out.println("Completed creating the file: " + response); } /** * Generates a code sample for using {@link DirectoryClient */ public void createFileMaxOverload() { DirectoryClient directoryClient = createClientWithSASToken(); FileHTTPHeaders httpHeaders = new FileHTTPHeaders() .fileContentType("text/html") .fileContentEncoding("gzip") .fileContentLanguage("en") .fileCacheControl("no-transform") .fileContentDisposition("attachment"); FileSmbProperties smbProperties = new FileSmbProperties() .ntfsFileAttributes(EnumSet.of(NtfsFileAttributes.READ_ONLY)) .fileCreationTime(OffsetDateTime.now()) .fileLastWriteTime(OffsetDateTime.now()) .filePermissionKey("filePermissionKey"); String filePermission = "filePermission"; Response<FileClient> 
response = directoryClient.createFileWithResponse("myFile", 1024, httpHeaders, smbProperties, filePermission, Collections.singletonMap("directory", "metadata"), Duration.ofSeconds(1), new Context(key1, value1)); System.out.println("Completed creating the file with status code: " + response.statusCode()); } /** * Generates a code sample for using {@link DirectoryClient */ public void listDirectoriesAndFiles() { DirectoryClient directoryClient = createClientWithSASToken(); directoryClient.listFilesAndDirectories().forEach( fileRef -> System.out.printf("Is the resource a directory? %b. The resource name is: %s.", fileRef.isDirectory(), fileRef.name()) ); } /** * Generates a code sample for using {@link DirectoryClient */ public void listDirectoriesAndFilesMaxOverload() { DirectoryClient directoryClient = createClientWithSASToken(); directoryClient.listFilesAndDirectories("subdir", 10, Duration.ofSeconds(1), new Context(key1, value1)).forEach( fileRef -> System.out.printf("Is the resource a directory? %b. The resource name is: %s.", fileRef.isDirectory(), fileRef.name()) ); } /** * Generates a code sample for using {@link DirectoryClient
Same as the other code snippet class around the duration.
public void createWithResponse() { DirectoryClient directoryClient = createClientWithSASToken(); Response<DirectoryInfo> response = directoryClient.createWithResponse( Collections.singletonMap("directory", "metadata"), Duration.ofSeconds(1), new Context(key1, value1)); System.out.println("Completed creating the directory with status code: " + response.statusCode()); }
Collections.singletonMap("directory", "metadata"), Duration.ofSeconds(1), new Context(key1, value1));
public void createWithResponse() { DirectoryClient directoryClient = createClientWithSASToken(); FileSmbProperties smbProperties = new FileSmbProperties(); String filePermission = "filePermission"; Response<DirectoryInfo> response = directoryClient.createWithResponse(smbProperties, filePermission, Collections.singletonMap("directory", "metadata"), Duration.ofSeconds(1), new Context(key1, value1)); System.out.println("Completed creating the directory with status code: " + response.statusCode()); }
class DirectoryJavaDocCodeSamples { private String key1 = "key1"; private String value1 = "val1"; /** * Generates code sample for {@link DirectoryClient} instantiation. */ public void initialization() { DirectoryClient client = new FileClientBuilder() .connectionString("${connectionString}") .endpoint("${endpoint}") .buildDirectoryClient(); } /** * Generates code sample for creating a {@link DirectoryClient} with {@link SASTokenCredential} * @return An instance of {@link DirectoryClient} */ public DirectoryClient createClientWithSASToken() { DirectoryClient directoryClient = new FileClientBuilder() .endpoint("https: .shareName("myshare") .resourcePath("mydirectory") .buildDirectoryClient(); return directoryClient; } /** * Generates code sample for creating a {@link DirectoryClient} with {@link SASTokenCredential} * @return An instance of {@link DirectoryClient} */ public DirectoryClient createClientWithCredential() { DirectoryClient directoryClient = new FileClientBuilder() .endpoint("https: .credential(SASTokenCredential.fromQueryParameters(Utility.parseQueryString("${SASTokenQueryParams}"))) .shareName("myshare") .resourcePath("mydirectory") .buildDirectoryClient(); return directoryClient; } /** * Generates code sample for creating a {@link DirectoryClient} with {@code connectionString} which turns into {@link SharedKeyCredential} * @return An instance of {@link DirectoryClient} */ public DirectoryClient createClientWithConnectionString() { String connectionString = "DefaultEndpointsProtocol=https;AccountName={name};AccountKey={key}" + ";EndpointSuffix={core.windows.net}"; DirectoryClient directoryClient = new FileClientBuilder() .connectionString(connectionString) .shareName("myshare") .resourcePath("mydirectory") .buildDirectoryClient(); return directoryClient; } /** * Generates a code sample for using {@link DirectoryClient */ public void createDirectory() { DirectoryClient directoryClient = createClientWithSASToken(); directoryClient.create(); 
System.out.println("Completed creating the directory. "); } /** * Generates a code sample for using {@link DirectoryClient */ /** * Generates a code sample for using {@link DirectoryClient */ public void createSubDirectory() { DirectoryClient directoryClient = createClientWithSASToken(); directoryClient.createSubDirectory("subdir"); System.out.println("Completed creating the subdirectory."); } /** * Generates a code sample for using {@link DirectoryClient * Duration, Context)} */ public void createSubDirectoryMaxOverload() { DirectoryClient directoryClient = createClientWithSASToken(); Response<DirectoryClient> response = directoryClient.createSubDirectoryWithResponse("subdir", Collections.singletonMap("directory", "metadata"), Duration.ofSeconds(1), new Context(key1, value1)); System.out.printf("Creating the sub directory completed with status code %d", response.statusCode()); } /** * Generates a code sample for using {@link DirectoryClient */ public void createFile() { DirectoryClient directoryClient = createClientWithSASToken(); FileClient response = directoryClient.createFile("myfile", 1024); System.out.println("Completed creating the file: " + response); } /** * Generates a code sample for using {@link DirectoryClient * Map, Duration, Context)} */ public void createFileMaxOverload() { DirectoryClient directoryClient = createClientWithSASToken(); FileHTTPHeaders httpHeaders = new FileHTTPHeaders().fileContentType("text/plain"); Response<FileClient> response = directoryClient.createFileWithResponse("myFile", 1024, httpHeaders, Collections.singletonMap("directory", "metadata"), Duration.ofSeconds(1), new Context(key1, value1)); System.out.println("Completed creating the file with status code: " + response.statusCode()); } /** * Generates a code sample for using {@link DirectoryClient */ public void listDirectoriesAndFiles() { DirectoryClient directoryClient = createClientWithSASToken(); directoryClient.listFilesAndDirectories().forEach( fileRef -> 
System.out.printf("Is the resource a directory? %b. The resource name is: %s.", fileRef.isDirectory(), fileRef.name()) ); } /** * Generates a code sample for using {@link DirectoryClient */ public void listDirectoriesAndFilesMaxOverload() { DirectoryClient directoryClient = createClientWithSASToken(); directoryClient.listFilesAndDirectories("subdir", 10, Duration.ofSeconds(1)).forEach( fileRef -> System.out.printf("Is the resource a directory? %b. The resource name is: %s.", fileRef.isDirectory(), fileRef.name()) ); } /** * Generates a code sample for using {@link DirectoryClient
class DirectoryJavaDocCodeSamples { private String key1 = "key1"; private String value1 = "val1"; /** * Generates code sample for {@link DirectoryClient} instantiation. */ public void initialization() { DirectoryClient client = new FileClientBuilder() .connectionString("${connectionString}") .endpoint("${endpoint}") .buildDirectoryClient(); } /** * Generates code sample for creating a {@link DirectoryClient} with {@link SASTokenCredential} * @return An instance of {@link DirectoryClient} */ public DirectoryClient createClientWithSASToken() { DirectoryClient directoryClient = new FileClientBuilder() .endpoint("https: .shareName("myshare") .resourcePath("mydirectory") .buildDirectoryClient(); return directoryClient; } /** * Generates code sample for creating a {@link DirectoryClient} with {@link SASTokenCredential} * @return An instance of {@link DirectoryClient} */ public DirectoryClient createClientWithCredential() { DirectoryClient directoryClient = new FileClientBuilder() .endpoint("https: .credential(SASTokenCredential.fromQueryParameters(Utility.parseQueryString("${SASTokenQueryParams}"))) .shareName("myshare") .resourcePath("mydirectory") .buildDirectoryClient(); return directoryClient; } /** * Generates code sample for creating a {@link DirectoryClient} with {@code connectionString} which turns into {@link SharedKeyCredential} * @return An instance of {@link DirectoryClient} */ public DirectoryClient createClientWithConnectionString() { String connectionString = "DefaultEndpointsProtocol=https;AccountName={name};AccountKey={key}" + ";EndpointSuffix={core.windows.net}"; DirectoryClient directoryClient = new FileClientBuilder() .connectionString(connectionString) .shareName("myshare") .resourcePath("mydirectory") .buildDirectoryClient(); return directoryClient; } /** * Generates a code sample for using {@link DirectoryClient */ public void createDirectory() { DirectoryClient directoryClient = createClientWithSASToken(); directoryClient.create(); 
System.out.println("Completed creating the directory. "); } /** * Generates a code sample for using {@link DirectoryClient * Duration, Context)} */ /** * Generates a code sample for using {@link DirectoryClient */ public void createSubDirectory() { DirectoryClient directoryClient = createClientWithSASToken(); directoryClient.createSubDirectory("subdir"); System.out.println("Completed creating the subdirectory."); } /** * Generates a code sample for using {@link DirectoryClient */ public void createSubDirectoryMaxOverload() { DirectoryClient directoryClient = createClientWithSASToken(); FileSmbProperties smbProperties = new FileSmbProperties(); String filePermission = "filePermission"; Response<DirectoryClient> response = directoryClient.createSubDirectoryWithResponse("subdir", smbProperties, filePermission, Collections.singletonMap("directory", "metadata"), Duration.ofSeconds(1), new Context(key1, value1)); System.out.printf("Creating the sub directory completed with status code %d", response.statusCode()); } /** * Generates a code sample for using {@link DirectoryClient */ public void createFile() { DirectoryClient directoryClient = createClientWithSASToken(); FileClient response = directoryClient.createFile("myfile", 1024); System.out.println("Completed creating the file: " + response); } /** * Generates a code sample for using {@link DirectoryClient */ public void createFileMaxOverload() { DirectoryClient directoryClient = createClientWithSASToken(); FileHTTPHeaders httpHeaders = new FileHTTPHeaders() .fileContentType("text/html") .fileContentEncoding("gzip") .fileContentLanguage("en") .fileCacheControl("no-transform") .fileContentDisposition("attachment"); FileSmbProperties smbProperties = new FileSmbProperties() .ntfsFileAttributes(EnumSet.of(NtfsFileAttributes.READ_ONLY)) .fileCreationTime(OffsetDateTime.now()) .fileLastWriteTime(OffsetDateTime.now()) .filePermissionKey("filePermissionKey"); String filePermission = "filePermission"; Response<FileClient> 
response = directoryClient.createFileWithResponse("myFile", 1024, httpHeaders, smbProperties, filePermission, Collections.singletonMap("directory", "metadata"), Duration.ofSeconds(1), new Context(key1, value1)); System.out.println("Completed creating the file with status code: " + response.statusCode()); } /** * Generates a code sample for using {@link DirectoryClient */ public void listDirectoriesAndFiles() { DirectoryClient directoryClient = createClientWithSASToken(); directoryClient.listFilesAndDirectories().forEach( fileRef -> System.out.printf("Is the resource a directory? %b. The resource name is: %s.", fileRef.isDirectory(), fileRef.name()) ); } /** * Generates a code sample for using {@link DirectoryClient */ public void listDirectoriesAndFilesMaxOverload() { DirectoryClient directoryClient = createClientWithSASToken(); directoryClient.listFilesAndDirectories("subdir", 10, Duration.ofSeconds(1), new Context(key1, value1)).forEach( fileRef -> System.out.printf("Is the resource a directory? %b. The resource name is: %s.", fileRef.isDirectory(), fileRef.name()) ); } /** * Generates a code sample for using {@link DirectoryClient
Didn't update the code snippet name
public void uploadWithResponse() { FileClient fileClient = createClientWithSASToken(); ByteBuffer defaultData = ByteBuffer.wrap("default".getBytes(StandardCharsets.UTF_8)); Response<FileUploadInfo> response = fileClient.uploadWithResponse(defaultData, defaultData.remaining(), Duration.ofSeconds(1), new Context(key1, value1)); System.out.println("Complete uploading the data with status code: " + response.statusCode()); }
public void uploadWithResponse() { FileClient fileClient = createClientWithSASToken(); ByteBuffer defaultData = ByteBuffer.wrap("default".getBytes(StandardCharsets.UTF_8)); Response<FileUploadInfo> response = fileClient.uploadWithResponse(defaultData, defaultData.remaining(), Duration.ofSeconds(1), new Context(key1, value1)); System.out.println("Complete uploading the data with status code: " + response.statusCode()); }
class FileJavaDocCodeSamples { private String key1 = "key1"; private String value1 = "val1"; /** * Generates code sample for {@link FileClient} instantiation. */ public void initialization() { FileClient client = new FileClientBuilder() .connectionString("${connectionString}") .endpoint("${endpoint}") .buildFileClient(); } /** * Generates code sample for creating a {@link FileClient} with {@link SASTokenCredential} * @return An instance of {@link FileClient} */ public FileClient createClientWithSASToken() { FileClient fileClient = new FileClientBuilder() .endpoint("https: .shareName("myshare") .resourcePath("myfilepath") .buildFileClient(); return fileClient; } /** * Generates code sample for creating a {@link FileClient} with {@link SASTokenCredential} * @return An instance of {@link FileClient} */ public FileClient createClientWithCredential() { FileClient fileClient = new FileClientBuilder() .endpoint("https: .credential(SASTokenCredential.fromQueryParameters(Utility.parseQueryString("${SASTokenQueryParams}"))) .shareName("myshare") .resourcePath("myfilepath") .buildFileClient(); return fileClient; } /** * Generates code sample for creating a {@link FileClient} with {@code connectionString} * which turns into {@link SharedKeyCredential} * @return An instance of {@link FileClient} */ public FileClient createClientWithConnectionString() { String connectionString = "DefaultEndpointsProtocol=https;AccountName={name};AccountKey={key};" + "EndpointSuffix={core.windows.net}"; FileClient fileClient = new FileClientBuilder() .connectionString(connectionString).shareName("myshare").resourcePath("myfilepath") .buildFileClient(); return fileClient; } /** * Generates a code sample for using {@link FileClient */ public void createFile() { FileClient fileClient = createClientWithSASToken(); FileInfo response = fileClient.create(1024); System.out.println("Complete creating the file."); } /** * Generates a code sample for using {@link FileClient * Duration, Context)} */ public 
void createWithResponse() { FileClient fileClient = createClientWithSASToken(); FileHTTPHeaders httpHeaders = new FileHTTPHeaders().fileContentType("text/plain"); Response<FileInfo> response = fileClient.createWithResponse(1024, httpHeaders, Collections.singletonMap("file", "updatedMetadata"), Duration.ofSeconds(1), new Context(key1, value1)); System.out.printf("Creating the file completed with status code %d", response.statusCode()); } /** * Generates a code sample for using {@link FileClient */ public void startCopy() { FileClient fileClient = createClientWithSASToken(); FileCopyInfo response = fileClient.startCopy( "https: Collections.singletonMap("file", "metadata")); System.out.println("Complete copying the file with copy Id: " + response.copyId()); } /** * Generates a code sample for using {@link FileClient */ public void startCopyWithResponse() { FileClient fileClient = createClientWithSASToken(); Response<FileCopyInfo> response = fileClient.startCopyWithResponse( "https: Collections.singletonMap("file", "metadata"), Duration.ofSeconds(1), new Context(key1, value1)); System.out.println("Complete copying the file with copy Id: " + response.value().copyId()); } /** * Generates a code sample for using {@link FileClient */ public void abortCopyFile() { FileClient fileClient = createClientWithSASToken(); fileClient.abortCopy("someCopyId"); System.out.printf("Abort copying the file completed."); } /** * Generates a code sample for using {@link FileClient */ public void abortCopyWithResponse() { FileClient fileClient = createClientWithSASToken(); VoidResponse response = fileClient.abortCopyWithResponse("someCopyId", Duration.ofSeconds(1), new Context(key1, value1)); System.out.printf("Abort copying the file completed with status code %d", response.statusCode()); } /** * Generates a code sample for using {@link FileClient */ public void uploadData() { FileClient fileClient = createClientWithSASToken(); ByteBuffer defaultData = 
ByteBuffer.wrap("default".getBytes(StandardCharsets.UTF_8)); FileUploadInfo response = fileClient.upload(defaultData, defaultData.remaining()); System.out.println("Complete uploading the data with eTag: " + response.eTag()); } /** * Generates a code sample for using {@link FileClient */ /** * Generates a code sample for using {@link FileClient * Duration, Context)} */ public void uploadWithResponseMaxOverload() { FileClient fileClient = createClientWithSASToken(); ByteBuffer defaultData = ByteBuffer.wrap("default".getBytes(StandardCharsets.UTF_8)); Response<FileUploadInfo> response = fileClient.uploadWithResponse(defaultData, defaultData.remaining(), 1024, Duration.ofSeconds(1), new Context(key1, value1)); System.out.println("Complete uploading the data with status code: " + response.statusCode()); } /** * Generates a code sample for using {@link FileClient */ public void clearRange() { FileClient fileClient = createClientWithSASToken(); FileUploadInfo response = fileClient.clearRange(1024); System.out.println("Complete clearing the range with eTag: " + response.eTag()); } /** * Generates a code sample for using {@link FileClient */ public void clearRangeMaxOverload() { FileClient fileClient = createClientWithSASToken(); Response<FileUploadInfo> response = fileClient.clearRangeWithResponse(1024, 1024, Duration.ofSeconds(1), new Context(key1, value1)); System.out.println("Complete clearing the range with status code: " + response.statusCode()); } /** * Generates a code sample for using {@link FileClient */ public void uploadFile() { FileClient fileClient = createClientWithSASToken(); fileClient.uploadFromFile("someFilePath"); } /** * Generates a code sample for using {@link FileClient */ public void uploadFileMaxOverload() { FileClient fileClient = createClientWithSASToken(); fileClient.uploadFromFile("someFilePath"); if (fileClient.getProperties() != null) { System.out.printf("Upload the file with length of %d completed", 
fileClient.getProperties().contentLength()); } } /** * Generates a code sample for using {@link FileClient */ public void downloadData() { FileClient fileClient = createClientWithSASToken(); FileDownloadInfo response = fileClient.downloadWithProperties(); System.out.println("Complete downloading the data."); response.body().subscribe( byteBuffer -> System.out.println("Complete downloading the data with body: " + new String(byteBuffer.array(), StandardCharsets.UTF_8)), error -> System.err.print(error.toString()), () -> System.out.println("Complete downloading the data!") ); } /** * Generates a code sample for using {@link FileClient * FileRange, Boolean, Duration, Context)} */ public void downloadWithPropertiesWithResponse() { FileClient fileClient = createClientWithSASToken(); Response<FileDownloadInfo> response = fileClient.downloadWithPropertiesWithResponse(new FileRange(1024, 2047L), false, Duration.ofSeconds(1), new Context(key1, value1)); System.out.println("Complete downloading the data with status code: " + response.statusCode()); response.value().body().subscribe( byteBuffer -> System.out.println("Complete downloading the data with body: " + new String(byteBuffer.array(), StandardCharsets.UTF_8)), error -> System.err.print(error.toString()), () -> System.out.println("Complete downloading the data!") ); } /** * Generates a code sample for using {@link FileClient */ public void downloadFile() { FileClient fileClient = createClientWithSASToken(); fileClient.downloadToFile("somelocalfilepath"); if (Files.exists(Paths.get("somelocalfilepath"))) { System.out.println("Complete downloading the file."); } } /** * Generates a code sample for using {@link FileClient */ public void downloadFileMaxOverload() { FileClient fileClient = createClientWithSASToken(); fileClient.downloadToFile("somelocalfilepath", new FileRange(1024, 2047L)); if (Files.exists(Paths.get("somelocalfilepath"))) { System.out.println("Complete downloading the file."); } } /** * Generates a code 
sample for using {@link FileClient */ public void deleteFile() { FileClient fileClient = createClientWithSASToken(); fileClient.delete(); System.out.println("Complete deleting the file."); } /** * Generates a code sample for using {@link FileClient */ public void deleteWithResponse() { FileClient fileClient = createClientWithSASToken(); VoidResponse response = fileClient.deleteWithResponse(Duration.ofSeconds(1), new Context(key1, value1)); System.out.println("Complete deleting the file with status code: " + response.statusCode()); } /** * Generates a code sample for using {@link FileClient */ public void getProperties() { FileClient fileClient = createClientWithSASToken(); FileProperties properties = fileClient.getProperties(); System.out.printf("File latest modified date is %s.", properties.lastModified()); } /** * Generates a code sample for using {@link FileClient */ public void getPropertiesWithResponse() { FileClient fileClient = createClientWithSASToken(); Response<FileProperties> response = fileClient.getPropertiesWithResponse( Duration.ofSeconds(1), new Context(key1, value1)); System.out.printf("File latest modified date is %s.", response.value().lastModified()); } /** * Generates a code sample for using {@link FileClient */ public void setMetadata() { FileClient fileClient = createClientWithSASToken(); fileClient.setMetadata(Collections.singletonMap("file", "updatedMetadata")); System.out.printf("Setting the file metadata completed."); } /** * Generates a code sample for using {@link FileClient */ public void setMetadataWithResponse() { FileClient fileClient = createClientWithSASToken(); Response<FileMetadataInfo> response = fileClient.setMetadataWithResponse( Collections.singletonMap("file", "updatedMetadata"), Duration.ofSeconds(1), new Context(key1, value1)); System.out.printf("Setting the file metadata completed with status code %d", response.statusCode()); } /** * Generates a code sample for using {@link FileClient * Duration, Context)} to clear 
metadata. */ public void clearMetadataWithResponse() { FileClient fileClient = createClientWithSASToken(); Response<FileMetadataInfo> response = fileClient.setMetadataWithResponse(null, Duration.ofSeconds(1), new Context(key1, value1)); System.out.printf("Setting the file metadata completed with status code %d", response.statusCode()); } /** * Generates a code sample for using {@link FileClient */ public void clearMetadata() { FileClient fileClient = createClientWithSASToken(); fileClient.setMetadataWithResponse(null, null, new Context(key1, value1)); System.out.printf("Setting the file metadata completed."); } /** * Generates a code sample for using {@link FileClient */ public void setHTTPHeaders() { FileClient fileClient = createClientWithSASToken(); FileHTTPHeaders httpHeaders = new FileHTTPHeaders().fileContentType("text/plain"); fileClient.setHttpHeaders(1024, httpHeaders); System.out.printf("Setting the file httpHeaders completed."); } /** * Generates a code sample for using {@link FileClient */ public void clearSyncHTTPHeaders() { FileClient fileClient = createClientWithSASToken(); FileInfo response = fileClient.setHttpHeaders(1024, null); System.out.printf("Setting the file httpHeaders completed."); } /** * Generates a code sample for using {@link FileClient * Duration, Context)} */ public void setHttpHeadersWithResponse() { FileClient fileClient = createClientWithSASToken(); FileHTTPHeaders httpHeaders = new FileHTTPHeaders().fileContentType("text/plain"); Response<FileInfo> response = fileClient.setHttpHeadersWithResponse(1024, httpHeaders, Duration.ofSeconds(1), new Context(key1, value1)); System.out.printf("Setting the file httpHeaders completed with status code %d", response.statusCode()); } /** * Generates a code sample for using {@link FileClient * Duration, Context)} * (long, FileHTTPHeaders)}
class FileJavaDocCodeSamples { private String key1 = "key1"; private String value1 = "val1"; /** * Generates code sample for {@link FileClient} instantiation. */ public void initialization() { FileClient client = new FileClientBuilder() .connectionString("${connectionString}") .endpoint("${endpoint}") .buildFileClient(); } /** * Generates code sample for creating a {@link FileClient} with {@link SASTokenCredential} * @return An instance of {@link FileClient} */ public FileClient createClientWithSASToken() { FileClient fileClient = new FileClientBuilder() .endpoint("https: .shareName("myshare") .resourcePath("myfilepath") .buildFileClient(); return fileClient; } /** * Generates code sample for creating a {@link FileClient} with {@link SASTokenCredential} * @return An instance of {@link FileClient} */ public FileClient createClientWithCredential() { FileClient fileClient = new FileClientBuilder() .endpoint("https: .credential(SASTokenCredential.fromQueryParameters(Utility.parseQueryString("${SASTokenQueryParams}"))) .shareName("myshare") .resourcePath("myfilepath") .buildFileClient(); return fileClient; } /** * Generates code sample for creating a {@link FileClient} with {@code connectionString} * which turns into {@link SharedKeyCredential} * @return An instance of {@link FileClient} */ public FileClient createClientWithConnectionString() { String connectionString = "DefaultEndpointsProtocol=https;AccountName={name};AccountKey={key};" + "EndpointSuffix={core.windows.net}"; FileClient fileClient = new FileClientBuilder() .connectionString(connectionString).shareName("myshare").resourcePath("myfilepath") .buildFileClient(); return fileClient; } /** * Generates a code sample for using {@link FileClient */ public void createFile() { FileClient fileClient = createClientWithSASToken(); FileInfo response = fileClient.create(1024); System.out.println("Complete creating the file."); } /** * Generates a code sample for using {@link FileClient * String, Map, Duration, 
Context)} */ public void createWithResponse() { FileClient fileClient = createClientWithSASToken(); FileHTTPHeaders httpHeaders = new FileHTTPHeaders() .fileContentType("text/html") .fileContentEncoding("gzip") .fileContentLanguage("en") .fileCacheControl("no-transform") .fileContentDisposition("attachment"); FileSmbProperties smbProperties = new FileSmbProperties() .ntfsFileAttributes(EnumSet.of(NtfsFileAttributes.READ_ONLY)) .fileCreationTime(OffsetDateTime.now()) .fileLastWriteTime(OffsetDateTime.now()) .filePermissionKey("filePermissionKey"); String filePermission = "filePermission"; Response<FileInfo> response = fileClient.createWithResponse(1024, httpHeaders, smbProperties, filePermission, Collections.singletonMap("directory", "metadata"), Duration.ofSeconds(1), new Context(key1, value1)); System.out.printf("Creating the file completed with status code %d", response.statusCode()); } /** * Generates a code sample for using {@link FileClient */ public void startCopy() { FileClient fileClient = createClientWithSASToken(); FileCopyInfo response = fileClient.startCopy( "https: Collections.singletonMap("file", "metadata")); System.out.println("Complete copying the file with copy Id: " + response.copyId()); } /** * Generates a code sample for using {@link FileClient */ public void startCopyWithResponse() { FileClient fileClient = createClientWithSASToken(); Response<FileCopyInfo> response = fileClient.startCopyWithResponse( "https: Collections.singletonMap("file", "metadata"), Duration.ofSeconds(1), new Context(key1, value1)); System.out.println("Complete copying the file with copy Id: " + response.value().copyId()); } /** * Generates a code sample for using {@link FileClient */ public void abortCopyFile() { FileClient fileClient = createClientWithSASToken(); fileClient.abortCopy("someCopyId"); System.out.printf("Abort copying the file completed."); } /** * Generates a code sample for using {@link FileClient */ public void abortCopyWithResponse() { FileClient 
fileClient = createClientWithSASToken(); VoidResponse response = fileClient.abortCopyWithResponse("someCopyId", Duration.ofSeconds(1), new Context(key1, value1)); System.out.printf("Abort copying the file completed with status code %d", response.statusCode()); } /** * Generates a code sample for using {@link FileClient */ public void uploadData() { FileClient fileClient = createClientWithSASToken(); ByteBuffer defaultData = ByteBuffer.wrap("default".getBytes(StandardCharsets.UTF_8)); FileUploadInfo response = fileClient.upload(defaultData, defaultData.remaining()); System.out.println("Complete uploading the data with eTag: " + response.eTag()); } /** * Generates a code sample for using {@link FileClient */ /** * Generates a code sample for using {@link FileClient * Duration, Context)} */ public void uploadWithResponseMaxOverload() { FileClient fileClient = createClientWithSASToken(); ByteBuffer defaultData = ByteBuffer.wrap("default".getBytes(StandardCharsets.UTF_8)); Response<FileUploadInfo> response = fileClient.uploadWithResponse(defaultData, defaultData.remaining(), 1024, Duration.ofSeconds(1), new Context(key1, value1)); System.out.println("Complete uploading the data with status code: " + response.statusCode()); } /** * Generates a code sample for using {@link FileClient */ public void clearRange() { FileClient fileClient = createClientWithSASToken(); FileUploadInfo response = fileClient.clearRange(1024); System.out.println("Complete clearing the range with eTag: " + response.eTag()); } /** * Generates a code sample for using {@link FileClient */ public void clearRangeMaxOverload() { FileClient fileClient = createClientWithSASToken(); Response<FileUploadInfo> response = fileClient.clearRangeWithResponse(1024, 1024, Duration.ofSeconds(1), new Context(key1, value1)); System.out.println("Complete clearing the range with status code: " + response.statusCode()); } /** * Generates a code sample for using {@link FileClient */ public void uploadFile() { FileClient 
fileClient = createClientWithSASToken(); fileClient.uploadFromFile("someFilePath"); } /** * Generates a code sample for using {@link FileClient */ public void uploadFileMaxOverload() { FileClient fileClient = createClientWithSASToken(); fileClient.uploadFromFile("someFilePath"); if (fileClient.getProperties() != null) { System.out.printf("Upload the file with length of %d completed", fileClient.getProperties().contentLength()); } } /** * Generates a code sample for using {@link FileClient */ public void downloadData() { FileClient fileClient = createClientWithSASToken(); FileDownloadInfo response = fileClient.downloadWithProperties(); System.out.println("Complete downloading the data."); response.body().subscribe( byteBuffer -> System.out.println("Complete downloading the data with body: " + new String(byteBuffer.array(), StandardCharsets.UTF_8)), error -> System.err.print(error.toString()), () -> System.out.println("Complete downloading the data!") ); } /** * Generates a code sample for using {@link FileClient * FileRange, Boolean, Duration, Context)} */ public void downloadWithPropertiesWithResponse() { FileClient fileClient = createClientWithSASToken(); Response<FileDownloadInfo> response = fileClient.downloadWithPropertiesWithResponse(new FileRange(1024, 2047L), false, Duration.ofSeconds(1), new Context(key1, value1)); System.out.println("Complete downloading the data with status code: " + response.statusCode()); response.value().body().subscribe( byteBuffer -> System.out.println("Complete downloading the data with body: " + new String(byteBuffer.array(), StandardCharsets.UTF_8)), error -> System.err.print(error.toString()), () -> System.out.println("Complete downloading the data!") ); } /** * Generates a code sample for using {@link FileClient */ public void downloadFile() { FileClient fileClient = createClientWithSASToken(); fileClient.downloadToFile("somelocalfilepath"); if (Files.exists(Paths.get("somelocalfilepath"))) { System.out.println("Complete 
downloading the file."); } } /** * Generates a code sample for using {@link FileClient */ public void downloadFileMaxOverload() { FileClient fileClient = createClientWithSASToken(); fileClient.downloadToFile("somelocalfilepath", new FileRange(1024, 2047L)); if (Files.exists(Paths.get("somelocalfilepath"))) { System.out.println("Complete downloading the file."); } } /** * Generates a code sample for using {@link FileClient * @throws URISyntaxException when the URI is invalid */ public void uploadFileFromURLAsync() throws URISyntaxException { FileClient fileClient = createClientWithSASToken(); FileUploadRangeFromURLInfo response = fileClient.uploadRangeFromURL(6, 8, 0, new URI("filewithSAStoken")); System.out.println("Completed upload range from url!"); } /** * Generates a code sample for using {@link FileClient * @throws URISyntaxException when the URI is invalid */ public void uploadFileFromURLWithResponseAsync() throws URISyntaxException { FileClient fileClient = createClientWithSASToken(); Response<FileUploadRangeFromURLInfo> response = fileClient.uploadRangeFromURLWithResponse(6, 8, 0, new URI("filewithSAStoken"), Duration.ofSeconds(1), Context.NONE); System.out.println("Completed upload range from url!"); } /** * Generates a code sample for using {@link FileClient */ public void deleteFile() { FileClient fileClient = createClientWithSASToken(); fileClient.delete(); System.out.println("Complete deleting the file."); } /** * Generates a code sample for using {@link FileClient */ public void deleteWithResponse() { FileClient fileClient = createClientWithSASToken(); VoidResponse response = fileClient.deleteWithResponse(Duration.ofSeconds(1), new Context(key1, value1)); System.out.println("Complete deleting the file with status code: " + response.statusCode()); } /** * Generates a code sample for using {@link FileClient */ public void getProperties() { FileClient fileClient = createClientWithSASToken(); FileProperties properties = fileClient.getProperties(); 
System.out.printf("File latest modified date is %s.", properties.lastModified()); } /** * Generates a code sample for using {@link FileClient */ public void getPropertiesWithResponse() { FileClient fileClient = createClientWithSASToken(); Response<FileProperties> response = fileClient.getPropertiesWithResponse( Duration.ofSeconds(1), new Context(key1, value1)); System.out.printf("File latest modified date is %s.", response.value().lastModified()); } /** * Generates a code sample for using {@link FileClient */ public void setMetadata() { FileClient fileClient = createClientWithSASToken(); fileClient.setMetadata(Collections.singletonMap("file", "updatedMetadata")); System.out.printf("Setting the file metadata completed."); } /** * Generates a code sample for using {@link FileClient */ public void setMetadataWithResponse() { FileClient fileClient = createClientWithSASToken(); Response<FileMetadataInfo> response = fileClient.setMetadataWithResponse( Collections.singletonMap("file", "updatedMetadata"), Duration.ofSeconds(1), new Context(key1, value1)); System.out.printf("Setting the file metadata completed with status code %d", response.statusCode()); } /** * Generates a code sample for using {@link FileClient * Duration, Context)} to clear metadata. 
*/ public void clearMetadataWithResponse() { FileClient fileClient = createClientWithSASToken(); Response<FileMetadataInfo> response = fileClient.setMetadataWithResponse(null, Duration.ofSeconds(1), new Context(key1, value1)); System.out.printf("Setting the file metadata completed with status code %d", response.statusCode()); } /** * Generates a code sample for using {@link FileClient */ public void clearMetadata() { FileClient fileClient = createClientWithSASToken(); fileClient.setMetadata(null); System.out.printf("Setting the file metadata completed."); } /** * Generates a code sample for using {@link FileClient */ public void setHTTPHeaders() { FileClient fileClient = createClientWithSASToken(); FileHTTPHeaders httpHeaders = new FileHTTPHeaders() .fileContentType("text/html") .fileContentEncoding("gzip") .fileContentLanguage("en") .fileCacheControl("no-transform") .fileContentDisposition("attachment"); FileSmbProperties smbProperties = new FileSmbProperties() .ntfsFileAttributes(EnumSet.of(NtfsFileAttributes.READ_ONLY)) .fileCreationTime(OffsetDateTime.now()) .fileLastWriteTime(OffsetDateTime.now()) .filePermissionKey("filePermissionKey"); String filePermission = "filePermission"; fileClient.setProperties(1024, httpHeaders, smbProperties, filePermission); System.out.printf("Setting the file httpHeaders completed."); } /** * Generates a code sample for using {@link FileClient * to clear httpHeaders and preserve SMB properties. 
*/ public void clearSyncHTTPHeaders() { FileClient fileClient = createClientWithSASToken(); FileInfo response = fileClient.setProperties(1024, null, null, null); System.out.printf("Setting the file httpHeaders completed."); } /** * Generates a code sample for using {@link FileClient * FileSmbProperties, String, Duration, Context)} */ public void setHttpHeadersWithResponse() { FileClient fileClient = createClientWithSASToken(); FileHTTPHeaders httpHeaders = new FileHTTPHeaders() .fileContentType("text/html") .fileContentEncoding("gzip") .fileContentLanguage("en") .fileCacheControl("no-transform") .fileContentDisposition("attachment"); FileSmbProperties smbProperties = new FileSmbProperties() .ntfsFileAttributes(EnumSet.of(NtfsFileAttributes.READ_ONLY)) .fileCreationTime(OffsetDateTime.now()) .fileLastWriteTime(OffsetDateTime.now()) .filePermissionKey("filePermissionKey"); String filePermission = "filePermission"; Response<FileInfo> response = fileClient.setPropertiesWithResponse(1024, httpHeaders, smbProperties, filePermission, Duration.ofSeconds(1), new Context(key1, value1)); System.out.printf("Setting the file httpHeaders completed with status code %d", response.statusCode()); } /** * Generates a code sample for using {@link FileClient * (long, FileHTTPHeaders)}
Didn't update the code snippet name
public void uploadWithResponseMaxOverload() { FileClient fileClient = createClientWithSASToken(); ByteBuffer defaultData = ByteBuffer.wrap("default".getBytes(StandardCharsets.UTF_8)); Response<FileUploadInfo> response = fileClient.uploadWithResponse(defaultData, defaultData.remaining(), 1024, Duration.ofSeconds(1), new Context(key1, value1)); System.out.println("Complete uploading the data with status code: " + response.statusCode()); }
public void uploadWithResponseMaxOverload() { FileClient fileClient = createClientWithSASToken(); ByteBuffer defaultData = ByteBuffer.wrap("default".getBytes(StandardCharsets.UTF_8)); Response<FileUploadInfo> response = fileClient.uploadWithResponse(defaultData, defaultData.remaining(), 1024, Duration.ofSeconds(1), new Context(key1, value1)); System.out.println("Complete uploading the data with status code: " + response.statusCode()); }
class FileJavaDocCodeSamples { private String key1 = "key1"; private String value1 = "val1"; /** * Generates code sample for {@link FileClient} instantiation. */ public void initialization() { FileClient client = new FileClientBuilder() .connectionString("${connectionString}") .endpoint("${endpoint}") .buildFileClient(); } /** * Generates code sample for creating a {@link FileClient} with {@link SASTokenCredential} * @return An instance of {@link FileClient} */ public FileClient createClientWithSASToken() { FileClient fileClient = new FileClientBuilder() .endpoint("https: .shareName("myshare") .resourcePath("myfilepath") .buildFileClient(); return fileClient; } /** * Generates code sample for creating a {@link FileClient} with {@link SASTokenCredential} * @return An instance of {@link FileClient} */ public FileClient createClientWithCredential() { FileClient fileClient = new FileClientBuilder() .endpoint("https: .credential(SASTokenCredential.fromQueryParameters(Utility.parseQueryString("${SASTokenQueryParams}"))) .shareName("myshare") .resourcePath("myfilepath") .buildFileClient(); return fileClient; } /** * Generates code sample for creating a {@link FileClient} with {@code connectionString} * which turns into {@link SharedKeyCredential} * @return An instance of {@link FileClient} */ public FileClient createClientWithConnectionString() { String connectionString = "DefaultEndpointsProtocol=https;AccountName={name};AccountKey={key};" + "EndpointSuffix={core.windows.net}"; FileClient fileClient = new FileClientBuilder() .connectionString(connectionString).shareName("myshare").resourcePath("myfilepath") .buildFileClient(); return fileClient; } /** * Generates a code sample for using {@link FileClient */ public void createFile() { FileClient fileClient = createClientWithSASToken(); FileInfo response = fileClient.create(1024); System.out.println("Complete creating the file."); } /** * Generates a code sample for using {@link FileClient * Duration, Context)} */ public 
void createWithResponse() { FileClient fileClient = createClientWithSASToken(); FileHTTPHeaders httpHeaders = new FileHTTPHeaders().fileContentType("text/plain"); Response<FileInfo> response = fileClient.createWithResponse(1024, httpHeaders, Collections.singletonMap("file", "updatedMetadata"), Duration.ofSeconds(1), new Context(key1, value1)); System.out.printf("Creating the file completed with status code %d", response.statusCode()); } /** * Generates a code sample for using {@link FileClient */ public void startCopy() { FileClient fileClient = createClientWithSASToken(); FileCopyInfo response = fileClient.startCopy( "https: Collections.singletonMap("file", "metadata")); System.out.println("Complete copying the file with copy Id: " + response.copyId()); } /** * Generates a code sample for using {@link FileClient */ public void startCopyWithResponse() { FileClient fileClient = createClientWithSASToken(); Response<FileCopyInfo> response = fileClient.startCopyWithResponse( "https: Collections.singletonMap("file", "metadata"), Duration.ofSeconds(1), new Context(key1, value1)); System.out.println("Complete copying the file with copy Id: " + response.value().copyId()); } /** * Generates a code sample for using {@link FileClient */ public void abortCopyFile() { FileClient fileClient = createClientWithSASToken(); fileClient.abortCopy("someCopyId"); System.out.printf("Abort copying the file completed."); } /** * Generates a code sample for using {@link FileClient */ public void abortCopyWithResponse() { FileClient fileClient = createClientWithSASToken(); VoidResponse response = fileClient.abortCopyWithResponse("someCopyId", Duration.ofSeconds(1), new Context(key1, value1)); System.out.printf("Abort copying the file completed with status code %d", response.statusCode()); } /** * Generates a code sample for using {@link FileClient */ public void uploadData() { FileClient fileClient = createClientWithSASToken(); ByteBuffer defaultData = 
ByteBuffer.wrap("default".getBytes(StandardCharsets.UTF_8)); FileUploadInfo response = fileClient.upload(defaultData, defaultData.remaining()); System.out.println("Complete uploading the data with eTag: " + response.eTag()); } /** * Generates a code sample for using {@link FileClient */ public void uploadWithResponse() { FileClient fileClient = createClientWithSASToken(); ByteBuffer defaultData = ByteBuffer.wrap("default".getBytes(StandardCharsets.UTF_8)); Response<FileUploadInfo> response = fileClient.uploadWithResponse(defaultData, defaultData.remaining(), Duration.ofSeconds(1), new Context(key1, value1)); System.out.println("Complete uploading the data with status code: " + response.statusCode()); } /** * Generates a code sample for using {@link FileClient * Duration, Context)} */ /** * Generates a code sample for using {@link FileClient */ public void clearRange() { FileClient fileClient = createClientWithSASToken(); FileUploadInfo response = fileClient.clearRange(1024); System.out.println("Complete clearing the range with eTag: " + response.eTag()); } /** * Generates a code sample for using {@link FileClient */ public void clearRangeMaxOverload() { FileClient fileClient = createClientWithSASToken(); Response<FileUploadInfo> response = fileClient.clearRangeWithResponse(1024, 1024, Duration.ofSeconds(1), new Context(key1, value1)); System.out.println("Complete clearing the range with status code: " + response.statusCode()); } /** * Generates a code sample for using {@link FileClient */ public void uploadFile() { FileClient fileClient = createClientWithSASToken(); fileClient.uploadFromFile("someFilePath"); } /** * Generates a code sample for using {@link FileClient */ public void uploadFileMaxOverload() { FileClient fileClient = createClientWithSASToken(); fileClient.uploadFromFile("someFilePath"); if (fileClient.getProperties() != null) { System.out.printf("Upload the file with length of %d completed", fileClient.getProperties().contentLength()); } } /** * 
Generates a code sample for using {@link FileClient */ public void downloadData() { FileClient fileClient = createClientWithSASToken(); FileDownloadInfo response = fileClient.downloadWithProperties(); System.out.println("Complete downloading the data."); response.body().subscribe( byteBuffer -> System.out.println("Complete downloading the data with body: " + new String(byteBuffer.array(), StandardCharsets.UTF_8)), error -> System.err.print(error.toString()), () -> System.out.println("Complete downloading the data!") ); } /** * Generates a code sample for using {@link FileClient * FileRange, Boolean, Duration, Context)} */ public void downloadWithPropertiesWithResponse() { FileClient fileClient = createClientWithSASToken(); Response<FileDownloadInfo> response = fileClient.downloadWithPropertiesWithResponse(new FileRange(1024, 2047L), false, Duration.ofSeconds(1), new Context(key1, value1)); System.out.println("Complete downloading the data with status code: " + response.statusCode()); response.value().body().subscribe( byteBuffer -> System.out.println("Complete downloading the data with body: " + new String(byteBuffer.array(), StandardCharsets.UTF_8)), error -> System.err.print(error.toString()), () -> System.out.println("Complete downloading the data!") ); } /** * Generates a code sample for using {@link FileClient */ public void downloadFile() { FileClient fileClient = createClientWithSASToken(); fileClient.downloadToFile("somelocalfilepath"); if (Files.exists(Paths.get("somelocalfilepath"))) { System.out.println("Complete downloading the file."); } } /** * Generates a code sample for using {@link FileClient */ public void downloadFileMaxOverload() { FileClient fileClient = createClientWithSASToken(); fileClient.downloadToFile("somelocalfilepath", new FileRange(1024, 2047L)); if (Files.exists(Paths.get("somelocalfilepath"))) { System.out.println("Complete downloading the file."); } } /** * Generates a code sample for using {@link FileClient */ public void 
deleteFile() { FileClient fileClient = createClientWithSASToken(); fileClient.delete(); System.out.println("Complete deleting the file."); } /** * Generates a code sample for using {@link FileClient */ public void deleteWithResponse() { FileClient fileClient = createClientWithSASToken(); VoidResponse response = fileClient.deleteWithResponse(Duration.ofSeconds(1), new Context(key1, value1)); System.out.println("Complete deleting the file with status code: " + response.statusCode()); } /** * Generates a code sample for using {@link FileClient */ public void getProperties() { FileClient fileClient = createClientWithSASToken(); FileProperties properties = fileClient.getProperties(); System.out.printf("File latest modified date is %s.", properties.lastModified()); } /** * Generates a code sample for using {@link FileClient */ public void getPropertiesWithResponse() { FileClient fileClient = createClientWithSASToken(); Response<FileProperties> response = fileClient.getPropertiesWithResponse( Duration.ofSeconds(1), new Context(key1, value1)); System.out.printf("File latest modified date is %s.", response.value().lastModified()); } /** * Generates a code sample for using {@link FileClient */ public void setMetadata() { FileClient fileClient = createClientWithSASToken(); fileClient.setMetadata(Collections.singletonMap("file", "updatedMetadata")); System.out.printf("Setting the file metadata completed."); } /** * Generates a code sample for using {@link FileClient */ public void setMetadataWithResponse() { FileClient fileClient = createClientWithSASToken(); Response<FileMetadataInfo> response = fileClient.setMetadataWithResponse( Collections.singletonMap("file", "updatedMetadata"), Duration.ofSeconds(1), new Context(key1, value1)); System.out.printf("Setting the file metadata completed with status code %d", response.statusCode()); } /** * Generates a code sample for using {@link FileClient * Duration, Context)} to clear metadata. 
*/ public void clearMetadataWithResponse() { FileClient fileClient = createClientWithSASToken(); Response<FileMetadataInfo> response = fileClient.setMetadataWithResponse(null, Duration.ofSeconds(1), new Context(key1, value1)); System.out.printf("Setting the file metadata completed with status code %d", response.statusCode()); } /** * Generates a code sample for using {@link FileClient */ public void clearMetadata() { FileClient fileClient = createClientWithSASToken(); fileClient.setMetadataWithResponse(null, null, new Context(key1, value1)); System.out.printf("Setting the file metadata completed."); } /** * Generates a code sample for using {@link FileClient */ public void setHTTPHeaders() { FileClient fileClient = createClientWithSASToken(); FileHTTPHeaders httpHeaders = new FileHTTPHeaders().fileContentType("text/plain"); fileClient.setHttpHeaders(1024, httpHeaders); System.out.printf("Setting the file httpHeaders completed."); } /** * Generates a code sample for using {@link FileClient */ public void clearSyncHTTPHeaders() { FileClient fileClient = createClientWithSASToken(); FileInfo response = fileClient.setHttpHeaders(1024, null); System.out.printf("Setting the file httpHeaders completed."); } /** * Generates a code sample for using {@link FileClient * Duration, Context)} */ public void setHttpHeadersWithResponse() { FileClient fileClient = createClientWithSASToken(); FileHTTPHeaders httpHeaders = new FileHTTPHeaders().fileContentType("text/plain"); Response<FileInfo> response = fileClient.setHttpHeadersWithResponse(1024, httpHeaders, Duration.ofSeconds(1), new Context(key1, value1)); System.out.printf("Setting the file httpHeaders completed with status code %d", response.statusCode()); } /** * Generates a code sample for using {@link FileClient * Duration, Context)} * (long, FileHTTPHeaders)}
class FileJavaDocCodeSamples { private String key1 = "key1"; private String value1 = "val1"; /** * Generates code sample for {@link FileClient} instantiation. */ public void initialization() { FileClient client = new FileClientBuilder() .connectionString("${connectionString}") .endpoint("${endpoint}") .buildFileClient(); } /** * Generates code sample for creating a {@link FileClient} with {@link SASTokenCredential} * @return An instance of {@link FileClient} */ public FileClient createClientWithSASToken() { FileClient fileClient = new FileClientBuilder() .endpoint("https: .shareName("myshare") .resourcePath("myfilepath") .buildFileClient(); return fileClient; } /** * Generates code sample for creating a {@link FileClient} with {@link SASTokenCredential} * @return An instance of {@link FileClient} */ public FileClient createClientWithCredential() { FileClient fileClient = new FileClientBuilder() .endpoint("https: .credential(SASTokenCredential.fromQueryParameters(Utility.parseQueryString("${SASTokenQueryParams}"))) .shareName("myshare") .resourcePath("myfilepath") .buildFileClient(); return fileClient; } /** * Generates code sample for creating a {@link FileClient} with {@code connectionString} * which turns into {@link SharedKeyCredential} * @return An instance of {@link FileClient} */ public FileClient createClientWithConnectionString() { String connectionString = "DefaultEndpointsProtocol=https;AccountName={name};AccountKey={key};" + "EndpointSuffix={core.windows.net}"; FileClient fileClient = new FileClientBuilder() .connectionString(connectionString).shareName("myshare").resourcePath("myfilepath") .buildFileClient(); return fileClient; } /** * Generates a code sample for using {@link FileClient */ public void createFile() { FileClient fileClient = createClientWithSASToken(); FileInfo response = fileClient.create(1024); System.out.println("Complete creating the file."); } /** * Generates a code sample for using {@link FileClient * String, Map, Duration, 
Context)} */ public void createWithResponse() { FileClient fileClient = createClientWithSASToken(); FileHTTPHeaders httpHeaders = new FileHTTPHeaders() .fileContentType("text/html") .fileContentEncoding("gzip") .fileContentLanguage("en") .fileCacheControl("no-transform") .fileContentDisposition("attachment"); FileSmbProperties smbProperties = new FileSmbProperties() .ntfsFileAttributes(EnumSet.of(NtfsFileAttributes.READ_ONLY)) .fileCreationTime(OffsetDateTime.now()) .fileLastWriteTime(OffsetDateTime.now()) .filePermissionKey("filePermissionKey"); String filePermission = "filePermission"; Response<FileInfo> response = fileClient.createWithResponse(1024, httpHeaders, smbProperties, filePermission, Collections.singletonMap("directory", "metadata"), Duration.ofSeconds(1), new Context(key1, value1)); System.out.printf("Creating the file completed with status code %d", response.statusCode()); } /** * Generates a code sample for using {@link FileClient */ public void startCopy() { FileClient fileClient = createClientWithSASToken(); FileCopyInfo response = fileClient.startCopy( "https: Collections.singletonMap("file", "metadata")); System.out.println("Complete copying the file with copy Id: " + response.copyId()); } /** * Generates a code sample for using {@link FileClient */ public void startCopyWithResponse() { FileClient fileClient = createClientWithSASToken(); Response<FileCopyInfo> response = fileClient.startCopyWithResponse( "https: Collections.singletonMap("file", "metadata"), Duration.ofSeconds(1), new Context(key1, value1)); System.out.println("Complete copying the file with copy Id: " + response.value().copyId()); } /** * Generates a code sample for using {@link FileClient */ public void abortCopyFile() { FileClient fileClient = createClientWithSASToken(); fileClient.abortCopy("someCopyId"); System.out.printf("Abort copying the file completed."); } /** * Generates a code sample for using {@link FileClient */ public void abortCopyWithResponse() { FileClient 
fileClient = createClientWithSASToken(); VoidResponse response = fileClient.abortCopyWithResponse("someCopyId", Duration.ofSeconds(1), new Context(key1, value1)); System.out.printf("Abort copying the file completed with status code %d", response.statusCode()); } /** * Generates a code sample for using {@link FileClient */ public void uploadData() { FileClient fileClient = createClientWithSASToken(); ByteBuffer defaultData = ByteBuffer.wrap("default".getBytes(StandardCharsets.UTF_8)); FileUploadInfo response = fileClient.upload(defaultData, defaultData.remaining()); System.out.println("Complete uploading the data with eTag: " + response.eTag()); } /** * Generates a code sample for using {@link FileClient */ public void uploadWithResponse() { FileClient fileClient = createClientWithSASToken(); ByteBuffer defaultData = ByteBuffer.wrap("default".getBytes(StandardCharsets.UTF_8)); Response<FileUploadInfo> response = fileClient.uploadWithResponse(defaultData, defaultData.remaining(), Duration.ofSeconds(1), new Context(key1, value1)); System.out.println("Complete uploading the data with status code: " + response.statusCode()); } /** * Generates a code sample for using {@link FileClient * Duration, Context)} */ /** * Generates a code sample for using {@link FileClient */ public void clearRange() { FileClient fileClient = createClientWithSASToken(); FileUploadInfo response = fileClient.clearRange(1024); System.out.println("Complete clearing the range with eTag: " + response.eTag()); } /** * Generates a code sample for using {@link FileClient */ public void clearRangeMaxOverload() { FileClient fileClient = createClientWithSASToken(); Response<FileUploadInfo> response = fileClient.clearRangeWithResponse(1024, 1024, Duration.ofSeconds(1), new Context(key1, value1)); System.out.println("Complete clearing the range with status code: " + response.statusCode()); } /** * Generates a code sample for using {@link FileClient */ public void uploadFile() { FileClient fileClient = 
createClientWithSASToken(); fileClient.uploadFromFile("someFilePath"); } /** * Generates a code sample for using {@link FileClient */ public void uploadFileMaxOverload() { FileClient fileClient = createClientWithSASToken(); fileClient.uploadFromFile("someFilePath"); if (fileClient.getProperties() != null) { System.out.printf("Upload the file with length of %d completed", fileClient.getProperties().contentLength()); } } /** * Generates a code sample for using {@link FileClient */ public void downloadData() { FileClient fileClient = createClientWithSASToken(); FileDownloadInfo response = fileClient.downloadWithProperties(); System.out.println("Complete downloading the data."); response.body().subscribe( byteBuffer -> System.out.println("Complete downloading the data with body: " + new String(byteBuffer.array(), StandardCharsets.UTF_8)), error -> System.err.print(error.toString()), () -> System.out.println("Complete downloading the data!") ); } /** * Generates a code sample for using {@link FileClient * FileRange, Boolean, Duration, Context)} */ public void downloadWithPropertiesWithResponse() { FileClient fileClient = createClientWithSASToken(); Response<FileDownloadInfo> response = fileClient.downloadWithPropertiesWithResponse(new FileRange(1024, 2047L), false, Duration.ofSeconds(1), new Context(key1, value1)); System.out.println("Complete downloading the data with status code: " + response.statusCode()); response.value().body().subscribe( byteBuffer -> System.out.println("Complete downloading the data with body: " + new String(byteBuffer.array(), StandardCharsets.UTF_8)), error -> System.err.print(error.toString()), () -> System.out.println("Complete downloading the data!") ); } /** * Generates a code sample for using {@link FileClient */ public void downloadFile() { FileClient fileClient = createClientWithSASToken(); fileClient.downloadToFile("somelocalfilepath"); if (Files.exists(Paths.get("somelocalfilepath"))) { System.out.println("Complete downloading the 
file."); } } /** * Generates a code sample for using {@link FileClient */ public void downloadFileMaxOverload() { FileClient fileClient = createClientWithSASToken(); fileClient.downloadToFile("somelocalfilepath", new FileRange(1024, 2047L)); if (Files.exists(Paths.get("somelocalfilepath"))) { System.out.println("Complete downloading the file."); } } /** * Generates a code sample for using {@link FileClient * @throws URISyntaxException when the URI is invalid */ public void uploadFileFromURLAsync() throws URISyntaxException { FileClient fileClient = createClientWithSASToken(); FileUploadRangeFromURLInfo response = fileClient.uploadRangeFromURL(6, 8, 0, new URI("filewithSAStoken")); System.out.println("Completed upload range from url!"); } /** * Generates a code sample for using {@link FileClient * @throws URISyntaxException when the URI is invalid */ public void uploadFileFromURLWithResponseAsync() throws URISyntaxException { FileClient fileClient = createClientWithSASToken(); Response<FileUploadRangeFromURLInfo> response = fileClient.uploadRangeFromURLWithResponse(6, 8, 0, new URI("filewithSAStoken"), Duration.ofSeconds(1), Context.NONE); System.out.println("Completed upload range from url!"); } /** * Generates a code sample for using {@link FileClient */ public void deleteFile() { FileClient fileClient = createClientWithSASToken(); fileClient.delete(); System.out.println("Complete deleting the file."); } /** * Generates a code sample for using {@link FileClient */ public void deleteWithResponse() { FileClient fileClient = createClientWithSASToken(); VoidResponse response = fileClient.deleteWithResponse(Duration.ofSeconds(1), new Context(key1, value1)); System.out.println("Complete deleting the file with status code: " + response.statusCode()); } /** * Generates a code sample for using {@link FileClient */ public void getProperties() { FileClient fileClient = createClientWithSASToken(); FileProperties properties = fileClient.getProperties(); System.out.printf("File 
latest modified date is %s.", properties.lastModified()); } /** * Generates a code sample for using {@link FileClient */ public void getPropertiesWithResponse() { FileClient fileClient = createClientWithSASToken(); Response<FileProperties> response = fileClient.getPropertiesWithResponse( Duration.ofSeconds(1), new Context(key1, value1)); System.out.printf("File latest modified date is %s.", response.value().lastModified()); } /** * Generates a code sample for using {@link FileClient */ public void setMetadata() { FileClient fileClient = createClientWithSASToken(); fileClient.setMetadata(Collections.singletonMap("file", "updatedMetadata")); System.out.printf("Setting the file metadata completed."); } /** * Generates a code sample for using {@link FileClient */ public void setMetadataWithResponse() { FileClient fileClient = createClientWithSASToken(); Response<FileMetadataInfo> response = fileClient.setMetadataWithResponse( Collections.singletonMap("file", "updatedMetadata"), Duration.ofSeconds(1), new Context(key1, value1)); System.out.printf("Setting the file metadata completed with status code %d", response.statusCode()); } /** * Generates a code sample for using {@link FileClient * Duration, Context)} to clear metadata. 
*/ public void clearMetadataWithResponse() { FileClient fileClient = createClientWithSASToken(); Response<FileMetadataInfo> response = fileClient.setMetadataWithResponse(null, Duration.ofSeconds(1), new Context(key1, value1)); System.out.printf("Setting the file metadata completed with status code %d", response.statusCode()); } /** * Generates a code sample for using {@link FileClient */ public void clearMetadata() { FileClient fileClient = createClientWithSASToken(); fileClient.setMetadata(null); System.out.printf("Setting the file metadata completed."); } /** * Generates a code sample for using {@link FileClient */ public void setHTTPHeaders() { FileClient fileClient = createClientWithSASToken(); FileHTTPHeaders httpHeaders = new FileHTTPHeaders() .fileContentType("text/html") .fileContentEncoding("gzip") .fileContentLanguage("en") .fileCacheControl("no-transform") .fileContentDisposition("attachment"); FileSmbProperties smbProperties = new FileSmbProperties() .ntfsFileAttributes(EnumSet.of(NtfsFileAttributes.READ_ONLY)) .fileCreationTime(OffsetDateTime.now()) .fileLastWriteTime(OffsetDateTime.now()) .filePermissionKey("filePermissionKey"); String filePermission = "filePermission"; fileClient.setProperties(1024, httpHeaders, smbProperties, filePermission); System.out.printf("Setting the file httpHeaders completed."); } /** * Generates a code sample for using {@link FileClient * to clear httpHeaders and preserve SMB properties. 
*/ public void clearSyncHTTPHeaders() { FileClient fileClient = createClientWithSASToken(); FileInfo response = fileClient.setProperties(1024, null, null, null); System.out.printf("Setting the file httpHeaders completed."); } /** * Generates a code sample for using {@link FileClient * FileSmbProperties, String, Duration, Context)} */ public void setHttpHeadersWithResponse() { FileClient fileClient = createClientWithSASToken(); FileHTTPHeaders httpHeaders = new FileHTTPHeaders() .fileContentType("text/html") .fileContentEncoding("gzip") .fileContentLanguage("en") .fileCacheControl("no-transform") .fileContentDisposition("attachment"); FileSmbProperties smbProperties = new FileSmbProperties() .ntfsFileAttributes(EnumSet.of(NtfsFileAttributes.READ_ONLY)) .fileCreationTime(OffsetDateTime.now()) .fileLastWriteTime(OffsetDateTime.now()) .filePermissionKey("filePermissionKey"); String filePermission = "filePermission"; Response<FileInfo> response = fileClient.setPropertiesWithResponse(1024, httpHeaders, smbProperties, filePermission, Duration.ofSeconds(1), new Context(key1, value1)); System.out.printf("Setting the file httpHeaders completed with status code %d", response.statusCode()); } /** * Generates a code sample for using {@link FileClient * (long, FileHTTPHeaders)}
For async APIs, you should use the Flux or Mono context instead of `Context.None`
private Consumer<Poller<CertificateOperation>> cancelOperation(String name) { return poller -> { service.updateCertificateOperation(endpoint, name, API_VERSION, ACCEPT_LANGUAGE, new CertificateOperationUpdateParameter().cancellationRequested(true), CONTENT_TYPE_HEADER_VALUE, Context.NONE); }; }
new CertificateOperationUpdateParameter().cancellationRequested(true), CONTENT_TYPE_HEADER_VALUE, Context.NONE);
new CertificateOperationUpdateParameter().cancellationRequested(true); return service.updateCertificateOperation(endpoint, certificateName, API_VERSION, ACCEPT_LANGUAGE, parameter, CONTENT_TYPE_HEADER_VALUE, context) .doOnRequest(ignored -> logger.info("Cancelling certificate operation - {}
class CertificateAsyncClient { static final String API_VERSION = "7.0"; static final String ACCEPT_LANGUAGE = "en-US"; static final int DEFAULT_MAX_PAGE_RESULTS = 25; static final String CONTENT_TYPE_HEADER_VALUE = "application/json"; private String endpoint; private final CertificateService service; private final ClientLogger logger = new ClientLogger(CertificateAsyncClient.class); /** * Creates a CertificateAsyncClient that uses {@code pipeline} to service requests * * @param endpoint URL for the Azure KeyVault service. * @param pipeline HttpPipeline that the HTTP requests and responses flow through. */ CertificateAsyncClient(URL endpoint, HttpPipeline pipeline) { Objects.requireNonNull(endpoint, KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.VAULT_END_POINT_REQUIRED)); this.endpoint = endpoint.toString(); this.service = RestProxy.create(CertificateService.class, pipeline); } /** * Creates a new certificate. If this is the first version, the certificate resource is created. This operation requires * the certificates/create permission. * * <p><strong>Code Samples</strong></p> * <p>Create certificate is a long running operation. The {@link Poller poller} allows to automatically poll on crate certificate * operation status. It is possible to monitor each intermediate poll response during the poll operation.</p> * * {@codesnippet com.azure.security.keyvault.certificates.async.certificateclient.createCertificate * * @param name The name of the certificate to be created. * @param policy The policy of the certificate to be created. * @param tags The application specific metadata to set. * @throws ResourceModifiedException when invalid certificate policy configuration is provided. * @return A {@link Poller} polling on the create certificate operation status. 
*/ public Poller<CertificateOperation> createCertificate(String name, CertificatePolicy policy, Map<String, String> tags) { return new Poller<CertificateOperation>(Duration.ofSeconds(1), createPollOperation(name), actvationOperation(name, policy, tags), cancelOperation(name)); } private Consumer<Poller<CertificateOperation>> cancelOperation(String name) { return poller -> { service.updateCertificateOperation(endpoint, name, API_VERSION, ACCEPT_LANGUAGE, /** * Creates a new certificate. If this is the first version, the certificate resource is created. This operation requires * the certificates/create permission. * * <p><strong>Code Samples</strong></p> * <p>Create certificate is a long running operation. The {@link Poller poller} allows to automatically poll on crate certificate * operation status. It is possible to monitor each intermediate poll response during the poll operation.</p> * * {@codesnippet com.azure.security.keyvault.certificates.async.certificateclient.createCertificate} * * @param name The name of the certificate to be created. * @param policy The policy of the certificate to be created. * @throws ResourceModifiedException when invalid certificate policy configuration is provided. * @return A {@link Poller} polling on the create certificate operation status. */ public Poller<CertificateOperation> createCertificate(String name, CertificatePolicy policy) { return createCertificate(name, policy, null); } /* Polling operation to poll on create certificate operation status. 
*/ private Function<PollResponse<CertificateOperation>, Mono<PollResponse<CertificateOperation>>> createPollOperation(String certificateName) { return prePollResponse -> { try { System.out.println("Calling Polling Function"); return service.getCertificateOperation(endpoint, certificateName, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE) .flatMap(this::processCertificateOperationResponse); } catch (HttpRequestException e) { logger.logExceptionAsError(e); return Mono.just(new PollResponse<>(PollResponse.OperationStatus.FAILED, null)); } }; } private Mono<PollResponse<CertificateOperation>> processCertificateOperationResponse(Response<CertificateOperation> certificateOperationResponse) { PollResponse.OperationStatus status = null; switch (certificateOperationResponse.value().status()) { case "inProgress": status = PollResponse.OperationStatus.IN_PROGRESS; break; case "completed": status = PollResponse.OperationStatus.SUCCESSFULLY_COMPLETED; break; case "failed": status = PollResponse.OperationStatus.FAILED; break; } return Mono.just(new PollResponse<>(status, certificateOperationResponse.value())); } Mono<Response<CertificateOperation>> createCertificateWithResponse(String name, CertificatePolicy certificatePolicy, Map<String, String> tags) { CertificateRequestParameters certificateRequestParameters = new CertificateRequestParameters() .certificatePolicy(new CertificatePolicyRequest(certificatePolicy)) .tags(tags); return service.createCertificate(endpoint, name, API_VERSION, ACCEPT_LANGUAGE, certificateRequestParameters, CONTENT_TYPE_HEADER_VALUE); } /** * Gets information about the latest version of the specified certificate. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the key in the key vault. 
Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.async.certificateclient.getCertificateWithPolicy} * * @param name The name of the certificate to retrieve, cannot be null * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException if {@code name} is empty string. * @return A {@link Mono} containing the requested {@link Certificate certificate}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Certificate> getCertificateWithPolicy(String name) { return withContext(context -> getCertificateWithResponse(name, "", context)).flatMap(FluxUtil::toMono); } Mono<Response<Certificate>> getCertificateWithResponse(String name, String version, Context context) { return service.getCertificate(endpoint, name, version, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context) .doOnRequest(ignored -> logger.info("Retrieving certificate - {}", name)) .doOnSuccess(response -> logger.info("Retrieved the certificate - {}", response.value().name())) .doOnError(error -> logger.warning("Failed to Retrieve the certificate - {}", name, error)); } /** * Gets information about the latest version of the specified certificate. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the key in the key vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.async.certificateclient.getCertificateWithResponse * * @param name The name of the certificate to retrieve, cannot be null * @param version The version of the certificate to retrieve. If this is an empty String or null, this call is equivalent to calling {@link CertificateAsyncClient * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. 
* @throws HttpRequestException if {@code name} is empty string. * @return A {@link Mono} containing a {@link Response} whose {@link Response */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Certificate>> getCertificateWithResponse(String name, String version) { return withContext(context -> getCertificateWithResponse(name, version == null ? "" : version, context)); } /** * Gets information about the specified version of the specified certificate. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the key in the key vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.async.certificateclient.getCertificate} * * @param name The name of the certificate to retrieve, cannot be null * @param version The version of the certificate to retrieve. If this is an empty String or null, this call is equivalent to calling {@link CertificateAsyncClient * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException if {@code name} is empty string. * @return A {@link Mono} containing the requested {@link Certificate certificate}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Certificate> getCertificate(String name, String version) { return withContext(context -> getCertificateWithResponse(name, version == null ? "" : version, context)).flatMap(FluxUtil::toMono); } /** * Updates the specified attributes associated with the specified certificate. The update operation changes specified attributes of an existing * stored certificate and attributes that are not specified in the request are left unchanged. This operation requires the certificates/update permission. 
* * <p><strong>Code Samples</strong></p> * <p>Gets latest version of the certificate, changes its tags and enabled status and then updates it in the Azure Key Vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.async.certificateclient.updateCertificate * * @param certificate The {@link CertificateBase base certificate} object with updated properties. * @throws NullPointerException if {@code certificate} is {@code null}. * @throws ResourceNotFoundException when a certificate with {@link CertificateBase * @throws HttpRequestException if {@link CertificateBase * @return A {@link Mono} containing the {@link CertificateBase updated certificate}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Certificate> updateCertificate(CertificateBase certificate) { return withContext(context -> updateCertificateWithResponse(certificate, context)).flatMap(FluxUtil::toMono); } /** * Updates the specified attributes associated with the specified certificate. The update operation changes specified attributes of an existing * stored certificate and attributes that are not specified in the request are left unchanged.. This operation requires the certificates/update permission. * * <p><strong>Code Samples</strong></p> * <p>Gets latest version of the certificate, changes its enabled status and then updates it in the Azure Key Vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.async.certificateclient.updateCertificateWithResponse * * @param certificate The {@link CertificateBase base certificate} object with updated properties. * @throws NullPointerException if {@code certificate} is {@code null}. 
* @throws ResourceNotFoundException when a certificate with {@link CertificateBase * @throws HttpRequestException if {@link CertificateBase * @return A {@link Mono} containing a {@link Response} whose {@link Response */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Certificate>> updateCertificateWithResponse(CertificateBase certificate) { return withContext(context -> updateCertificateWithResponse(certificate, context)); } Mono<Response<Certificate>> updateCertificateWithResponse(CertificateBase certificateBase, Context context) { Objects.requireNonNull(certificateBase, "The certificate input parameter cannot be null"); CertificateUpdateParameters parameters = new CertificateUpdateParameters() .tags(certificateBase.tags()) .certificateAttributes(new CertificateRequestAttributes(certificateBase)); return service.updateCertificate(endpoint, certificateBase.name(), API_VERSION, ACCEPT_LANGUAGE, parameters, CONTENT_TYPE_HEADER_VALUE, context) .doOnRequest(ignored -> logger.info("Updating certificate - {}", certificateBase.name())) .doOnSuccess(response -> logger.info("Updated the certificate - {}", certificateBase.name())) .doOnError(error -> logger.warning("Failed to update the certificate - {}", certificateBase.name(), error)); } /** * Gets information about the certificate which represents {@link CertificateBase certificateBase} from the key vault. This * operation requires the certificates/get permission. * * <p>The list operations {@link CertificateAsyncClient * the {@link Flux} containing {@link CertificateBase base certificate} as output excluding the properties like secretId and keyId of the certificate. 
* This operation can then be used to get the full certificate with its properties excluding the policy from {@code certificateBase}.</p> * * {@codesnippet com.azure.security.keyvault.certificates.async.certificateclient.getCertificate * * @param certificateBase The {@link CertificateBase base certificate} holding attributes of the certificate being requested. * @throws ResourceNotFoundException when a certificate with {@link CertificateBase * @throws HttpRequestException if {@link CertificateBase
class CertificateAsyncClient { static final String API_VERSION = "7.0"; static final String ACCEPT_LANGUAGE = "en-US"; static final int DEFAULT_MAX_PAGE_RESULTS = 25; static final String CONTENT_TYPE_HEADER_VALUE = "application/json"; private final String endpoint; private final CertificateService service; private final ClientLogger logger = new ClientLogger(CertificateAsyncClient.class); /** * Creates a CertificateAsyncClient that uses {@code pipeline} to service requests * * @param endpoint URL for the Azure KeyVault service. * @param pipeline HttpPipeline that the HTTP requests and responses flow through. */ CertificateAsyncClient(URL endpoint, HttpPipeline pipeline) { Objects.requireNonNull(endpoint, KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.VAULT_END_POINT_REQUIRED)); this.endpoint = endpoint.toString(); this.service = RestProxy.create(CertificateService.class, pipeline); } /** * Creates a new certificate. If this is the first version, the certificate resource is created. This operation requires * the certificates/create permission. * * <p><strong>Code Samples</strong></p> * <p>Create certificate is a long running operation. The {@link Poller poller} allows users to automatically poll on the create certificate * operation status. It is possible to monitor each intermediate poll response during the poll operation.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.createCertificate * * @param name The name of the certificate to be created. * @param policy The policy of the certificate to be created. * @param tags The application specific metadata to set. * @throws ResourceModifiedException when invalid certificate policy configuration is provided. * @return A {@link Poller} polling on the create certificate operation status. 
*/ public Poller<CertificateOperation> createCertificate(String name, CertificatePolicy policy, Map<String, String> tags) { return new Poller<CertificateOperation>(Duration.ofSeconds(1), createPollOperation(name), activationOperation(name, policy, tags), cancelOperation(name)); } private Consumer<Poller<CertificateOperation>> cancelOperation(String name) { return poller -> withContext(context -> cancelCertificateOperationWithResponse(name, context)); } private Supplier<Mono<CertificateOperation>> activationOperation(String name, CertificatePolicy policy, Map<String, String> tags) { return () -> withContext(context -> createCertificateWithResponse(name, policy, tags, context) .flatMap(certificateOperationResponse -> Mono.just(certificateOperationResponse.value()))); } /** * Creates a new certificate. If this is the first version, the certificate resource is created. This operation requires * the certificates/create permission. * * <p><strong>Code Samples</strong></p> * <p>Create certificate is a long running operation. The {@link Poller poller} allows users to automatically poll on the create certificate * operation status. It is possible to monitor each intermediate poll response during the poll operation.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.createCertificate * * @param name The name of the certificate to be created. * @param policy The policy of the certificate to be created. * @throws ResourceModifiedException when invalid certificate policy configuration is provided. * @return A {@link Poller} polling on the create certificate operation status. */ public Poller<CertificateOperation> createCertificate(String name, CertificatePolicy policy) { return createCertificate(name, policy, null); } /* Polling operation to poll on create certificate operation status. 
*/ private Function<PollResponse<CertificateOperation>, Mono<PollResponse<CertificateOperation>>> createPollOperation(String certificateName) { return prePollResponse -> { try { return withContext(context -> service.getCertificateOperation(endpoint, certificateName, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context) .flatMap(this::processCertificateOperationResponse)); } catch (HttpRequestException e) { logger.logExceptionAsError(e); return Mono.just(new PollResponse<>(PollResponse.OperationStatus.FAILED, null)); } }; } private Mono<PollResponse<CertificateOperation>> processCertificateOperationResponse(Response<CertificateOperation> certificateOperationResponse) { PollResponse.OperationStatus status = null; switch (certificateOperationResponse.value().status()) { case "inProgress": status = PollResponse.OperationStatus.IN_PROGRESS; break; case "completed": status = PollResponse.OperationStatus.SUCCESSFULLY_COMPLETED; break; case "failed": status = PollResponse.OperationStatus.FAILED; break; default: break; } return Mono.just(new PollResponse<>(status, certificateOperationResponse.value())); } Mono<Response<CertificateOperation>> createCertificateWithResponse(String name, CertificatePolicy certificatePolicy, Map<String, String> tags, Context context) { CertificateRequestParameters certificateRequestParameters = new CertificateRequestParameters() .certificatePolicy(new CertificatePolicyRequest(certificatePolicy)) .tags(tags); return service.createCertificate(endpoint, name, API_VERSION, ACCEPT_LANGUAGE, certificateRequestParameters, CONTENT_TYPE_HEADER_VALUE, context); } /** * Gets information about the latest version of the specified certificate. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the key in the key vault. 
Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificateWithPolicy * * @param name The name of the certificate to retrieve, cannot be null * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException if {@code name} is empty string. * @return A {@link Mono} containing the requested {@link Certificate certificate}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Certificate> getCertificateWithPolicy(String name) { return withContext(context -> getCertificateWithResponse(name, "", context)).flatMap(FluxUtil::toMono); } Mono<Response<Certificate>> getCertificateWithResponse(String name, String version, Context context) { return service.getCertificate(endpoint, name, version, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context) .doOnRequest(ignored -> logger.info("Retrieving certificate - {}", name)) .doOnSuccess(response -> logger.info("Retrieved the certificate - {}", response.value().name())) .doOnError(error -> logger.warning("Failed to Retrieve the certificate - {}", name, error)); } /** * Gets information about the latest version of the specified certificate. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the key in the key vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificateWithResponse * * @param name The name of the certificate to retrieve, cannot be null * @param version The version of the certificate to retrieve. If this is an empty String or null, this call is equivalent to calling {@link CertificateAsyncClient * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. 
* @throws HttpRequestException if {@code name} is empty string. * @return A {@link Mono} containing a {@link Response} whose {@link Response */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Certificate>> getCertificateWithResponse(String name, String version) { return withContext(context -> getCertificateWithResponse(name, version == null ? "" : version, context)); } /** * Gets information about the specified version of the specified certificate. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the key in the key vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificate * * @param name The name of the certificate to retrieve, cannot be null * @param version The version of the certificate to retrieve. If this is an empty String or null, this call is equivalent to calling {@link CertificateAsyncClient * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException if {@code name} is empty string. * @return A {@link Mono} containing the requested {@link Certificate certificate}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Certificate> getCertificate(String name, String version) { return withContext(context -> getCertificateWithResponse(name, version == null ? "" : version, context)).flatMap(FluxUtil::toMono); } /** * Updates the specified attributes associated with the specified certificate. The update operation changes specified attributes of an existing * stored certificate and attributes that are not specified in the request are left unchanged. This operation requires the certificates/update permission. 
* * <p><strong>Code Samples</strong></p> * <p>Gets latest version of the certificate, changes its tags and enabled status and then updates it in the Azure Key Vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.updateCertificate * * @param certificate The {@link CertificateBase} object with updated properties. * @throws NullPointerException if {@code certificate} is {@code null}. * @throws ResourceNotFoundException when a certificate with {@link CertificateBase * @throws HttpRequestException if {@link CertificateBase * @return A {@link Mono} containing the {@link CertificateBase updated certificate}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Certificate> updateCertificate(CertificateBase certificate) { return withContext(context -> updateCertificateWithResponse(certificate, context)).flatMap(FluxUtil::toMono); } /** * Updates the specified attributes associated with the specified certificate. The update operation changes specified attributes of an existing * stored certificate and attributes that are not specified in the request are left unchanged. This operation requires the certificates/update permission. * * <p><strong>Code Samples</strong></p> * <p>Gets latest version of the certificate, changes its enabled status and then updates it in the Azure Key Vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.updateCertificateWithResponse * * @param certificate The {@link CertificateBase} object with updated properties. * @throws NullPointerException if {@code certificate} is {@code null}. 
* @throws ResourceNotFoundException when a certificate with {@link CertificateBase * @throws HttpRequestException if {@link CertificateBase * @return A {@link Mono} containing a {@link Response} whose {@link Response */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Certificate>> updateCertificateWithResponse(CertificateBase certificate) { return withContext(context -> updateCertificateWithResponse(certificate, context)); } Mono<Response<Certificate>> updateCertificateWithResponse(CertificateBase certificateBase, Context context) { Objects.requireNonNull(certificateBase, "The certificate input parameter cannot be null"); CertificateUpdateParameters parameters = new CertificateUpdateParameters() .tags(certificateBase.tags()) .certificateAttributes(new CertificateRequestAttributes(certificateBase)); return service.updateCertificate(endpoint, certificateBase.name(), API_VERSION, ACCEPT_LANGUAGE, parameters, CONTENT_TYPE_HEADER_VALUE, context) .doOnRequest(ignored -> logger.info("Updating certificate - {}", certificateBase.name())) .doOnSuccess(response -> logger.info("Updated the certificate - {}", certificateBase.name())) .doOnError(error -> logger.warning("Failed to update the certificate - {}", certificateBase.name(), error)); } /** * Gets information about the certificate which represents the {@link CertificateBase} from the key vault. This * operation requires the certificates/get permission. * * <p>The list operations {@link CertificateAsyncClient * the {@link Flux} containing {@link CertificateBase} as output excluding the properties like secretId and keyId of the certificate. * This operation can then be used to get the full certificate with its properties excluding the policy from {@code certificateBase}.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificate * * @param certificateBase The {@link CertificateBase} holding attributes of the certificate being requested. 
* @throws ResourceNotFoundException when a certificate with {@link CertificateBase * @throws HttpRequestException if {@link CertificateBase
The code snippet name was not updated.
/**
 * Generates a code sample for clearing a range of a file with the maximal
 * overload: explicit offset, timeout, and {@link Context}.
 */
public void clearRangeMaxOverload() {
    FileClient fileClient = createClientWithSASToken();
    // Clear 1024 bytes starting at offset 1024, bounded by a 1-second timeout.
    Response<FileUploadInfo> clearRangeResponse =
        fileClient.clearRangeWithResponse(1024, 1024, Duration.ofSeconds(1), new Context(key1, value1));
    System.out.printf("Complete clearing the range with status code: %d%n", clearRangeResponse.statusCode());
}
// NOTE(review): this method is a byte-for-byte duplicate of the clearRangeMaxOverload
// sample that appears immediately above it in this file — almost certainly a
// copy/paste artifact; one of the two copies should be removed or renamed.
public void clearRangeMaxOverload() { FileClient fileClient = createClientWithSASToken(); Response<FileUploadInfo> response = fileClient.clearRangeWithResponse(1024, 1024, Duration.ofSeconds(1), new Context(key1, value1)); System.out.println("Complete clearing the range with status code: " + response.statusCode()); }
class FileJavaDocCodeSamples { private String key1 = "key1"; private String value1 = "val1"; /** * Generates code sample for {@link FileClient} instantiation. */ public void initialization() { FileClient client = new FileClientBuilder() .connectionString("${connectionString}") .endpoint("${endpoint}") .buildFileClient(); } /** * Generates code sample for creating a {@link FileClient} with {@link SASTokenCredential} * @return An instance of {@link FileClient} */ public FileClient createClientWithSASToken() { FileClient fileClient = new FileClientBuilder() .endpoint("https: .shareName("myshare") .resourcePath("myfilepath") .buildFileClient(); return fileClient; } /** * Generates code sample for creating a {@link FileClient} with {@link SASTokenCredential} * @return An instance of {@link FileClient} */ public FileClient createClientWithCredential() { FileClient fileClient = new FileClientBuilder() .endpoint("https: .credential(SASTokenCredential.fromQueryParameters(Utility.parseQueryString("${SASTokenQueryParams}"))) .shareName("myshare") .resourcePath("myfilepath") .buildFileClient(); return fileClient; } /** * Generates code sample for creating a {@link FileClient} with {@code connectionString} * which turns into {@link SharedKeyCredential} * @return An instance of {@link FileClient} */ public FileClient createClientWithConnectionString() { String connectionString = "DefaultEndpointsProtocol=https;AccountName={name};AccountKey={key};" + "EndpointSuffix={core.windows.net}"; FileClient fileClient = new FileClientBuilder() .connectionString(connectionString).shareName("myshare").resourcePath("myfilepath") .buildFileClient(); return fileClient; } /** * Generates a code sample for using {@link FileClient */ public void createFile() { FileClient fileClient = createClientWithSASToken(); FileInfo response = fileClient.create(1024); System.out.println("Complete creating the file."); } /** * Generates a code sample for using {@link FileClient * Duration, Context)} */ public 
void createWithResponse() { FileClient fileClient = createClientWithSASToken(); FileHTTPHeaders httpHeaders = new FileHTTPHeaders().fileContentType("text/plain"); Response<FileInfo> response = fileClient.createWithResponse(1024, httpHeaders, Collections.singletonMap("file", "updatedMetadata"), Duration.ofSeconds(1), new Context(key1, value1)); System.out.printf("Creating the file completed with status code %d", response.statusCode()); } /** * Generates a code sample for using {@link FileClient */ public void startCopy() { FileClient fileClient = createClientWithSASToken(); FileCopyInfo response = fileClient.startCopy( "https: Collections.singletonMap("file", "metadata")); System.out.println("Complete copying the file with copy Id: " + response.copyId()); } /** * Generates a code sample for using {@link FileClient */ public void startCopyWithResponse() { FileClient fileClient = createClientWithSASToken(); Response<FileCopyInfo> response = fileClient.startCopyWithResponse( "https: Collections.singletonMap("file", "metadata"), Duration.ofSeconds(1), new Context(key1, value1)); System.out.println("Complete copying the file with copy Id: " + response.value().copyId()); } /** * Generates a code sample for using {@link FileClient */ public void abortCopyFile() { FileClient fileClient = createClientWithSASToken(); fileClient.abortCopy("someCopyId"); System.out.printf("Abort copying the file completed."); } /** * Generates a code sample for using {@link FileClient */ public void abortCopyWithResponse() { FileClient fileClient = createClientWithSASToken(); VoidResponse response = fileClient.abortCopyWithResponse("someCopyId", Duration.ofSeconds(1), new Context(key1, value1)); System.out.printf("Abort copying the file completed with status code %d", response.statusCode()); } /** * Generates a code sample for using {@link FileClient */ public void uploadData() { FileClient fileClient = createClientWithSASToken(); ByteBuffer defaultData = 
ByteBuffer.wrap("default".getBytes(StandardCharsets.UTF_8)); FileUploadInfo response = fileClient.upload(defaultData, defaultData.remaining()); System.out.println("Complete uploading the data with eTag: " + response.eTag()); } /** * Generates a code sample for using {@link FileClient */ public void uploadWithResponse() { FileClient fileClient = createClientWithSASToken(); ByteBuffer defaultData = ByteBuffer.wrap("default".getBytes(StandardCharsets.UTF_8)); Response<FileUploadInfo> response = fileClient.uploadWithResponse(defaultData, defaultData.remaining(), Duration.ofSeconds(1), new Context(key1, value1)); System.out.println("Complete uploading the data with status code: " + response.statusCode()); } /** * Generates a code sample for using {@link FileClient * Duration, Context)} */ public void uploadWithResponseMaxOverload() { FileClient fileClient = createClientWithSASToken(); ByteBuffer defaultData = ByteBuffer.wrap("default".getBytes(StandardCharsets.UTF_8)); Response<FileUploadInfo> response = fileClient.uploadWithResponse(defaultData, defaultData.remaining(), 1024, Duration.ofSeconds(1), new Context(key1, value1)); System.out.println("Complete uploading the data with status code: " + response.statusCode()); } /** * Generates a code sample for using {@link FileClient */ public void clearRange() { FileClient fileClient = createClientWithSASToken(); FileUploadInfo response = fileClient.clearRange(1024); System.out.println("Complete clearing the range with eTag: " + response.eTag()); } /** * Generates a code sample for using {@link FileClient */ /** * Generates a code sample for using {@link FileClient */ public void uploadFile() { FileClient fileClient = createClientWithSASToken(); fileClient.uploadFromFile("someFilePath"); } /** * Generates a code sample for using {@link FileClient */ public void uploadFileMaxOverload() { FileClient fileClient = createClientWithSASToken(); fileClient.uploadFromFile("someFilePath"); if (fileClient.getProperties() != null) { 
System.out.printf("Upload the file with length of %d completed", fileClient.getProperties().contentLength()); } } /** * Generates a code sample for using {@link FileClient */ public void downloadData() { FileClient fileClient = createClientWithSASToken(); FileDownloadInfo response = fileClient.downloadWithProperties(); System.out.println("Complete downloading the data."); response.body().subscribe( byteBuffer -> System.out.println("Complete downloading the data with body: " + new String(byteBuffer.array(), StandardCharsets.UTF_8)), error -> System.err.print(error.toString()), () -> System.out.println("Complete downloading the data!") ); } /** * Generates a code sample for using {@link FileClient * FileRange, Boolean, Duration, Context)} */ public void downloadWithPropertiesWithResponse() { FileClient fileClient = createClientWithSASToken(); Response<FileDownloadInfo> response = fileClient.downloadWithPropertiesWithResponse(new FileRange(1024, 2047L), false, Duration.ofSeconds(1), new Context(key1, value1)); System.out.println("Complete downloading the data with status code: " + response.statusCode()); response.value().body().subscribe( byteBuffer -> System.out.println("Complete downloading the data with body: " + new String(byteBuffer.array(), StandardCharsets.UTF_8)), error -> System.err.print(error.toString()), () -> System.out.println("Complete downloading the data!") ); } /** * Generates a code sample for using {@link FileClient */ public void downloadFile() { FileClient fileClient = createClientWithSASToken(); fileClient.downloadToFile("somelocalfilepath"); if (Files.exists(Paths.get("somelocalfilepath"))) { System.out.println("Complete downloading the file."); } } /** * Generates a code sample for using {@link FileClient */ public void downloadFileMaxOverload() { FileClient fileClient = createClientWithSASToken(); fileClient.downloadToFile("somelocalfilepath", new FileRange(1024, 2047L)); if (Files.exists(Paths.get("somelocalfilepath"))) { 
System.out.println("Complete downloading the file."); } } /** * Generates a code sample for using {@link FileClient */ public void deleteFile() { FileClient fileClient = createClientWithSASToken(); fileClient.delete(); System.out.println("Complete deleting the file."); } /** * Generates a code sample for using {@link FileClient */ public void deleteWithResponse() { FileClient fileClient = createClientWithSASToken(); VoidResponse response = fileClient.deleteWithResponse(Duration.ofSeconds(1), new Context(key1, value1)); System.out.println("Complete deleting the file with status code: " + response.statusCode()); } /** * Generates a code sample for using {@link FileClient */ public void getProperties() { FileClient fileClient = createClientWithSASToken(); FileProperties properties = fileClient.getProperties(); System.out.printf("File latest modified date is %s.", properties.lastModified()); } /** * Generates a code sample for using {@link FileClient */ public void getPropertiesWithResponse() { FileClient fileClient = createClientWithSASToken(); Response<FileProperties> response = fileClient.getPropertiesWithResponse( Duration.ofSeconds(1), new Context(key1, value1)); System.out.printf("File latest modified date is %s.", response.value().lastModified()); } /** * Generates a code sample for using {@link FileClient */ public void setMetadata() { FileClient fileClient = createClientWithSASToken(); fileClient.setMetadata(Collections.singletonMap("file", "updatedMetadata")); System.out.printf("Setting the file metadata completed."); } /** * Generates a code sample for using {@link FileClient */ public void setMetadataWithResponse() { FileClient fileClient = createClientWithSASToken(); Response<FileMetadataInfo> response = fileClient.setMetadataWithResponse( Collections.singletonMap("file", "updatedMetadata"), Duration.ofSeconds(1), new Context(key1, value1)); System.out.printf("Setting the file metadata completed with status code %d", response.statusCode()); } /** * 
Generates a code sample for using {@link FileClient * Duration, Context)} to clear metadata. */ public void clearMetadataWithResponse() { FileClient fileClient = createClientWithSASToken(); Response<FileMetadataInfo> response = fileClient.setMetadataWithResponse(null, Duration.ofSeconds(1), new Context(key1, value1)); System.out.printf("Setting the file metadata completed with status code %d", response.statusCode()); } /** * Generates a code sample for using {@link FileClient */ public void clearMetadata() { FileClient fileClient = createClientWithSASToken(); fileClient.setMetadataWithResponse(null, null, new Context(key1, value1)); System.out.printf("Setting the file metadata completed."); } /** * Generates a code sample for using {@link FileClient */ public void setHTTPHeaders() { FileClient fileClient = createClientWithSASToken(); FileHTTPHeaders httpHeaders = new FileHTTPHeaders().fileContentType("text/plain"); fileClient.setHttpHeaders(1024, httpHeaders); System.out.printf("Setting the file httpHeaders completed."); } /** * Generates a code sample for using {@link FileClient */ public void clearSyncHTTPHeaders() { FileClient fileClient = createClientWithSASToken(); FileInfo response = fileClient.setHttpHeaders(1024, null); System.out.printf("Setting the file httpHeaders completed."); } /** * Generates a code sample for using {@link FileClient * Duration, Context)} */ public void setHttpHeadersWithResponse() { FileClient fileClient = createClientWithSASToken(); FileHTTPHeaders httpHeaders = new FileHTTPHeaders().fileContentType("text/plain"); Response<FileInfo> response = fileClient.setHttpHeadersWithResponse(1024, httpHeaders, Duration.ofSeconds(1), new Context(key1, value1)); System.out.printf("Setting the file httpHeaders completed with status code %d", response.statusCode()); } /** * Generates a code sample for using {@link FileClient * Duration, Context)} * (long, FileHTTPHeaders)}
class FileJavaDocCodeSamples { private String key1 = "key1"; private String value1 = "val1"; /** * Generates code sample for {@link FileClient} instantiation. */ public void initialization() { FileClient client = new FileClientBuilder() .connectionString("${connectionString}") .endpoint("${endpoint}") .buildFileClient(); } /** * Generates code sample for creating a {@link FileClient} with {@link SASTokenCredential} * @return An instance of {@link FileClient} */ public FileClient createClientWithSASToken() { FileClient fileClient = new FileClientBuilder() .endpoint("https: .shareName("myshare") .resourcePath("myfilepath") .buildFileClient(); return fileClient; } /** * Generates code sample for creating a {@link FileClient} with {@link SASTokenCredential} * @return An instance of {@link FileClient} */ public FileClient createClientWithCredential() { FileClient fileClient = new FileClientBuilder() .endpoint("https: .credential(SASTokenCredential.fromQueryParameters(Utility.parseQueryString("${SASTokenQueryParams}"))) .shareName("myshare") .resourcePath("myfilepath") .buildFileClient(); return fileClient; } /** * Generates code sample for creating a {@link FileClient} with {@code connectionString} * which turns into {@link SharedKeyCredential} * @return An instance of {@link FileClient} */ public FileClient createClientWithConnectionString() { String connectionString = "DefaultEndpointsProtocol=https;AccountName={name};AccountKey={key};" + "EndpointSuffix={core.windows.net}"; FileClient fileClient = new FileClientBuilder() .connectionString(connectionString).shareName("myshare").resourcePath("myfilepath") .buildFileClient(); return fileClient; } /** * Generates a code sample for using {@link FileClient */ public void createFile() { FileClient fileClient = createClientWithSASToken(); FileInfo response = fileClient.create(1024); System.out.println("Complete creating the file."); } /** * Generates a code sample for using {@link FileClient * String, Map, Duration, 
Context)} */ public void createWithResponse() { FileClient fileClient = createClientWithSASToken(); FileHTTPHeaders httpHeaders = new FileHTTPHeaders() .fileContentType("text/html") .fileContentEncoding("gzip") .fileContentLanguage("en") .fileCacheControl("no-transform") .fileContentDisposition("attachment"); FileSmbProperties smbProperties = new FileSmbProperties() .ntfsFileAttributes(EnumSet.of(NtfsFileAttributes.READ_ONLY)) .fileCreationTime(OffsetDateTime.now()) .fileLastWriteTime(OffsetDateTime.now()) .filePermissionKey("filePermissionKey"); String filePermission = "filePermission"; Response<FileInfo> response = fileClient.createWithResponse(1024, httpHeaders, smbProperties, filePermission, Collections.singletonMap("directory", "metadata"), Duration.ofSeconds(1), new Context(key1, value1)); System.out.printf("Creating the file completed with status code %d", response.statusCode()); } /** * Generates a code sample for using {@link FileClient */ public void startCopy() { FileClient fileClient = createClientWithSASToken(); FileCopyInfo response = fileClient.startCopy( "https: Collections.singletonMap("file", "metadata")); System.out.println("Complete copying the file with copy Id: " + response.copyId()); } /** * Generates a code sample for using {@link FileClient */ public void startCopyWithResponse() { FileClient fileClient = createClientWithSASToken(); Response<FileCopyInfo> response = fileClient.startCopyWithResponse( "https: Collections.singletonMap("file", "metadata"), Duration.ofSeconds(1), new Context(key1, value1)); System.out.println("Complete copying the file with copy Id: " + response.value().copyId()); } /** * Generates a code sample for using {@link FileClient */ public void abortCopyFile() { FileClient fileClient = createClientWithSASToken(); fileClient.abortCopy("someCopyId"); System.out.printf("Abort copying the file completed."); } /** * Generates a code sample for using {@link FileClient */ public void abortCopyWithResponse() { FileClient 
fileClient = createClientWithSASToken(); VoidResponse response = fileClient.abortCopyWithResponse("someCopyId", Duration.ofSeconds(1), new Context(key1, value1)); System.out.printf("Abort copying the file completed with status code %d", response.statusCode()); } /** * Generates a code sample for using {@link FileClient */ public void uploadData() { FileClient fileClient = createClientWithSASToken(); ByteBuffer defaultData = ByteBuffer.wrap("default".getBytes(StandardCharsets.UTF_8)); FileUploadInfo response = fileClient.upload(defaultData, defaultData.remaining()); System.out.println("Complete uploading the data with eTag: " + response.eTag()); } /** * Generates a code sample for using {@link FileClient */ public void uploadWithResponse() { FileClient fileClient = createClientWithSASToken(); ByteBuffer defaultData = ByteBuffer.wrap("default".getBytes(StandardCharsets.UTF_8)); Response<FileUploadInfo> response = fileClient.uploadWithResponse(defaultData, defaultData.remaining(), Duration.ofSeconds(1), new Context(key1, value1)); System.out.println("Complete uploading the data with status code: " + response.statusCode()); } /** * Generates a code sample for using {@link FileClient * Duration, Context)} */ public void uploadWithResponseMaxOverload() { FileClient fileClient = createClientWithSASToken(); ByteBuffer defaultData = ByteBuffer.wrap("default".getBytes(StandardCharsets.UTF_8)); Response<FileUploadInfo> response = fileClient.uploadWithResponse(defaultData, defaultData.remaining(), 1024, Duration.ofSeconds(1), new Context(key1, value1)); System.out.println("Complete uploading the data with status code: " + response.statusCode()); } /** * Generates a code sample for using {@link FileClient */ public void clearRange() { FileClient fileClient = createClientWithSASToken(); FileUploadInfo response = fileClient.clearRange(1024); System.out.println("Complete clearing the range with eTag: " + response.eTag()); } /** * Generates a code sample for using {@link FileClient 
*/ /** * Generates a code sample for using {@link FileClient */ public void uploadFile() { FileClient fileClient = createClientWithSASToken(); fileClient.uploadFromFile("someFilePath"); } /** * Generates a code sample for using {@link FileClient */ public void uploadFileMaxOverload() { FileClient fileClient = createClientWithSASToken(); fileClient.uploadFromFile("someFilePath"); if (fileClient.getProperties() != null) { System.out.printf("Upload the file with length of %d completed", fileClient.getProperties().contentLength()); } } /** * Generates a code sample for using {@link FileClient */ public void downloadData() { FileClient fileClient = createClientWithSASToken(); FileDownloadInfo response = fileClient.downloadWithProperties(); System.out.println("Complete downloading the data."); response.body().subscribe( byteBuffer -> System.out.println("Complete downloading the data with body: " + new String(byteBuffer.array(), StandardCharsets.UTF_8)), error -> System.err.print(error.toString()), () -> System.out.println("Complete downloading the data!") ); } /** * Generates a code sample for using {@link FileClient * FileRange, Boolean, Duration, Context)} */ public void downloadWithPropertiesWithResponse() { FileClient fileClient = createClientWithSASToken(); Response<FileDownloadInfo> response = fileClient.downloadWithPropertiesWithResponse(new FileRange(1024, 2047L), false, Duration.ofSeconds(1), new Context(key1, value1)); System.out.println("Complete downloading the data with status code: " + response.statusCode()); response.value().body().subscribe( byteBuffer -> System.out.println("Complete downloading the data with body: " + new String(byteBuffer.array(), StandardCharsets.UTF_8)), error -> System.err.print(error.toString()), () -> System.out.println("Complete downloading the data!") ); } /** * Generates a code sample for using {@link FileClient */ public void downloadFile() { FileClient fileClient = createClientWithSASToken(); 
fileClient.downloadToFile("somelocalfilepath"); if (Files.exists(Paths.get("somelocalfilepath"))) { System.out.println("Complete downloading the file."); } } /** * Generates a code sample for using {@link FileClient */ public void downloadFileMaxOverload() { FileClient fileClient = createClientWithSASToken(); fileClient.downloadToFile("somelocalfilepath", new FileRange(1024, 2047L)); if (Files.exists(Paths.get("somelocalfilepath"))) { System.out.println("Complete downloading the file."); } } /** * Generates a code sample for using {@link FileClient * @throws URISyntaxException when the URI is invalid */ public void uploadFileFromURLAsync() throws URISyntaxException { FileClient fileClient = createClientWithSASToken(); FileUploadRangeFromURLInfo response = fileClient.uploadRangeFromURL(6, 8, 0, new URI("filewithSAStoken")); System.out.println("Completed upload range from url!"); } /** * Generates a code sample for using {@link FileClient * @throws URISyntaxException when the URI is invalid */ public void uploadFileFromURLWithResponseAsync() throws URISyntaxException { FileClient fileClient = createClientWithSASToken(); Response<FileUploadRangeFromURLInfo> response = fileClient.uploadRangeFromURLWithResponse(6, 8, 0, new URI("filewithSAStoken"), Duration.ofSeconds(1), Context.NONE); System.out.println("Completed upload range from url!"); } /** * Generates a code sample for using {@link FileClient */ public void deleteFile() { FileClient fileClient = createClientWithSASToken(); fileClient.delete(); System.out.println("Complete deleting the file."); } /** * Generates a code sample for using {@link FileClient */ public void deleteWithResponse() { FileClient fileClient = createClientWithSASToken(); VoidResponse response = fileClient.deleteWithResponse(Duration.ofSeconds(1), new Context(key1, value1)); System.out.println("Complete deleting the file with status code: " + response.statusCode()); } /** * Generates a code sample for using {@link FileClient */ public void 
getProperties() { FileClient fileClient = createClientWithSASToken(); FileProperties properties = fileClient.getProperties(); System.out.printf("File latest modified date is %s.", properties.lastModified()); } /** * Generates a code sample for using {@link FileClient */ public void getPropertiesWithResponse() { FileClient fileClient = createClientWithSASToken(); Response<FileProperties> response = fileClient.getPropertiesWithResponse( Duration.ofSeconds(1), new Context(key1, value1)); System.out.printf("File latest modified date is %s.", response.value().lastModified()); } /** * Generates a code sample for using {@link FileClient */ public void setMetadata() { FileClient fileClient = createClientWithSASToken(); fileClient.setMetadata(Collections.singletonMap("file", "updatedMetadata")); System.out.printf("Setting the file metadata completed."); } /** * Generates a code sample for using {@link FileClient */ public void setMetadataWithResponse() { FileClient fileClient = createClientWithSASToken(); Response<FileMetadataInfo> response = fileClient.setMetadataWithResponse( Collections.singletonMap("file", "updatedMetadata"), Duration.ofSeconds(1), new Context(key1, value1)); System.out.printf("Setting the file metadata completed with status code %d", response.statusCode()); } /** * Generates a code sample for using {@link FileClient * Duration, Context)} to clear metadata. 
*/ public void clearMetadataWithResponse() { FileClient fileClient = createClientWithSASToken(); Response<FileMetadataInfo> response = fileClient.setMetadataWithResponse(null, Duration.ofSeconds(1), new Context(key1, value1)); System.out.printf("Setting the file metadata completed with status code %d", response.statusCode()); } /** * Generates a code sample for using {@link FileClient */ public void clearMetadata() { FileClient fileClient = createClientWithSASToken(); fileClient.setMetadata(null); System.out.printf("Setting the file metadata completed."); } /** * Generates a code sample for using {@link FileClient */ public void setHTTPHeaders() { FileClient fileClient = createClientWithSASToken(); FileHTTPHeaders httpHeaders = new FileHTTPHeaders() .fileContentType("text/html") .fileContentEncoding("gzip") .fileContentLanguage("en") .fileCacheControl("no-transform") .fileContentDisposition("attachment"); FileSmbProperties smbProperties = new FileSmbProperties() .ntfsFileAttributes(EnumSet.of(NtfsFileAttributes.READ_ONLY)) .fileCreationTime(OffsetDateTime.now()) .fileLastWriteTime(OffsetDateTime.now()) .filePermissionKey("filePermissionKey"); String filePermission = "filePermission"; fileClient.setProperties(1024, httpHeaders, smbProperties, filePermission); System.out.printf("Setting the file httpHeaders completed."); } /** * Generates a code sample for using {@link FileClient * to clear httpHeaders and preserve SMB properties. 
*/ public void clearSyncHTTPHeaders() { FileClient fileClient = createClientWithSASToken(); FileInfo response = fileClient.setProperties(1024, null, null, null); System.out.printf("Setting the file httpHeaders completed."); } /** * Generates a code sample for using {@link FileClient * FileSmbProperties, String, Duration, Context)} */ public void setHttpHeadersWithResponse() { FileClient fileClient = createClientWithSASToken(); FileHTTPHeaders httpHeaders = new FileHTTPHeaders() .fileContentType("text/html") .fileContentEncoding("gzip") .fileContentLanguage("en") .fileCacheControl("no-transform") .fileContentDisposition("attachment"); FileSmbProperties smbProperties = new FileSmbProperties() .ntfsFileAttributes(EnumSet.of(NtfsFileAttributes.READ_ONLY)) .fileCreationTime(OffsetDateTime.now()) .fileLastWriteTime(OffsetDateTime.now()) .filePermissionKey("filePermissionKey"); String filePermission = "filePermission"; Response<FileInfo> response = fileClient.setPropertiesWithResponse(1024, httpHeaders, smbProperties, filePermission, Duration.ofSeconds(1), new Context(key1, value1)); System.out.printf("Setting the file httpHeaders completed with status code %d", response.statusCode()); } /** * Generates a code sample for using {@link FileClient * (long, FileHTTPHeaders)}
(extreme) nit: Same error condition for these two exceptions, but different wording. It may be helpful, if not superfluous, to use consistent verbiage.
public IterableStream<EventData> receive(int maximumMessageCount, Duration maximumWaitTime) { Objects.requireNonNull(maximumWaitTime, "'maximumWaitTime' cannot be null."); if (maximumMessageCount < 1) { throw new IllegalArgumentException("'maximumMessageCount' cannot be less than 1."); } else if (maximumWaitTime.isNegative() || maximumWaitTime.isZero()) { throw new IllegalArgumentException("'maximumWaitTime' cannot be zero or less."); } final Flux<EventData> events = Flux.create(emitter -> { queueWork(maximumMessageCount, maximumWaitTime, emitter); }); final Flux<EventData> map = events.collectList().map(Flux::fromIterable).block(); return new IterableStream<>(map); }
throw new IllegalArgumentException("'maximumWaitTime' cannot be zero or less.");
public IterableStream<EventData> receive(int maximumMessageCount, Duration maximumWaitTime) { Objects.requireNonNull(maximumWaitTime, "'maximumWaitTime' cannot be null."); if (maximumMessageCount < 1) { throw logger.logExceptionAsError( new IllegalArgumentException("'maximumMessageCount' cannot be less than 1.")); } else if (maximumWaitTime.isNegative() || maximumWaitTime.isZero()) { throw logger.logExceptionAsError( new IllegalArgumentException("'maximumWaitTime' cannot be zero or less.")); } final Flux<EventData> events = Flux.create(emitter -> { queueWork(maximumMessageCount, maximumWaitTime, emitter); }); final Flux<EventData> map = events.collectList().map(x -> { logger.info("Number of events received: {}", x.size()); return Flux.fromIterable(x); }).block(); return new IterableStream<>(map); }
class EventHubConsumer implements Closeable { private static final AtomicReferenceFieldUpdater<EventHubConsumer, SynchronousEventSubscriber> SUBSCRIBER = AtomicReferenceFieldUpdater.newUpdater(EventHubConsumer.class, SynchronousEventSubscriber.class, "eventSubscriber"); private final ClientLogger logger = new ClientLogger(EventHubConsumer.class); private final AtomicLong idGenerator = new AtomicLong(); private final EventHubAsyncConsumer consumer; private final Duration timeout; private volatile SynchronousEventSubscriber eventSubscriber; EventHubConsumer(EventHubAsyncConsumer consumer, EventHubConsumerOptions options) { Objects.requireNonNull(options, "'options' cannot be null."); this.consumer = Objects.requireNonNull(consumer, "'consumer' cannot be null."); this.timeout = options.retry().tryTimeout(); } /** * Receives a batch of EventData from the Event Hub partition. * * @param maximumMessageCount The maximum number of messages to receive in this batch. * @return A set of {@link EventData} that was received. The iterable contains up to {@code maximumMessageCount} * events. * @throws IllegalArgumentException if {@code maximumMessageCount} is less than 1. */ public IterableStream<EventData> receive(int maximumMessageCount) { return receive(maximumMessageCount, timeout); } /** * Receives a batch of EventData from the Event Hub partition * * @param maximumMessageCount The maximum number of messages to receive in this batch. * @param maximumWaitTime The maximum amount of time to wait to build up the requested message count for the * batch; if not specified, the default wait time specified when the consumer was created will be used. * @return A set of {@link EventData} that was received. The iterable contains up to {@code maximumMessageCount} * events. * @throws NullPointerException if {@code maximumWaitTime} is null. * @throws IllegalArgumentException if {@code maximumMessageCount} is less than 1 or {@code maximumWaitTime} is * zero or a negative duration. 
*/ /** * Given an {@code emitter}, queues that work in {@link SynchronousEventSubscriber}. If the {@link * has not been initialised yet, will initialise it. */ private void queueWork(int maximumMessageCount, Duration maximumWaitTime, FluxSink<EventData> emitter) { final long id = idGenerator.getAndIncrement(); final SynchronousReceiveWork work = new SynchronousReceiveWork(id, maximumMessageCount, maximumWaitTime, emitter); if (SUBSCRIBER.compareAndSet(this, null, new SynchronousEventSubscriber(work))) { logger.info("Started synchronous event subscriber."); consumer.receive().subscribeWith(SUBSCRIBER.get(this)); } else { logger.info("Queueing work item in SynchronousEventSubscriber."); SUBSCRIBER.get(this).queueReceiveWork(work); } } /** * {@inheritDoc} */ @Override public void close() throws IOException { consumer.close(); } }
class EventHubConsumer implements Closeable { private static final AtomicReferenceFieldUpdater<EventHubConsumer, SynchronousEventSubscriber> SUBSCRIBER = AtomicReferenceFieldUpdater.newUpdater(EventHubConsumer.class, SynchronousEventSubscriber.class, "eventSubscriber"); private final ClientLogger logger = new ClientLogger(EventHubConsumer.class); private final AtomicLong idGenerator = new AtomicLong(); private final EventHubAsyncConsumer consumer; private final Duration timeout; private volatile SynchronousEventSubscriber eventSubscriber; EventHubConsumer(EventHubAsyncConsumer consumer, Duration tryTimeout) { Objects.requireNonNull(tryTimeout, "'tryTimeout' cannot be null."); this.consumer = Objects.requireNonNull(consumer, "'consumer' cannot be null."); this.timeout = tryTimeout; } /** * Receives a batch of EventData from the Event Hub partition. * * @param maximumMessageCount The maximum number of messages to receive in this batch. * @return A set of {@link EventData} that was received. The iterable contains up to {@code maximumMessageCount} * events. * @throws IllegalArgumentException if {@code maximumMessageCount} is less than 1. */ public IterableStream<EventData> receive(int maximumMessageCount) { return receive(maximumMessageCount, timeout); } /** * Receives a batch of EventData from the Event Hub partition * * @param maximumMessageCount The maximum number of messages to receive in this batch. * @param maximumWaitTime The maximum amount of time to wait to build up the requested message count for the * batch; if not specified, the default wait time specified when the consumer was created will be used. * @return A set of {@link EventData} that was received. The iterable contains up to {@code maximumMessageCount} * events. * @throws NullPointerException if {@code maximumWaitTime} is null. * @throws IllegalArgumentException if {@code maximumMessageCount} is less than 1 or {@code maximumWaitTime} is * zero or a negative duration. 
*/ /** * Given an {@code emitter}, queues that work in {@link SynchronousEventSubscriber}. If the {@link * has not been initialised yet, will initialise it. */ private void queueWork(int maximumMessageCount, Duration maximumWaitTime, FluxSink<EventData> emitter) { final long id = idGenerator.getAndIncrement(); final SynchronousReceiveWork work = new SynchronousReceiveWork(id, maximumMessageCount, maximumWaitTime, emitter); if (SUBSCRIBER.compareAndSet(this, null, new SynchronousEventSubscriber(work))) { logger.info("Started synchronous event subscriber."); consumer.receive().subscribeWith(SUBSCRIBER.get(this)); } else { logger.info("Queueing work item in SynchronousEventSubscriber."); SUBSCRIBER.get(this).queueReceiveWork(work); } } /** * {@inheritDoc} */ @Override public void close() throws IOException { consumer.close(); } }
Polling every one second?
public Poller<CertificateOperation> createCertificate(String name, CertificatePolicy policy, Map<String, String> tags) { return new Poller<CertificateOperation>(Duration.ofSeconds(1), createPollOperation(name), actvationOperation(name, policy, tags), cancelOperation(name)); }
return new Poller<CertificateOperation>(Duration.ofSeconds(1), createPollOperation(name), actvationOperation(name, policy, tags), cancelOperation(name));
public Poller<CertificateOperation> createCertificate(String name, CertificatePolicy policy, Map<String, String> tags) { return new Poller<CertificateOperation>(Duration.ofSeconds(1), createPollOperation(name), activationOperation(name, policy, tags), cancelOperation(name)); }
class CertificateAsyncClient { static final String API_VERSION = "7.0"; static final String ACCEPT_LANGUAGE = "en-US"; static final int DEFAULT_MAX_PAGE_RESULTS = 25; static final String CONTENT_TYPE_HEADER_VALUE = "application/json"; private String endpoint; private final CertificateService service; private final ClientLogger logger = new ClientLogger(CertificateAsyncClient.class); /** * Creates a CertificateAsyncClient that uses {@code pipeline} to service requests * * @param endpoint URL for the Azure KeyVault service. * @param pipeline HttpPipeline that the HTTP requests and responses flow through. */ CertificateAsyncClient(URL endpoint, HttpPipeline pipeline) { Objects.requireNonNull(endpoint, KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.VAULT_END_POINT_REQUIRED)); this.endpoint = endpoint.toString(); this.service = RestProxy.create(CertificateService.class, pipeline); } /** * Creates a new certificate. If this is the first version, the certificate resource is created. This operation requires * the certificates/create permission. * * <p><strong>Code Samples</strong></p> * <p>Create certificate is a long running operation. The {@link Poller poller} allows to automatically poll on crate certificate * operation status. It is possible to monitor each intermediate poll response during the poll operation.</p> * * {@codesnippet com.azure.security.keyvault.certificates.async.certificateclient.createCertificate * * @param name The name of the certificate to be created. * @param policy The policy of the certificate to be created. * @param tags The application specific metadata to set. * @throws ResourceModifiedException when invalid certificate policy configuration is provided. * @return A {@link Poller} polling on the create certificate operation status. 
*/ private Consumer<Poller<CertificateOperation>> cancelOperation(String name) { return poller -> { service.updateCertificateOperation(endpoint, name, API_VERSION, ACCEPT_LANGUAGE, new CertificateOperationUpdateParameter().cancellationRequested(true), CONTENT_TYPE_HEADER_VALUE, Context.NONE); }; } private Supplier<Mono<CertificateOperation>> actvationOperation(String name, CertificatePolicy policy, Map<String, String> tags) { return () -> createCertificateWithResponse(name, policy, tags) .flatMap(certificateOperationResponse -> { System.out.println("Activation function"); return Mono.just(certificateOperationResponse.value()); }); } /** * Creates a new certificate. If this is the first version, the certificate resource is created. This operation requires * the certificates/create permission. * * <p><strong>Code Samples</strong></p> * <p>Create certificate is a long running operation. The {@link Poller poller} allows to automatically poll on crate certificate * operation status. It is possible to monitor each intermediate poll response during the poll operation.</p> * * {@codesnippet com.azure.security.keyvault.certificates.async.certificateclient.createCertificate} * * @param name The name of the certificate to be created. * @param policy The policy of the certificate to be created. * @throws ResourceModifiedException when invalid certificate policy configuration is provided. * @return A {@link Poller} polling on the create certificate operation status. */ public Poller<CertificateOperation> createCertificate(String name, CertificatePolicy policy) { return createCertificate(name, policy, null); } /* Polling operation to poll on create certificate operation status. 
*/ private Function<PollResponse<CertificateOperation>, Mono<PollResponse<CertificateOperation>>> createPollOperation(String certificateName) { return prePollResponse -> { try { System.out.println("Calling Polling Function"); return service.getCertificateOperation(endpoint, certificateName, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE) .flatMap(this::processCertificateOperationResponse); } catch (HttpRequestException e) { logger.logExceptionAsError(e); return Mono.just(new PollResponse<>(PollResponse.OperationStatus.FAILED, null)); } }; } private Mono<PollResponse<CertificateOperation>> processCertificateOperationResponse(Response<CertificateOperation> certificateOperationResponse) { PollResponse.OperationStatus status = null; switch (certificateOperationResponse.value().status()) { case "inProgress": status = PollResponse.OperationStatus.IN_PROGRESS; break; case "completed": status = PollResponse.OperationStatus.SUCCESSFULLY_COMPLETED; break; case "failed": status = PollResponse.OperationStatus.FAILED; break; } return Mono.just(new PollResponse<>(status, certificateOperationResponse.value())); } Mono<Response<CertificateOperation>> createCertificateWithResponse(String name, CertificatePolicy certificatePolicy, Map<String, String> tags) { CertificateRequestParameters certificateRequestParameters = new CertificateRequestParameters() .certificatePolicy(new CertificatePolicyRequest(certificatePolicy)) .tags(tags); return service.createCertificate(endpoint, name, API_VERSION, ACCEPT_LANGUAGE, certificateRequestParameters, CONTENT_TYPE_HEADER_VALUE); } /** * Gets information about the latest version of the specified certificate. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the key in the key vault. 
Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.async.certificateclient.getCertificateWithPolicy} * * @param name The name of the certificate to retrieve, cannot be null * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException if {@code name} is empty string. * @return A {@link Mono} containing the requested {@link Certificate certificate}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Certificate> getCertificateWithPolicy(String name) { return withContext(context -> getCertificateWithResponse(name, "", context)).flatMap(FluxUtil::toMono); } Mono<Response<Certificate>> getCertificateWithResponse(String name, String version, Context context) { return service.getCertificate(endpoint, name, version, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context) .doOnRequest(ignored -> logger.info("Retrieving certificate - {}", name)) .doOnSuccess(response -> logger.info("Retrieved the certificate - {}", response.value().name())) .doOnError(error -> logger.warning("Failed to Retrieve the certificate - {}", name, error)); } /** * Gets information about the latest version of the specified certificate. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the key in the key vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.async.certificateclient.getCertificateWithResponse * * @param name The name of the certificate to retrieve, cannot be null * @param version The version of the certificate to retrieve. If this is an empty String or null, this call is equivalent to calling {@link CertificateAsyncClient * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. 
* @throws HttpRequestException if {@code name} is empty string. * @return A {@link Mono} containing a {@link Response} whose {@link Response */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Certificate>> getCertificateWithResponse(String name, String version) { return withContext(context -> getCertificateWithResponse(name, version == null ? "" : version, context)); } /** * Gets information about the specified version of the specified certificate. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the key in the key vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.async.certificateclient.getCertificate} * * @param name The name of the certificate to retrieve, cannot be null * @param version The version of the certificate to retrieve. If this is an empty String or null, this call is equivalent to calling {@link CertificateAsyncClient * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException if {@code name} is empty string. * @return A {@link Mono} containing the requested {@link Certificate certificate}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Certificate> getCertificate(String name, String version) { return withContext(context -> getCertificateWithResponse(name, version == null ? "" : version, context)).flatMap(FluxUtil::toMono); } /** * Updates the specified attributes associated with the specified certificate. The update operation changes specified attributes of an existing * stored certificate and attributes that are not specified in the request are left unchanged. This operation requires the certificates/update permission. 
* * <p><strong>Code Samples</strong></p> * <p>Gets latest version of the certificate, changes its tags and enabled status and then updates it in the Azure Key Vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.async.certificateclient.updateCertificate * * @param certificate The {@link CertificateBase base certificate} object with updated properties. * @throws NullPointerException if {@code certificate} is {@code null}. * @throws ResourceNotFoundException when a certificate with {@link CertificateBase * @throws HttpRequestException if {@link CertificateBase * @return A {@link Mono} containing the {@link CertificateBase updated certificate}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Certificate> updateCertificate(CertificateBase certificate) { return withContext(context -> updateCertificateWithResponse(certificate, context)).flatMap(FluxUtil::toMono); } /** * Updates the specified attributes associated with the specified certificate. The update operation changes specified attributes of an existing * stored certificate and attributes that are not specified in the request are left unchanged.. This operation requires the certificates/update permission. * * <p><strong>Code Samples</strong></p> * <p>Gets latest version of the certificate, changes its enabled status and then updates it in the Azure Key Vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.async.certificateclient.updateCertificateWithResponse * * @param certificate The {@link CertificateBase base certificate} object with updated properties. * @throws NullPointerException if {@code certificate} is {@code null}. 
* @throws ResourceNotFoundException when a certificate with {@link CertificateBase * @throws HttpRequestException if {@link CertificateBase * @return A {@link Mono} containing a {@link Response} whose {@link Response */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Certificate>> updateCertificateWithResponse(CertificateBase certificate) { return withContext(context -> updateCertificateWithResponse(certificate, context)); } Mono<Response<Certificate>> updateCertificateWithResponse(CertificateBase certificateBase, Context context) { Objects.requireNonNull(certificateBase, "The certificate input parameter cannot be null"); CertificateUpdateParameters parameters = new CertificateUpdateParameters() .tags(certificateBase.tags()) .certificateAttributes(new CertificateRequestAttributes(certificateBase)); return service.updateCertificate(endpoint, certificateBase.name(), API_VERSION, ACCEPT_LANGUAGE, parameters, CONTENT_TYPE_HEADER_VALUE, context) .doOnRequest(ignored -> logger.info("Updating certificate - {}", certificateBase.name())) .doOnSuccess(response -> logger.info("Updated the certificate - {}", certificateBase.name())) .doOnError(error -> logger.warning("Failed to update the certificate - {}", certificateBase.name(), error)); } /** * Gets information about the certificate which represents {@link CertificateBase certificateBase} from the key vault. This * operation requires the certificates/get permission. * * <p>The list operations {@link CertificateAsyncClient * the {@link Flux} containing {@link CertificateBase base certificate} as output excluding the properties like secretId and keyId of the certificate. 
* This operation can then be used to get the full certificate with its properties excluding the policy from {@code certificateBase}.</p> * * {@codesnippet com.azure.security.keyvault.certificates.async.certificateclient.getCertificate * * @param certificateBase The {@link CertificateBase base certificate} holding attributes of the certificate being requested. * @throws ResourceNotFoundException when a certificate with {@link CertificateBase * @throws HttpRequestException if {@link CertificateBase
class CertificateAsyncClient { static final String API_VERSION = "7.0"; static final String ACCEPT_LANGUAGE = "en-US"; static final int DEFAULT_MAX_PAGE_RESULTS = 25; static final String CONTENT_TYPE_HEADER_VALUE = "application/json"; private final String endpoint; private final CertificateService service; private final ClientLogger logger = new ClientLogger(CertificateAsyncClient.class); /** * Creates a CertificateAsyncClient that uses {@code pipeline} to service requests * * @param endpoint URL for the Azure KeyVault service. * @param pipeline HttpPipeline that the HTTP requests and responses flow through. */ CertificateAsyncClient(URL endpoint, HttpPipeline pipeline) { Objects.requireNonNull(endpoint, KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.VAULT_END_POINT_REQUIRED)); this.endpoint = endpoint.toString(); this.service = RestProxy.create(CertificateService.class, pipeline); } /** * Creates a new certificate. If this is the first version, the certificate resource is created. This operation requires * the certificates/create permission. * * <p><strong>Code Samples</strong></p> * <p>Create certificate is a long running operation. The {@link Poller poller} allows users to automatically poll on the create certificate * operation status. It is possible to monitor each intermediate poll response during the poll operation.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.createCertificate * * @param name The name of the certificate to be created. * @param policy The policy of the certificate to be created. * @param tags The application specific metadata to set. * @throws ResourceModifiedException when invalid certificate policy configuration is provided. * @return A {@link Poller} polling on the create certificate operation status. 
*/ private Consumer<Poller<CertificateOperation>> cancelOperation(String name) { return poller -> withContext(context -> cancelCertificateOperationWithResponse(name, context)); } private Supplier<Mono<CertificateOperation>> activationOperation(String name, CertificatePolicy policy, Map<String, String> tags) { return () -> withContext(context -> createCertificateWithResponse(name, policy, tags, context) .flatMap(certificateOperationResponse -> Mono.just(certificateOperationResponse.value()))); } /** * Creates a new certificate. If this is the first version, the certificate resource is created. This operation requires * the certificates/create permission. * * <p><strong>Code Samples</strong></p> * <p>Create certificate is a long running operation. The {@link Poller poller} allows users to automatically poll on the create certificate * operation status. It is possible to monitor each intermediate poll response during the poll operation.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.createCertificate * * @param name The name of the certificate to be created. * @param policy The policy of the certificate to be created. * @throws ResourceModifiedException when invalid certificate policy configuration is provided. * @return A {@link Poller} polling on the create certificate operation status. */ public Poller<CertificateOperation> createCertificate(String name, CertificatePolicy policy) { return createCertificate(name, policy, null); } /* Polling operation to poll on create certificate operation status. 
*/ private Function<PollResponse<CertificateOperation>, Mono<PollResponse<CertificateOperation>>> createPollOperation(String certificateName) { return prePollResponse -> { try { return withContext(context -> service.getCertificateOperation(endpoint, certificateName, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context) .flatMap(this::processCertificateOperationResponse)); } catch (HttpRequestException e) { logger.logExceptionAsError(e); return Mono.just(new PollResponse<>(PollResponse.OperationStatus.FAILED, null)); } }; } private Mono<PollResponse<CertificateOperation>> processCertificateOperationResponse(Response<CertificateOperation> certificateOperationResponse) { PollResponse.OperationStatus status = null; switch (certificateOperationResponse.value().status()) { case "inProgress": status = PollResponse.OperationStatus.IN_PROGRESS; break; case "completed": status = PollResponse.OperationStatus.SUCCESSFULLY_COMPLETED; break; case "failed": status = PollResponse.OperationStatus.FAILED; break; default: break; } return Mono.just(new PollResponse<>(status, certificateOperationResponse.value())); } Mono<Response<CertificateOperation>> createCertificateWithResponse(String name, CertificatePolicy certificatePolicy, Map<String, String> tags, Context context) { CertificateRequestParameters certificateRequestParameters = new CertificateRequestParameters() .certificatePolicy(new CertificatePolicyRequest(certificatePolicy)) .tags(tags); return service.createCertificate(endpoint, name, API_VERSION, ACCEPT_LANGUAGE, certificateRequestParameters, CONTENT_TYPE_HEADER_VALUE, context); } /** * Gets information about the latest version of the specified certificate. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the key in the key vault. 
Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificateWithPolicy * * @param name The name of the certificate to retrieve, cannot be null * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException if {@code name} is empty string. * @return A {@link Mono} containing the requested {@link Certificate certificate}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Certificate> getCertificateWithPolicy(String name) { return withContext(context -> getCertificateWithResponse(name, "", context)).flatMap(FluxUtil::toMono); } Mono<Response<Certificate>> getCertificateWithResponse(String name, String version, Context context) { return service.getCertificate(endpoint, name, version, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context) .doOnRequest(ignored -> logger.info("Retrieving certificate - {}", name)) .doOnSuccess(response -> logger.info("Retrieved the certificate - {}", response.value().name())) .doOnError(error -> logger.warning("Failed to Retrieve the certificate - {}", name, error)); } /** * Gets information about the latest version of the specified certificate. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the key in the key vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificateWithResponse * * @param name The name of the certificate to retrieve, cannot be null * @param version The version of the certificate to retrieve. If this is an empty String or null, this call is equivalent to calling {@link CertificateAsyncClient * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. 
* @throws HttpRequestException if {@code name} is empty string. * @return A {@link Mono} containing a {@link Response} whose {@link Response */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Certificate>> getCertificateWithResponse(String name, String version) { return withContext(context -> getCertificateWithResponse(name, version == null ? "" : version, context)); } /** * Gets information about the specified version of the specified certificate. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the key in the key vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificate * * @param name The name of the certificate to retrieve, cannot be null * @param version The version of the certificate to retrieve. If this is an empty String or null, this call is equivalent to calling {@link CertificateAsyncClient * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException if {@code name} is empty string. * @return A {@link Mono} containing the requested {@link Certificate certificate}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Certificate> getCertificate(String name, String version) { return withContext(context -> getCertificateWithResponse(name, version == null ? "" : version, context)).flatMap(FluxUtil::toMono); } /** * Updates the specified attributes associated with the specified certificate. The update operation changes specified attributes of an existing * stored certificate and attributes that are not specified in the request are left unchanged. This operation requires the certificates/update permission. 
* * <p><strong>Code Samples</strong></p> * <p>Gets latest version of the certificate, changes its tags and enabled status and then updates it in the Azure Key Vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.updateCertificate * * @param certificate The {@link CertificateBase} object with updated properties. * @throws NullPointerException if {@code certificate} is {@code null}. * @throws ResourceNotFoundException when a certificate with {@link CertificateBase * @throws HttpRequestException if {@link CertificateBase * @return A {@link Mono} containing the {@link CertificateBase updated certificate}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Certificate> updateCertificate(CertificateBase certificate) { return withContext(context -> updateCertificateWithResponse(certificate, context)).flatMap(FluxUtil::toMono); } /** * Updates the specified attributes associated with the specified certificate. The update operation changes specified attributes of an existing * stored certificate and attributes that are not specified in the request are left unchanged. This operation requires the certificates/update permission. * * <p><strong>Code Samples</strong></p> * <p>Gets latest version of the certificate, changes its enabled status and then updates it in the Azure Key Vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.updateCertificateWithResponse * * @param certificate The {@link CertificateBase} object with updated properties. * @throws NullPointerException if {@code certificate} is {@code null}. 
* @throws ResourceNotFoundException when a certificate with {@link CertificateBase * @throws HttpRequestException if {@link CertificateBase * @return A {@link Mono} containing a {@link Response} whose {@link Response */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Certificate>> updateCertificateWithResponse(CertificateBase certificate) { return withContext(context -> updateCertificateWithResponse(certificate, context)); } Mono<Response<Certificate>> updateCertificateWithResponse(CertificateBase certificateBase, Context context) { Objects.requireNonNull(certificateBase, "The certificate input parameter cannot be null"); CertificateUpdateParameters parameters = new CertificateUpdateParameters() .tags(certificateBase.tags()) .certificateAttributes(new CertificateRequestAttributes(certificateBase)); return service.updateCertificate(endpoint, certificateBase.name(), API_VERSION, ACCEPT_LANGUAGE, parameters, CONTENT_TYPE_HEADER_VALUE, context) .doOnRequest(ignored -> logger.info("Updating certificate - {}", certificateBase.name())) .doOnSuccess(response -> logger.info("Updated the certificate - {}", certificateBase.name())) .doOnError(error -> logger.warning("Failed to update the certificate - {}", certificateBase.name(), error)); } /** * Gets information about the certificate which represents the {@link CertificateBase} from the key vault. This * operation requires the certificates/get permission. * * <p>The list operations {@link CertificateAsyncClient * the {@link Flux} containing {@link CertificateBase} as output excluding the properties like secretId and keyId of the certificate. * This operation can then be used to get the full certificate with its properties excluding the policy from {@code certificateBase}.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificate * * @param certificateBase The {@link CertificateBase} holding attributes of the certificate being requested. 
* @throws ResourceNotFoundException when a certificate with {@link CertificateBase * @throws HttpRequestException if {@link CertificateBase
Correct - this needs to change in line with the other async client libraries where the Context is extracted from reactor and pass into the service call.
private Consumer<Poller<CertificateOperation>> cancelOperation(String name) { return poller -> { service.updateCertificateOperation(endpoint, name, API_VERSION, ACCEPT_LANGUAGE, new CertificateOperationUpdateParameter().cancellationRequested(true), CONTENT_TYPE_HEADER_VALUE, Context.NONE); }; }
new CertificateOperationUpdateParameter().cancellationRequested(true), CONTENT_TYPE_HEADER_VALUE, Context.NONE);
new CertificateOperationUpdateParameter().cancellationRequested(true); return service.updateCertificateOperation(endpoint, certificateName, API_VERSION, ACCEPT_LANGUAGE, parameter, CONTENT_TYPE_HEADER_VALUE, context) .doOnRequest(ignored -> logger.info("Cancelling certificate operation - {}
class CertificateAsyncClient { static final String API_VERSION = "7.0"; static final String ACCEPT_LANGUAGE = "en-US"; static final int DEFAULT_MAX_PAGE_RESULTS = 25; static final String CONTENT_TYPE_HEADER_VALUE = "application/json"; private String endpoint; private final CertificateService service; private final ClientLogger logger = new ClientLogger(CertificateAsyncClient.class); /** * Creates a CertificateAsyncClient that uses {@code pipeline} to service requests * * @param endpoint URL for the Azure KeyVault service. * @param pipeline HttpPipeline that the HTTP requests and responses flow through. */ CertificateAsyncClient(URL endpoint, HttpPipeline pipeline) { Objects.requireNonNull(endpoint, KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.VAULT_END_POINT_REQUIRED)); this.endpoint = endpoint.toString(); this.service = RestProxy.create(CertificateService.class, pipeline); } /** * Creates a new certificate. If this is the first version, the certificate resource is created. This operation requires * the certificates/create permission. * * <p><strong>Code Samples</strong></p> * <p>Create certificate is a long running operation. The {@link Poller poller} allows to automatically poll on crate certificate * operation status. It is possible to monitor each intermediate poll response during the poll operation.</p> * * {@codesnippet com.azure.security.keyvault.certificates.async.certificateclient.createCertificate * * @param name The name of the certificate to be created. * @param policy The policy of the certificate to be created. * @param tags The application specific metadata to set. * @throws ResourceModifiedException when invalid certificate policy configuration is provided. * @return A {@link Poller} polling on the create certificate operation status. 
*/ public Poller<CertificateOperation> createCertificate(String name, CertificatePolicy policy, Map<String, String> tags) { return new Poller<CertificateOperation>(Duration.ofSeconds(1), createPollOperation(name), actvationOperation(name, policy, tags), cancelOperation(name)); } private Consumer<Poller<CertificateOperation>> cancelOperation(String name) { return poller -> { service.updateCertificateOperation(endpoint, name, API_VERSION, ACCEPT_LANGUAGE, /** * Creates a new certificate. If this is the first version, the certificate resource is created. This operation requires * the certificates/create permission. * * <p><strong>Code Samples</strong></p> * <p>Create certificate is a long running operation. The {@link Poller poller} allows to automatically poll on crate certificate * operation status. It is possible to monitor each intermediate poll response during the poll operation.</p> * * {@codesnippet com.azure.security.keyvault.certificates.async.certificateclient.createCertificate} * * @param name The name of the certificate to be created. * @param policy The policy of the certificate to be created. * @throws ResourceModifiedException when invalid certificate policy configuration is provided. * @return A {@link Poller} polling on the create certificate operation status. */ public Poller<CertificateOperation> createCertificate(String name, CertificatePolicy policy) { return createCertificate(name, policy, null); } /* Polling operation to poll on create certificate operation status. 
*/ private Function<PollResponse<CertificateOperation>, Mono<PollResponse<CertificateOperation>>> createPollOperation(String certificateName) { return prePollResponse -> { try { System.out.println("Calling Polling Function"); return service.getCertificateOperation(endpoint, certificateName, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE) .flatMap(this::processCertificateOperationResponse); } catch (HttpRequestException e) { logger.logExceptionAsError(e); return Mono.just(new PollResponse<>(PollResponse.OperationStatus.FAILED, null)); } }; } private Mono<PollResponse<CertificateOperation>> processCertificateOperationResponse(Response<CertificateOperation> certificateOperationResponse) { PollResponse.OperationStatus status = null; switch (certificateOperationResponse.value().status()) { case "inProgress": status = PollResponse.OperationStatus.IN_PROGRESS; break; case "completed": status = PollResponse.OperationStatus.SUCCESSFULLY_COMPLETED; break; case "failed": status = PollResponse.OperationStatus.FAILED; break; } return Mono.just(new PollResponse<>(status, certificateOperationResponse.value())); } Mono<Response<CertificateOperation>> createCertificateWithResponse(String name, CertificatePolicy certificatePolicy, Map<String, String> tags) { CertificateRequestParameters certificateRequestParameters = new CertificateRequestParameters() .certificatePolicy(new CertificatePolicyRequest(certificatePolicy)) .tags(tags); return service.createCertificate(endpoint, name, API_VERSION, ACCEPT_LANGUAGE, certificateRequestParameters, CONTENT_TYPE_HEADER_VALUE); } /** * Gets information about the latest version of the specified certificate. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the key in the key vault. 
Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.async.certificateclient.getCertificateWithPolicy} * * @param name The name of the certificate to retrieve, cannot be null * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException if {@code name} is empty string. * @return A {@link Mono} containing the requested {@link Certificate certificate}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Certificate> getCertificateWithPolicy(String name) { return withContext(context -> getCertificateWithResponse(name, "", context)).flatMap(FluxUtil::toMono); } Mono<Response<Certificate>> getCertificateWithResponse(String name, String version, Context context) { return service.getCertificate(endpoint, name, version, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context) .doOnRequest(ignored -> logger.info("Retrieving certificate - {}", name)) .doOnSuccess(response -> logger.info("Retrieved the certificate - {}", response.value().name())) .doOnError(error -> logger.warning("Failed to Retrieve the certificate - {}", name, error)); } /** * Gets information about the latest version of the specified certificate. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the key in the key vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.async.certificateclient.getCertificateWithResponse * * @param name The name of the certificate to retrieve, cannot be null * @param version The version of the certificate to retrieve. If this is an empty String or null, this call is equivalent to calling {@link CertificateAsyncClient * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. 
* @throws HttpRequestException if {@code name} is empty string. * @return A {@link Mono} containing a {@link Response} whose {@link Response */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Certificate>> getCertificateWithResponse(String name, String version) { return withContext(context -> getCertificateWithResponse(name, version == null ? "" : version, context)); } /** * Gets information about the specified version of the specified certificate. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the key in the key vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.async.certificateclient.getCertificate} * * @param name The name of the certificate to retrieve, cannot be null * @param version The version of the certificate to retrieve. If this is an empty String or null, this call is equivalent to calling {@link CertificateAsyncClient * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException if {@code name} is empty string. * @return A {@link Mono} containing the requested {@link Certificate certificate}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Certificate> getCertificate(String name, String version) { return withContext(context -> getCertificateWithResponse(name, version == null ? "" : version, context)).flatMap(FluxUtil::toMono); } /** * Updates the specified attributes associated with the specified certificate. The update operation changes specified attributes of an existing * stored certificate and attributes that are not specified in the request are left unchanged. This operation requires the certificates/update permission. 
* * <p><strong>Code Samples</strong></p> * <p>Gets latest version of the certificate, changes its tags and enabled status and then updates it in the Azure Key Vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.async.certificateclient.updateCertificate * * @param certificate The {@link CertificateBase base certificate} object with updated properties. * @throws NullPointerException if {@code certificate} is {@code null}. * @throws ResourceNotFoundException when a certificate with {@link CertificateBase * @throws HttpRequestException if {@link CertificateBase * @return A {@link Mono} containing the {@link CertificateBase updated certificate}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Certificate> updateCertificate(CertificateBase certificate) { return withContext(context -> updateCertificateWithResponse(certificate, context)).flatMap(FluxUtil::toMono); } /** * Updates the specified attributes associated with the specified certificate. The update operation changes specified attributes of an existing * stored certificate and attributes that are not specified in the request are left unchanged.. This operation requires the certificates/update permission. * * <p><strong>Code Samples</strong></p> * <p>Gets latest version of the certificate, changes its enabled status and then updates it in the Azure Key Vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.async.certificateclient.updateCertificateWithResponse * * @param certificate The {@link CertificateBase base certificate} object with updated properties. * @throws NullPointerException if {@code certificate} is {@code null}. 
* @throws ResourceNotFoundException when a certificate with {@link CertificateBase * @throws HttpRequestException if {@link CertificateBase * @return A {@link Mono} containing a {@link Response} whose {@link Response */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Certificate>> updateCertificateWithResponse(CertificateBase certificate) { return withContext(context -> updateCertificateWithResponse(certificate, context)); } Mono<Response<Certificate>> updateCertificateWithResponse(CertificateBase certificateBase, Context context) { Objects.requireNonNull(certificateBase, "The certificate input parameter cannot be null"); CertificateUpdateParameters parameters = new CertificateUpdateParameters() .tags(certificateBase.tags()) .certificateAttributes(new CertificateRequestAttributes(certificateBase)); return service.updateCertificate(endpoint, certificateBase.name(), API_VERSION, ACCEPT_LANGUAGE, parameters, CONTENT_TYPE_HEADER_VALUE, context) .doOnRequest(ignored -> logger.info("Updating certificate - {}", certificateBase.name())) .doOnSuccess(response -> logger.info("Updated the certificate - {}", certificateBase.name())) .doOnError(error -> logger.warning("Failed to update the certificate - {}", certificateBase.name(), error)); } /** * Gets information about the certificate which represents {@link CertificateBase certificateBase} from the key vault. This * operation requires the certificates/get permission. * * <p>The list operations {@link CertificateAsyncClient * the {@link Flux} containing {@link CertificateBase base certificate} as output excluding the properties like secretId and keyId of the certificate. 
* This operation can then be used to get the full certificate with its properties excluding the policy from {@code certificateBase}.</p> * * {@codesnippet com.azure.security.keyvault.certificates.async.certificateclient.getCertificate * * @param certificateBase The {@link CertificateBase base certificate} holding attributes of the certificate being requested. * @throws ResourceNotFoundException when a certificate with {@link CertificateBase * @throws HttpRequestException if {@link CertificateBase
class CertificateAsyncClient { static final String API_VERSION = "7.0"; static final String ACCEPT_LANGUAGE = "en-US"; static final int DEFAULT_MAX_PAGE_RESULTS = 25; static final String CONTENT_TYPE_HEADER_VALUE = "application/json"; private final String endpoint; private final CertificateService service; private final ClientLogger logger = new ClientLogger(CertificateAsyncClient.class); /** * Creates a CertificateAsyncClient that uses {@code pipeline} to service requests * * @param endpoint URL for the Azure KeyVault service. * @param pipeline HttpPipeline that the HTTP requests and responses flow through. */ CertificateAsyncClient(URL endpoint, HttpPipeline pipeline) { Objects.requireNonNull(endpoint, KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.VAULT_END_POINT_REQUIRED)); this.endpoint = endpoint.toString(); this.service = RestProxy.create(CertificateService.class, pipeline); } /** * Creates a new certificate. If this is the first version, the certificate resource is created. This operation requires * the certificates/create permission. * * <p><strong>Code Samples</strong></p> * <p>Create certificate is a long running operation. The {@link Poller poller} allows users to automatically poll on the create certificate * operation status. It is possible to monitor each intermediate poll response during the poll operation.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.createCertificate * * @param name The name of the certificate to be created. * @param policy The policy of the certificate to be created. * @param tags The application specific metadata to set. * @throws ResourceModifiedException when invalid certificate policy configuration is provided. * @return A {@link Poller} polling on the create certificate operation status. 
*/ public Poller<CertificateOperation> createCertificate(String name, CertificatePolicy policy, Map<String, String> tags) { return new Poller<CertificateOperation>(Duration.ofSeconds(1), createPollOperation(name), activationOperation(name, policy, tags), cancelOperation(name)); } private Consumer<Poller<CertificateOperation>> cancelOperation(String name) { return poller -> withContext(context -> cancelCertificateOperationWithResponse(name, context)); } private Supplier<Mono<CertificateOperation>> activationOperation(String name, CertificatePolicy policy, Map<String, String> tags) { return () -> withContext(context -> createCertificateWithResponse(name, policy, tags, context) .flatMap(certificateOperationResponse -> Mono.just(certificateOperationResponse.value()))); } /** * Creates a new certificate. If this is the first version, the certificate resource is created. This operation requires * the certificates/create permission. * * <p><strong>Code Samples</strong></p> * <p>Create certificate is a long running operation. The {@link Poller poller} allows users to automatically poll on the create certificate * operation status. It is possible to monitor each intermediate poll response during the poll operation.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.createCertificate * * @param name The name of the certificate to be created. * @param policy The policy of the certificate to be created. * @throws ResourceModifiedException when invalid certificate policy configuration is provided. * @return A {@link Poller} polling on the create certificate operation status. */ public Poller<CertificateOperation> createCertificate(String name, CertificatePolicy policy) { return createCertificate(name, policy, null); } /* Polling operation to poll on create certificate operation status. 
*/ private Function<PollResponse<CertificateOperation>, Mono<PollResponse<CertificateOperation>>> createPollOperation(String certificateName) { return prePollResponse -> { try { return withContext(context -> service.getCertificateOperation(endpoint, certificateName, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context) .flatMap(this::processCertificateOperationResponse)); } catch (HttpRequestException e) { logger.logExceptionAsError(e); return Mono.just(new PollResponse<>(PollResponse.OperationStatus.FAILED, null)); } }; } private Mono<PollResponse<CertificateOperation>> processCertificateOperationResponse(Response<CertificateOperation> certificateOperationResponse) { PollResponse.OperationStatus status = null; switch (certificateOperationResponse.value().status()) { case "inProgress": status = PollResponse.OperationStatus.IN_PROGRESS; break; case "completed": status = PollResponse.OperationStatus.SUCCESSFULLY_COMPLETED; break; case "failed": status = PollResponse.OperationStatus.FAILED; break; default: break; } return Mono.just(new PollResponse<>(status, certificateOperationResponse.value())); } Mono<Response<CertificateOperation>> createCertificateWithResponse(String name, CertificatePolicy certificatePolicy, Map<String, String> tags, Context context) { CertificateRequestParameters certificateRequestParameters = new CertificateRequestParameters() .certificatePolicy(new CertificatePolicyRequest(certificatePolicy)) .tags(tags); return service.createCertificate(endpoint, name, API_VERSION, ACCEPT_LANGUAGE, certificateRequestParameters, CONTENT_TYPE_HEADER_VALUE, context); } /** * Gets information about the latest version of the specified certificate. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the key in the key vault. 
Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificateWithPolicy * * @param name The name of the certificate to retrieve, cannot be null * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException if {@code name} is empty string. * @return A {@link Mono} containing the requested {@link Certificate certificate}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Certificate> getCertificateWithPolicy(String name) { return withContext(context -> getCertificateWithResponse(name, "", context)).flatMap(FluxUtil::toMono); } Mono<Response<Certificate>> getCertificateWithResponse(String name, String version, Context context) { return service.getCertificate(endpoint, name, version, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context) .doOnRequest(ignored -> logger.info("Retrieving certificate - {}", name)) .doOnSuccess(response -> logger.info("Retrieved the certificate - {}", response.value().name())) .doOnError(error -> logger.warning("Failed to Retrieve the certificate - {}", name, error)); } /** * Gets information about the latest version of the specified certificate. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the key in the key vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificateWithResponse * * @param name The name of the certificate to retrieve, cannot be null * @param version The version of the certificate to retrieve. If this is an empty String or null, this call is equivalent to calling {@link CertificateAsyncClient * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. 
* @throws HttpRequestException if {@code name} is empty string. * @return A {@link Mono} containing a {@link Response} whose {@link Response */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Certificate>> getCertificateWithResponse(String name, String version) { return withContext(context -> getCertificateWithResponse(name, version == null ? "" : version, context)); } /** * Gets information about the specified version of the specified certificate. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the key in the key vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificate * * @param name The name of the certificate to retrieve, cannot be null * @param version The version of the certificate to retrieve. If this is an empty String or null, this call is equivalent to calling {@link CertificateAsyncClient * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException if {@code name} is empty string. * @return A {@link Mono} containing the requested {@link Certificate certificate}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Certificate> getCertificate(String name, String version) { return withContext(context -> getCertificateWithResponse(name, version == null ? "" : version, context)).flatMap(FluxUtil::toMono); } /** * Updates the specified attributes associated with the specified certificate. The update operation changes specified attributes of an existing * stored certificate and attributes that are not specified in the request are left unchanged. This operation requires the certificates/update permission. 
* * <p><strong>Code Samples</strong></p> * <p>Gets latest version of the certificate, changes its tags and enabled status and then updates it in the Azure Key Vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.updateCertificate * * @param certificate The {@link CertificateBase} object with updated properties. * @throws NullPointerException if {@code certificate} is {@code null}. * @throws ResourceNotFoundException when a certificate with {@link CertificateBase * @throws HttpRequestException if {@link CertificateBase * @return A {@link Mono} containing the {@link CertificateBase updated certificate}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Certificate> updateCertificate(CertificateBase certificate) { return withContext(context -> updateCertificateWithResponse(certificate, context)).flatMap(FluxUtil::toMono); } /** * Updates the specified attributes associated with the specified certificate. The update operation changes specified attributes of an existing * stored certificate and attributes that are not specified in the request are left unchanged. This operation requires the certificates/update permission. * * <p><strong>Code Samples</strong></p> * <p>Gets latest version of the certificate, changes its enabled status and then updates it in the Azure Key Vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.updateCertificateWithResponse * * @param certificate The {@link CertificateBase} object with updated properties. * @throws NullPointerException if {@code certificate} is {@code null}. 
* @throws ResourceNotFoundException when a certificate with {@link CertificateBase * @throws HttpRequestException if {@link CertificateBase * @return A {@link Mono} containing a {@link Response} whose {@link Response */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Certificate>> updateCertificateWithResponse(CertificateBase certificate) { return withContext(context -> updateCertificateWithResponse(certificate, context)); } Mono<Response<Certificate>> updateCertificateWithResponse(CertificateBase certificateBase, Context context) { Objects.requireNonNull(certificateBase, "The certificate input parameter cannot be null"); CertificateUpdateParameters parameters = new CertificateUpdateParameters() .tags(certificateBase.tags()) .certificateAttributes(new CertificateRequestAttributes(certificateBase)); return service.updateCertificate(endpoint, certificateBase.name(), API_VERSION, ACCEPT_LANGUAGE, parameters, CONTENT_TYPE_HEADER_VALUE, context) .doOnRequest(ignored -> logger.info("Updating certificate - {}", certificateBase.name())) .doOnSuccess(response -> logger.info("Updated the certificate - {}", certificateBase.name())) .doOnError(error -> logger.warning("Failed to update the certificate - {}", certificateBase.name(), error)); } /** * Gets information about the certificate which represents the {@link CertificateBase} from the key vault. This * operation requires the certificates/get permission. * * <p>The list operations {@link CertificateAsyncClient * the {@link Flux} containing {@link CertificateBase} as output excluding the properties like secretId and keyId of the certificate. * This operation can then be used to get the full certificate with its properties excluding the policy from {@code certificateBase}.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificate * * @param certificateBase The {@link CertificateBase} holding attributes of the certificate being requested. 
* @throws ResourceNotFoundException when a certificate with {@link CertificateBase * @throws HttpRequestException if {@link CertificateBase
fixed.
private Consumer<Poller<CertificateOperation>> cancelOperation(String name) { return poller -> { service.updateCertificateOperation(endpoint, name, API_VERSION, ACCEPT_LANGUAGE, new CertificateOperationUpdateParameter().cancellationRequested(true), CONTENT_TYPE_HEADER_VALUE, Context.NONE); }; }
new CertificateOperationUpdateParameter().cancellationRequested(true), CONTENT_TYPE_HEADER_VALUE, Context.NONE);
new CertificateOperationUpdateParameter().cancellationRequested(true); return service.updateCertificateOperation(endpoint, certificateName, API_VERSION, ACCEPT_LANGUAGE, parameter, CONTENT_TYPE_HEADER_VALUE, context) .doOnRequest(ignored -> logger.info("Cancelling certificate operation - {}
class CertificateAsyncClient { static final String API_VERSION = "7.0"; static final String ACCEPT_LANGUAGE = "en-US"; static final int DEFAULT_MAX_PAGE_RESULTS = 25; static final String CONTENT_TYPE_HEADER_VALUE = "application/json"; private String endpoint; private final CertificateService service; private final ClientLogger logger = new ClientLogger(CertificateAsyncClient.class); /** * Creates a CertificateAsyncClient that uses {@code pipeline} to service requests * * @param endpoint URL for the Azure KeyVault service. * @param pipeline HttpPipeline that the HTTP requests and responses flow through. */ CertificateAsyncClient(URL endpoint, HttpPipeline pipeline) { Objects.requireNonNull(endpoint, KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.VAULT_END_POINT_REQUIRED)); this.endpoint = endpoint.toString(); this.service = RestProxy.create(CertificateService.class, pipeline); } /** * Creates a new certificate. If this is the first version, the certificate resource is created. This operation requires * the certificates/create permission. * * <p><strong>Code Samples</strong></p> * <p>Create certificate is a long running operation. The {@link Poller poller} allows to automatically poll on crate certificate * operation status. It is possible to monitor each intermediate poll response during the poll operation.</p> * * {@codesnippet com.azure.security.keyvault.certificates.async.certificateclient.createCertificate * * @param name The name of the certificate to be created. * @param policy The policy of the certificate to be created. * @param tags The application specific metadata to set. * @throws ResourceModifiedException when invalid certificate policy configuration is provided. * @return A {@link Poller} polling on the create certificate operation status. 
*/ public Poller<CertificateOperation> createCertificate(String name, CertificatePolicy policy, Map<String, String> tags) { return new Poller<CertificateOperation>(Duration.ofSeconds(1), createPollOperation(name), actvationOperation(name, policy, tags), cancelOperation(name)); } private Consumer<Poller<CertificateOperation>> cancelOperation(String name) { return poller -> { service.updateCertificateOperation(endpoint, name, API_VERSION, ACCEPT_LANGUAGE, /** * Creates a new certificate. If this is the first version, the certificate resource is created. This operation requires * the certificates/create permission. * * <p><strong>Code Samples</strong></p> * <p>Create certificate is a long running operation. The {@link Poller poller} allows to automatically poll on crate certificate * operation status. It is possible to monitor each intermediate poll response during the poll operation.</p> * * {@codesnippet com.azure.security.keyvault.certificates.async.certificateclient.createCertificate} * * @param name The name of the certificate to be created. * @param policy The policy of the certificate to be created. * @throws ResourceModifiedException when invalid certificate policy configuration is provided. * @return A {@link Poller} polling on the create certificate operation status. */ public Poller<CertificateOperation> createCertificate(String name, CertificatePolicy policy) { return createCertificate(name, policy, null); } /* Polling operation to poll on create certificate operation status. 
*/ private Function<PollResponse<CertificateOperation>, Mono<PollResponse<CertificateOperation>>> createPollOperation(String certificateName) { return prePollResponse -> { try { System.out.println("Calling Polling Function"); return service.getCertificateOperation(endpoint, certificateName, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE) .flatMap(this::processCertificateOperationResponse); } catch (HttpRequestException e) { logger.logExceptionAsError(e); return Mono.just(new PollResponse<>(PollResponse.OperationStatus.FAILED, null)); } }; } private Mono<PollResponse<CertificateOperation>> processCertificateOperationResponse(Response<CertificateOperation> certificateOperationResponse) { PollResponse.OperationStatus status = null; switch (certificateOperationResponse.value().status()) { case "inProgress": status = PollResponse.OperationStatus.IN_PROGRESS; break; case "completed": status = PollResponse.OperationStatus.SUCCESSFULLY_COMPLETED; break; case "failed": status = PollResponse.OperationStatus.FAILED; break; } return Mono.just(new PollResponse<>(status, certificateOperationResponse.value())); } Mono<Response<CertificateOperation>> createCertificateWithResponse(String name, CertificatePolicy certificatePolicy, Map<String, String> tags) { CertificateRequestParameters certificateRequestParameters = new CertificateRequestParameters() .certificatePolicy(new CertificatePolicyRequest(certificatePolicy)) .tags(tags); return service.createCertificate(endpoint, name, API_VERSION, ACCEPT_LANGUAGE, certificateRequestParameters, CONTENT_TYPE_HEADER_VALUE); } /** * Gets information about the latest version of the specified certificate. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the key in the key vault. 
Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.async.certificateclient.getCertificateWithPolicy} * * @param name The name of the certificate to retrieve, cannot be null * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException if {@code name} is empty string. * @return A {@link Mono} containing the requested {@link Certificate certificate}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Certificate> getCertificateWithPolicy(String name) { return withContext(context -> getCertificateWithResponse(name, "", context)).flatMap(FluxUtil::toMono); } Mono<Response<Certificate>> getCertificateWithResponse(String name, String version, Context context) { return service.getCertificate(endpoint, name, version, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context) .doOnRequest(ignored -> logger.info("Retrieving certificate - {}", name)) .doOnSuccess(response -> logger.info("Retrieved the certificate - {}", response.value().name())) .doOnError(error -> logger.warning("Failed to Retrieve the certificate - {}", name, error)); } /** * Gets information about the latest version of the specified certificate. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the key in the key vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.async.certificateclient.getCertificateWithResponse * * @param name The name of the certificate to retrieve, cannot be null * @param version The version of the certificate to retrieve. If this is an empty String or null, this call is equivalent to calling {@link CertificateAsyncClient * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. 
* @throws HttpRequestException if {@code name} is empty string. * @return A {@link Mono} containing a {@link Response} whose {@link Response */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Certificate>> getCertificateWithResponse(String name, String version) { return withContext(context -> getCertificateWithResponse(name, version == null ? "" : version, context)); } /** * Gets information about the specified version of the specified certificate. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the key in the key vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.async.certificateclient.getCertificate} * * @param name The name of the certificate to retrieve, cannot be null * @param version The version of the certificate to retrieve. If this is an empty String or null, this call is equivalent to calling {@link CertificateAsyncClient * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException if {@code name} is empty string. * @return A {@link Mono} containing the requested {@link Certificate certificate}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Certificate> getCertificate(String name, String version) { return withContext(context -> getCertificateWithResponse(name, version == null ? "" : version, context)).flatMap(FluxUtil::toMono); } /** * Updates the specified attributes associated with the specified certificate. The update operation changes specified attributes of an existing * stored certificate and attributes that are not specified in the request are left unchanged. This operation requires the certificates/update permission. 
* * <p><strong>Code Samples</strong></p> * <p>Gets latest version of the certificate, changes its tags and enabled status and then updates it in the Azure Key Vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.async.certificateclient.updateCertificate * * @param certificate The {@link CertificateBase base certificate} object with updated properties. * @throws NullPointerException if {@code certificate} is {@code null}. * @throws ResourceNotFoundException when a certificate with {@link CertificateBase * @throws HttpRequestException if {@link CertificateBase * @return A {@link Mono} containing the {@link CertificateBase updated certificate}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Certificate> updateCertificate(CertificateBase certificate) { return withContext(context -> updateCertificateWithResponse(certificate, context)).flatMap(FluxUtil::toMono); } /** * Updates the specified attributes associated with the specified certificate. The update operation changes specified attributes of an existing * stored certificate and attributes that are not specified in the request are left unchanged.. This operation requires the certificates/update permission. * * <p><strong>Code Samples</strong></p> * <p>Gets latest version of the certificate, changes its enabled status and then updates it in the Azure Key Vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.async.certificateclient.updateCertificateWithResponse * * @param certificate The {@link CertificateBase base certificate} object with updated properties. * @throws NullPointerException if {@code certificate} is {@code null}. 
* @throws ResourceNotFoundException when a certificate with {@link CertificateBase * @throws HttpRequestException if {@link CertificateBase * @return A {@link Mono} containing a {@link Response} whose {@link Response */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Certificate>> updateCertificateWithResponse(CertificateBase certificate) { return withContext(context -> updateCertificateWithResponse(certificate, context)); } Mono<Response<Certificate>> updateCertificateWithResponse(CertificateBase certificateBase, Context context) { Objects.requireNonNull(certificateBase, "The certificate input parameter cannot be null"); CertificateUpdateParameters parameters = new CertificateUpdateParameters() .tags(certificateBase.tags()) .certificateAttributes(new CertificateRequestAttributes(certificateBase)); return service.updateCertificate(endpoint, certificateBase.name(), API_VERSION, ACCEPT_LANGUAGE, parameters, CONTENT_TYPE_HEADER_VALUE, context) .doOnRequest(ignored -> logger.info("Updating certificate - {}", certificateBase.name())) .doOnSuccess(response -> logger.info("Updated the certificate - {}", certificateBase.name())) .doOnError(error -> logger.warning("Failed to update the certificate - {}", certificateBase.name(), error)); } /** * Gets information about the certificate which represents {@link CertificateBase certificateBase} from the key vault. This * operation requires the certificates/get permission. * * <p>The list operations {@link CertificateAsyncClient * the {@link Flux} containing {@link CertificateBase base certificate} as output excluding the properties like secretId and keyId of the certificate. 
* This operation can then be used to get the full certificate with its properties excluding the policy from {@code certificateBase}.</p> * * {@codesnippet com.azure.security.keyvault.certificates.async.certificateclient.getCertificate * * @param certificateBase The {@link CertificateBase base certificate} holding attributes of the certificate being requested. * @throws ResourceNotFoundException when a certificate with {@link CertificateBase * @throws HttpRequestException if {@link CertificateBase
class CertificateAsyncClient {
    // Service API version and fixed request headers sent on every call.
    static final String API_VERSION = "7.0";
    static final String ACCEPT_LANGUAGE = "en-US";
    static final int DEFAULT_MAX_PAGE_RESULTS = 25;
    static final String CONTENT_TYPE_HEADER_VALUE = "application/json";

    // Vault endpoint URL; assigned once in the constructor.
    private final String endpoint;
    // Auto-generated REST proxy that performs the HTTP calls.
    private final CertificateService service;
    private final ClientLogger logger = new ClientLogger(CertificateAsyncClient.class);

    /**
     * Creates a CertificateAsyncClient that uses {@code pipeline} to service requests
     *
     * @param endpoint URL for the Azure KeyVault service.
     * @param pipeline HttpPipeline that the HTTP requests and responses flow through.
     */
    CertificateAsyncClient(URL endpoint, HttpPipeline pipeline) {
        Objects.requireNonNull(endpoint, KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.VAULT_END_POINT_REQUIRED));
        this.endpoint = endpoint.toString();
        this.service = RestProxy.create(CertificateService.class, pipeline);
    }

    /**
     * Creates a new certificate. If this is the first version, the certificate resource is created. This operation requires
     * the certificates/create permission.
     *
     * <p><strong>Code Samples</strong></p>
     * <p>Create certificate is a long running operation. The {@link Poller poller} allows users to automatically poll on the create certificate
     * operation status. It is possible to monitor each intermediate poll response during the poll operation.</p>
     *
     * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.createCertificate}
     *
     * @param name The name of the certificate to be created.
     * @param policy The policy of the certificate to be created.
     * @param tags The application specific metadata to set.
     * @throws ResourceModifiedException when invalid certificate policy configuration is provided.
     * @return A {@link Poller} polling on the create certificate operation status.
*/ public Poller<CertificateOperation> createCertificate(String name, CertificatePolicy policy, Map<String, String> tags) { return new Poller<CertificateOperation>(Duration.ofSeconds(1), createPollOperation(name), activationOperation(name, policy, tags), cancelOperation(name)); } private Consumer<Poller<CertificateOperation>> cancelOperation(String name) { return poller -> withContext(context -> cancelCertificateOperationWithResponse(name, context)); } private Supplier<Mono<CertificateOperation>> activationOperation(String name, CertificatePolicy policy, Map<String, String> tags) { return () -> withContext(context -> createCertificateWithResponse(name, policy, tags, context) .flatMap(certificateOperationResponse -> Mono.just(certificateOperationResponse.value()))); } /** * Creates a new certificate. If this is the first version, the certificate resource is created. This operation requires * the certificates/create permission. * * <p><strong>Code Samples</strong></p> * <p>Create certificate is a long running operation. The {@link Poller poller} allows users to automatically poll on the create certificate * operation status. It is possible to monitor each intermediate poll response during the poll operation.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.createCertificate * * @param name The name of the certificate to be created. * @param policy The policy of the certificate to be created. * @throws ResourceModifiedException when invalid certificate policy configuration is provided. * @return A {@link Poller} polling on the create certificate operation status. */ public Poller<CertificateOperation> createCertificate(String name, CertificatePolicy policy) { return createCertificate(name, policy, null); } /* Polling operation to poll on create certificate operation status. 
     */
    // Per-poll function used by the Poller: fetches the current certificate operation
    // status from the service and maps it onto a PollResponse.
    private Function<PollResponse<CertificateOperation>, Mono<PollResponse<CertificateOperation>>> createPollOperation(String certificateName) {
        return prePollResponse -> {
            try {
                return withContext(context -> service.getCertificateOperation(endpoint, certificateName, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context)
                    .flatMap(this::processCertificateOperationResponse));
            } catch (HttpRequestException e) {
                // A synchronous failure while issuing the request is reported as a FAILED poll response.
                logger.logExceptionAsError(e);
                return Mono.just(new PollResponse<>(PollResponse.OperationStatus.FAILED, null));
            }
        };
    }

    // Maps the service's string status ("inProgress" / "completed" / "failed") onto the
    // poller's OperationStatus. NOTE(review): an unrecognized status leaves the mapped
    // status null — confirm the Poller tolerates a null status.
    private Mono<PollResponse<CertificateOperation>> processCertificateOperationResponse(Response<CertificateOperation> certificateOperationResponse) {
        PollResponse.OperationStatus status = null;
        switch (certificateOperationResponse.value().status()) {
            case "inProgress":
                status = PollResponse.OperationStatus.IN_PROGRESS;
                break;
            case "completed":
                status = PollResponse.OperationStatus.SUCCESSFULLY_COMPLETED;
                break;
            case "failed":
                status = PollResponse.OperationStatus.FAILED;
                break;
            default:
                break;
        }
        return Mono.just(new PollResponse<>(status, certificateOperationResponse.value()));
    }

    // Issues the initial create-certificate request with the given policy and tags.
    Mono<Response<CertificateOperation>> createCertificateWithResponse(String name, CertificatePolicy certificatePolicy, Map<String, String> tags, Context context) {
        CertificateRequestParameters certificateRequestParameters = new CertificateRequestParameters()
            .certificatePolicy(new CertificatePolicyRequest(certificatePolicy))
            .tags(tags);
        return service.createCertificate(endpoint, name, API_VERSION, ACCEPT_LANGUAGE, certificateRequestParameters, CONTENT_TYPE_HEADER_VALUE, context);
    }

    /**
     * Gets information about the latest version of the specified certificate. This operation requires the certificates/get permission.
     *
     * <p><strong>Code Samples</strong></p>
     * <p>Gets a specific version of the key in the key vault.
     * Prints out the
     * returned certificate details when a response has been received.</p>
     *
     * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificateWithPolicy}
     *
     * @param name The name of the certificate to retrieve, cannot be null
     * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault.
     * @throws HttpRequestException if {@code name} is empty string.
     * @return A {@link Mono} containing the requested {@link Certificate certificate}.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Certificate> getCertificateWithPolicy(String name) {
        // The empty version string selects the latest version of the certificate.
        return withContext(context -> getCertificateWithResponse(name, "", context)).flatMap(FluxUtil::toMono);
    }

    // Service-call implementation shared by the public get overloads; logs the request lifecycle.
    Mono<Response<Certificate>> getCertificateWithResponse(String name, String version, Context context) {
        return service.getCertificate(endpoint, name, version, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context)
            .doOnRequest(ignored -> logger.info("Retrieving certificate - {}", name))
            .doOnSuccess(response -> logger.info("Retrieved the certificate - {}", response.value().name()))
            .doOnError(error -> logger.warning("Failed to Retrieve the certificate - {}", name, error));
    }

    /**
     * Gets information about the latest version of the specified certificate. This operation requires the certificates/get permission.
     *
     * <p><strong>Code Samples</strong></p>
     * <p>Gets a specific version of the key in the key vault. Prints out the
     * returned certificate details when a response has been received.</p>
     *
     * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificateWithResponse}
     *
     * @param name The name of the certificate to retrieve, cannot be null
     * @param version The version of the certificate to retrieve. If this is an empty String or null, this call is equivalent to calling {@link CertificateAsyncClient#getCertificateWithPolicy(String)}, with the latest version being retrieved.
     * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault.
     * @throws HttpRequestException if {@code name} is empty string.
     * @return A {@link Mono} containing a {@link Response} whose {@link Response#value() value} contains the requested {@link Certificate certificate}.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<Certificate>> getCertificateWithResponse(String name, String version) {
        return withContext(context -> getCertificateWithResponse(name, version == null ? "" : version, context));
    }

    /**
     * Gets information about the specified version of the specified certificate. This operation requires the certificates/get permission.
     *
     * <p><strong>Code Samples</strong></p>
     * <p>Gets a specific version of the key in the key vault. Prints out the
     * returned certificate details when a response has been received.</p>
     *
     * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificate}
     *
     * @param name The name of the certificate to retrieve, cannot be null
     * @param version The version of the certificate to retrieve. If this is an empty String or null, the latest version is retrieved.
     * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault.
     * @throws HttpRequestException if {@code name} is empty string.
     * @return A {@link Mono} containing the requested {@link Certificate certificate}.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Certificate> getCertificate(String name, String version) {
        return withContext(context -> getCertificateWithResponse(name, version == null ? "" : version, context)).flatMap(FluxUtil::toMono);
    }

    /**
     * Updates the specified attributes associated with the specified certificate. The update operation changes specified attributes of an existing
     * stored certificate and attributes that are not specified in the request are left unchanged. This operation requires the certificates/update permission.
* * <p><strong>Code Samples</strong></p> * <p>Gets latest version of the certificate, changes its tags and enabled status and then updates it in the Azure Key Vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.updateCertificate * * @param certificate The {@link CertificateBase} object with updated properties. * @throws NullPointerException if {@code certificate} is {@code null}. * @throws ResourceNotFoundException when a certificate with {@link CertificateBase * @throws HttpRequestException if {@link CertificateBase * @return A {@link Mono} containing the {@link CertificateBase updated certificate}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Certificate> updateCertificate(CertificateBase certificate) { return withContext(context -> updateCertificateWithResponse(certificate, context)).flatMap(FluxUtil::toMono); } /** * Updates the specified attributes associated with the specified certificate. The update operation changes specified attributes of an existing * stored certificate and attributes that are not specified in the request are left unchanged. This operation requires the certificates/update permission. * * <p><strong>Code Samples</strong></p> * <p>Gets latest version of the certificate, changes its enabled status and then updates it in the Azure Key Vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.updateCertificateWithResponse * * @param certificate The {@link CertificateBase} object with updated properties. * @throws NullPointerException if {@code certificate} is {@code null}. 
* @throws ResourceNotFoundException when a certificate with {@link CertificateBase * @throws HttpRequestException if {@link CertificateBase * @return A {@link Mono} containing a {@link Response} whose {@link Response */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Certificate>> updateCertificateWithResponse(CertificateBase certificate) { return withContext(context -> updateCertificateWithResponse(certificate, context)); } Mono<Response<Certificate>> updateCertificateWithResponse(CertificateBase certificateBase, Context context) { Objects.requireNonNull(certificateBase, "The certificate input parameter cannot be null"); CertificateUpdateParameters parameters = new CertificateUpdateParameters() .tags(certificateBase.tags()) .certificateAttributes(new CertificateRequestAttributes(certificateBase)); return service.updateCertificate(endpoint, certificateBase.name(), API_VERSION, ACCEPT_LANGUAGE, parameters, CONTENT_TYPE_HEADER_VALUE, context) .doOnRequest(ignored -> logger.info("Updating certificate - {}", certificateBase.name())) .doOnSuccess(response -> logger.info("Updated the certificate - {}", certificateBase.name())) .doOnError(error -> logger.warning("Failed to update the certificate - {}", certificateBase.name(), error)); } /** * Gets information about the certificate which represents the {@link CertificateBase} from the key vault. This * operation requires the certificates/get permission. * * <p>The list operations {@link CertificateAsyncClient * the {@link Flux} containing {@link CertificateBase} as output excluding the properties like secretId and keyId of the certificate. * This operation can then be used to get the full certificate with its properties excluding the policy from {@code certificateBase}.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificate * * @param certificateBase The {@link CertificateBase} holding attributes of the certificate being requested. 
* @throws ResourceNotFoundException when a certificate with {@link CertificateBase * @throws HttpRequestException if {@link CertificateBase
Made it 10 seconds. Trivial certificate cases take around 5–15 seconds to finish; third-party issuer cases can take longer.
public Poller<CertificateOperation> createCertificate(String name, CertificatePolicy policy, Map<String, String> tags) { return new Poller<CertificateOperation>(Duration.ofSeconds(1), createPollOperation(name), actvationOperation(name, policy, tags), cancelOperation(name)); }
return new Poller<CertificateOperation>(Duration.ofSeconds(1), createPollOperation(name), actvationOperation(name, policy, tags), cancelOperation(name));
public Poller<CertificateOperation> createCertificate(String name, CertificatePolicy policy, Map<String, String> tags) { return new Poller<CertificateOperation>(Duration.ofSeconds(1), createPollOperation(name), activationOperation(name, policy, tags), cancelOperation(name)); }
class CertificateAsyncClient { static final String API_VERSION = "7.0"; static final String ACCEPT_LANGUAGE = "en-US"; static final int DEFAULT_MAX_PAGE_RESULTS = 25; static final String CONTENT_TYPE_HEADER_VALUE = "application/json"; private String endpoint; private final CertificateService service; private final ClientLogger logger = new ClientLogger(CertificateAsyncClient.class); /** * Creates a CertificateAsyncClient that uses {@code pipeline} to service requests * * @param endpoint URL for the Azure KeyVault service. * @param pipeline HttpPipeline that the HTTP requests and responses flow through. */ CertificateAsyncClient(URL endpoint, HttpPipeline pipeline) { Objects.requireNonNull(endpoint, KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.VAULT_END_POINT_REQUIRED)); this.endpoint = endpoint.toString(); this.service = RestProxy.create(CertificateService.class, pipeline); } /** * Creates a new certificate. If this is the first version, the certificate resource is created. This operation requires * the certificates/create permission. * * <p><strong>Code Samples</strong></p> * <p>Create certificate is a long running operation. The {@link Poller poller} allows to automatically poll on crate certificate * operation status. It is possible to monitor each intermediate poll response during the poll operation.</p> * * {@codesnippet com.azure.security.keyvault.certificates.async.certificateclient.createCertificate * * @param name The name of the certificate to be created. * @param policy The policy of the certificate to be created. * @param tags The application specific metadata to set. * @throws ResourceModifiedException when invalid certificate policy configuration is provided. * @return A {@link Poller} polling on the create certificate operation status. 
*/ private Consumer<Poller<CertificateOperation>> cancelOperation(String name) { return poller -> { service.updateCertificateOperation(endpoint, name, API_VERSION, ACCEPT_LANGUAGE, new CertificateOperationUpdateParameter().cancellationRequested(true), CONTENT_TYPE_HEADER_VALUE, Context.NONE); }; } private Supplier<Mono<CertificateOperation>> actvationOperation(String name, CertificatePolicy policy, Map<String, String> tags) { return () -> createCertificateWithResponse(name, policy, tags) .flatMap(certificateOperationResponse -> { System.out.println("Activation function"); return Mono.just(certificateOperationResponse.value()); }); } /** * Creates a new certificate. If this is the first version, the certificate resource is created. This operation requires * the certificates/create permission. * * <p><strong>Code Samples</strong></p> * <p>Create certificate is a long running operation. The {@link Poller poller} allows to automatically poll on crate certificate * operation status. It is possible to monitor each intermediate poll response during the poll operation.</p> * * {@codesnippet com.azure.security.keyvault.certificates.async.certificateclient.createCertificate} * * @param name The name of the certificate to be created. * @param policy The policy of the certificate to be created. * @throws ResourceModifiedException when invalid certificate policy configuration is provided. * @return A {@link Poller} polling on the create certificate operation status. */ public Poller<CertificateOperation> createCertificate(String name, CertificatePolicy policy) { return createCertificate(name, policy, null); } /* Polling operation to poll on create certificate operation status. 
*/ private Function<PollResponse<CertificateOperation>, Mono<PollResponse<CertificateOperation>>> createPollOperation(String certificateName) { return prePollResponse -> { try { System.out.println("Calling Polling Function"); return service.getCertificateOperation(endpoint, certificateName, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE) .flatMap(this::processCertificateOperationResponse); } catch (HttpRequestException e) { logger.logExceptionAsError(e); return Mono.just(new PollResponse<>(PollResponse.OperationStatus.FAILED, null)); } }; } private Mono<PollResponse<CertificateOperation>> processCertificateOperationResponse(Response<CertificateOperation> certificateOperationResponse) { PollResponse.OperationStatus status = null; switch (certificateOperationResponse.value().status()) { case "inProgress": status = PollResponse.OperationStatus.IN_PROGRESS; break; case "completed": status = PollResponse.OperationStatus.SUCCESSFULLY_COMPLETED; break; case "failed": status = PollResponse.OperationStatus.FAILED; break; } return Mono.just(new PollResponse<>(status, certificateOperationResponse.value())); } Mono<Response<CertificateOperation>> createCertificateWithResponse(String name, CertificatePolicy certificatePolicy, Map<String, String> tags) { CertificateRequestParameters certificateRequestParameters = new CertificateRequestParameters() .certificatePolicy(new CertificatePolicyRequest(certificatePolicy)) .tags(tags); return service.createCertificate(endpoint, name, API_VERSION, ACCEPT_LANGUAGE, certificateRequestParameters, CONTENT_TYPE_HEADER_VALUE); } /** * Gets information about the latest version of the specified certificate. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the key in the key vault. 
Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.async.certificateclient.getCertificateWithPolicy} * * @param name The name of the certificate to retrieve, cannot be null * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException if {@code name} is empty string. * @return A {@link Mono} containing the requested {@link Certificate certificate}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Certificate> getCertificateWithPolicy(String name) { return withContext(context -> getCertificateWithResponse(name, "", context)).flatMap(FluxUtil::toMono); } Mono<Response<Certificate>> getCertificateWithResponse(String name, String version, Context context) { return service.getCertificate(endpoint, name, version, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context) .doOnRequest(ignored -> logger.info("Retrieving certificate - {}", name)) .doOnSuccess(response -> logger.info("Retrieved the certificate - {}", response.value().name())) .doOnError(error -> logger.warning("Failed to Retrieve the certificate - {}", name, error)); } /** * Gets information about the latest version of the specified certificate. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the key in the key vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.async.certificateclient.getCertificateWithResponse * * @param name The name of the certificate to retrieve, cannot be null * @param version The version of the certificate to retrieve. If this is an empty String or null, this call is equivalent to calling {@link CertificateAsyncClient * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. 
* @throws HttpRequestException if {@code name} is empty string. * @return A {@link Mono} containing a {@link Response} whose {@link Response */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Certificate>> getCertificateWithResponse(String name, String version) { return withContext(context -> getCertificateWithResponse(name, version == null ? "" : version, context)); } /** * Gets information about the specified version of the specified certificate. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the key in the key vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.async.certificateclient.getCertificate} * * @param name The name of the certificate to retrieve, cannot be null * @param version The version of the certificate to retrieve. If this is an empty String or null, this call is equivalent to calling {@link CertificateAsyncClient * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException if {@code name} is empty string. * @return A {@link Mono} containing the requested {@link Certificate certificate}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Certificate> getCertificate(String name, String version) { return withContext(context -> getCertificateWithResponse(name, version == null ? "" : version, context)).flatMap(FluxUtil::toMono); } /** * Updates the specified attributes associated with the specified certificate. The update operation changes specified attributes of an existing * stored certificate and attributes that are not specified in the request are left unchanged. This operation requires the certificates/update permission. 
* * <p><strong>Code Samples</strong></p> * <p>Gets latest version of the certificate, changes its tags and enabled status and then updates it in the Azure Key Vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.async.certificateclient.updateCertificate * * @param certificate The {@link CertificateBase base certificate} object with updated properties. * @throws NullPointerException if {@code certificate} is {@code null}. * @throws ResourceNotFoundException when a certificate with {@link CertificateBase * @throws HttpRequestException if {@link CertificateBase * @return A {@link Mono} containing the {@link CertificateBase updated certificate}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Certificate> updateCertificate(CertificateBase certificate) { return withContext(context -> updateCertificateWithResponse(certificate, context)).flatMap(FluxUtil::toMono); } /** * Updates the specified attributes associated with the specified certificate. The update operation changes specified attributes of an existing * stored certificate and attributes that are not specified in the request are left unchanged.. This operation requires the certificates/update permission. * * <p><strong>Code Samples</strong></p> * <p>Gets latest version of the certificate, changes its enabled status and then updates it in the Azure Key Vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.async.certificateclient.updateCertificateWithResponse * * @param certificate The {@link CertificateBase base certificate} object with updated properties. * @throws NullPointerException if {@code certificate} is {@code null}. 
* @throws ResourceNotFoundException when a certificate with {@link CertificateBase * @throws HttpRequestException if {@link CertificateBase * @return A {@link Mono} containing a {@link Response} whose {@link Response */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Certificate>> updateCertificateWithResponse(CertificateBase certificate) { return withContext(context -> updateCertificateWithResponse(certificate, context)); } Mono<Response<Certificate>> updateCertificateWithResponse(CertificateBase certificateBase, Context context) { Objects.requireNonNull(certificateBase, "The certificate input parameter cannot be null"); CertificateUpdateParameters parameters = new CertificateUpdateParameters() .tags(certificateBase.tags()) .certificateAttributes(new CertificateRequestAttributes(certificateBase)); return service.updateCertificate(endpoint, certificateBase.name(), API_VERSION, ACCEPT_LANGUAGE, parameters, CONTENT_TYPE_HEADER_VALUE, context) .doOnRequest(ignored -> logger.info("Updating certificate - {}", certificateBase.name())) .doOnSuccess(response -> logger.info("Updated the certificate - {}", certificateBase.name())) .doOnError(error -> logger.warning("Failed to update the certificate - {}", certificateBase.name(), error)); } /** * Gets information about the certificate which represents {@link CertificateBase certificateBase} from the key vault. This * operation requires the certificates/get permission. * * <p>The list operations {@link CertificateAsyncClient * the {@link Flux} containing {@link CertificateBase base certificate} as output excluding the properties like secretId and keyId of the certificate. 
* This operation can then be used to get the full certificate with its properties excluding the policy from {@code certificateBase}.</p> * * {@codesnippet com.azure.security.keyvault.certificates.async.certificateclient.getCertificate * * @param certificateBase The {@link CertificateBase base certificate} holding attributes of the certificate being requested. * @throws ResourceNotFoundException when a certificate with {@link CertificateBase * @throws HttpRequestException if {@link CertificateBase
class CertificateAsyncClient { static final String API_VERSION = "7.0"; static final String ACCEPT_LANGUAGE = "en-US"; static final int DEFAULT_MAX_PAGE_RESULTS = 25; static final String CONTENT_TYPE_HEADER_VALUE = "application/json"; private final String endpoint; private final CertificateService service; private final ClientLogger logger = new ClientLogger(CertificateAsyncClient.class); /** * Creates a CertificateAsyncClient that uses {@code pipeline} to service requests * * @param endpoint URL for the Azure KeyVault service. * @param pipeline HttpPipeline that the HTTP requests and responses flow through. */ CertificateAsyncClient(URL endpoint, HttpPipeline pipeline) { Objects.requireNonNull(endpoint, KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.VAULT_END_POINT_REQUIRED)); this.endpoint = endpoint.toString(); this.service = RestProxy.create(CertificateService.class, pipeline); } /** * Creates a new certificate. If this is the first version, the certificate resource is created. This operation requires * the certificates/create permission. * * <p><strong>Code Samples</strong></p> * <p>Create certificate is a long running operation. The {@link Poller poller} allows users to automatically poll on the create certificate * operation status. It is possible to monitor each intermediate poll response during the poll operation.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.createCertificate * * @param name The name of the certificate to be created. * @param policy The policy of the certificate to be created. * @param tags The application specific metadata to set. * @throws ResourceModifiedException when invalid certificate policy configuration is provided. * @return A {@link Poller} polling on the create certificate operation status. 
*/ private Consumer<Poller<CertificateOperation>> cancelOperation(String name) { return poller -> withContext(context -> cancelCertificateOperationWithResponse(name, context)); } private Supplier<Mono<CertificateOperation>> activationOperation(String name, CertificatePolicy policy, Map<String, String> tags) { return () -> withContext(context -> createCertificateWithResponse(name, policy, tags, context) .flatMap(certificateOperationResponse -> Mono.just(certificateOperationResponse.value()))); } /** * Creates a new certificate. If this is the first version, the certificate resource is created. This operation requires * the certificates/create permission. * * <p><strong>Code Samples</strong></p> * <p>Create certificate is a long running operation. The {@link Poller poller} allows users to automatically poll on the create certificate * operation status. It is possible to monitor each intermediate poll response during the poll operation.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.createCertificate * * @param name The name of the certificate to be created. * @param policy The policy of the certificate to be created. * @throws ResourceModifiedException when invalid certificate policy configuration is provided. * @return A {@link Poller} polling on the create certificate operation status. */ public Poller<CertificateOperation> createCertificate(String name, CertificatePolicy policy) { return createCertificate(name, policy, null); } /* Polling operation to poll on create certificate operation status. 
*/ private Function<PollResponse<CertificateOperation>, Mono<PollResponse<CertificateOperation>>> createPollOperation(String certificateName) { return prePollResponse -> { try { return withContext(context -> service.getCertificateOperation(endpoint, certificateName, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context) .flatMap(this::processCertificateOperationResponse)); } catch (HttpRequestException e) { logger.logExceptionAsError(e); return Mono.just(new PollResponse<>(PollResponse.OperationStatus.FAILED, null)); } }; } private Mono<PollResponse<CertificateOperation>> processCertificateOperationResponse(Response<CertificateOperation> certificateOperationResponse) { PollResponse.OperationStatus status = null; switch (certificateOperationResponse.value().status()) { case "inProgress": status = PollResponse.OperationStatus.IN_PROGRESS; break; case "completed": status = PollResponse.OperationStatus.SUCCESSFULLY_COMPLETED; break; case "failed": status = PollResponse.OperationStatus.FAILED; break; default: break; } return Mono.just(new PollResponse<>(status, certificateOperationResponse.value())); } Mono<Response<CertificateOperation>> createCertificateWithResponse(String name, CertificatePolicy certificatePolicy, Map<String, String> tags, Context context) { CertificateRequestParameters certificateRequestParameters = new CertificateRequestParameters() .certificatePolicy(new CertificatePolicyRequest(certificatePolicy)) .tags(tags); return service.createCertificate(endpoint, name, API_VERSION, ACCEPT_LANGUAGE, certificateRequestParameters, CONTENT_TYPE_HEADER_VALUE, context); } /** * Gets information about the latest version of the specified certificate. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the key in the key vault. 
Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificateWithPolicy * * @param name The name of the certificate to retrieve, cannot be null * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException if {@code name} is empty string. * @return A {@link Mono} containing the requested {@link Certificate certificate}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Certificate> getCertificateWithPolicy(String name) { return withContext(context -> getCertificateWithResponse(name, "", context)).flatMap(FluxUtil::toMono); } Mono<Response<Certificate>> getCertificateWithResponse(String name, String version, Context context) { return service.getCertificate(endpoint, name, version, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context) .doOnRequest(ignored -> logger.info("Retrieving certificate - {}", name)) .doOnSuccess(response -> logger.info("Retrieved the certificate - {}", response.value().name())) .doOnError(error -> logger.warning("Failed to Retrieve the certificate - {}", name, error)); } /** * Gets information about the latest version of the specified certificate. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the key in the key vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificateWithResponse * * @param name The name of the certificate to retrieve, cannot be null * @param version The version of the certificate to retrieve. If this is an empty String or null, this call is equivalent to calling {@link CertificateAsyncClient * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. 
* @throws HttpRequestException if {@code name} is empty string. * @return A {@link Mono} containing a {@link Response} whose {@link Response */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Certificate>> getCertificateWithResponse(String name, String version) { return withContext(context -> getCertificateWithResponse(name, version == null ? "" : version, context)); } /** * Gets information about the specified version of the specified certificate. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the key in the key vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificate * * @param name The name of the certificate to retrieve, cannot be null * @param version The version of the certificate to retrieve. If this is an empty String or null, this call is equivalent to calling {@link CertificateAsyncClient * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException if {@code name} is empty string. * @return A {@link Mono} containing the requested {@link Certificate certificate}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Certificate> getCertificate(String name, String version) { return withContext(context -> getCertificateWithResponse(name, version == null ? "" : version, context)).flatMap(FluxUtil::toMono); } /** * Updates the specified attributes associated with the specified certificate. The update operation changes specified attributes of an existing * stored certificate and attributes that are not specified in the request are left unchanged. This operation requires the certificates/update permission. 
* * <p><strong>Code Samples</strong></p> * <p>Gets latest version of the certificate, changes its tags and enabled status and then updates it in the Azure Key Vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.updateCertificate * * @param certificate The {@link CertificateBase} object with updated properties. * @throws NullPointerException if {@code certificate} is {@code null}. * @throws ResourceNotFoundException when a certificate with {@link CertificateBase * @throws HttpRequestException if {@link CertificateBase * @return A {@link Mono} containing the {@link CertificateBase updated certificate}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Certificate> updateCertificate(CertificateBase certificate) { return withContext(context -> updateCertificateWithResponse(certificate, context)).flatMap(FluxUtil::toMono); } /** * Updates the specified attributes associated with the specified certificate. The update operation changes specified attributes of an existing * stored certificate and attributes that are not specified in the request are left unchanged. This operation requires the certificates/update permission. * * <p><strong>Code Samples</strong></p> * <p>Gets latest version of the certificate, changes its enabled status and then updates it in the Azure Key Vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.updateCertificateWithResponse * * @param certificate The {@link CertificateBase} object with updated properties. * @throws NullPointerException if {@code certificate} is {@code null}. 
* @throws ResourceNotFoundException when a certificate with {@link CertificateBase * @throws HttpRequestException if {@link CertificateBase * @return A {@link Mono} containing a {@link Response} whose {@link Response */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Certificate>> updateCertificateWithResponse(CertificateBase certificate) { return withContext(context -> updateCertificateWithResponse(certificate, context)); } Mono<Response<Certificate>> updateCertificateWithResponse(CertificateBase certificateBase, Context context) { Objects.requireNonNull(certificateBase, "The certificate input parameter cannot be null"); CertificateUpdateParameters parameters = new CertificateUpdateParameters() .tags(certificateBase.tags()) .certificateAttributes(new CertificateRequestAttributes(certificateBase)); return service.updateCertificate(endpoint, certificateBase.name(), API_VERSION, ACCEPT_LANGUAGE, parameters, CONTENT_TYPE_HEADER_VALUE, context) .doOnRequest(ignored -> logger.info("Updating certificate - {}", certificateBase.name())) .doOnSuccess(response -> logger.info("Updated the certificate - {}", certificateBase.name())) .doOnError(error -> logger.warning("Failed to update the certificate - {}", certificateBase.name(), error)); } /** * Gets information about the certificate which represents the {@link CertificateBase} from the key vault. This * operation requires the certificates/get permission. * * <p>The list operations {@link CertificateAsyncClient * the {@link Flux} containing {@link CertificateBase} as output excluding the properties like secretId and keyId of the certificate. * This operation can then be used to get the full certificate with its properties excluding the policy from {@code certificateBase}.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificate * * @param certificateBase The {@link CertificateBase} holding attributes of the certificate being requested. 
* @throws ResourceNotFoundException when a certificate with {@link CertificateBase * @throws HttpRequestException if {@link CertificateBase
Note: the poll interval has been reverted back to 1 second.
/**
 * Creates a new certificate. If this is the first version, the certificate resource is created.
 * This operation requires the certificates/create permission.
 *
 * @param name The name of the certificate to be created.
 * @param policy The policy of the certificate to be created.
 * @param tags The application specific metadata to set.
 * @throws ResourceModifiedException when invalid certificate policy configuration is provided.
 * @return A {@link Poller} polling on the create certificate operation status.
 */
public Poller<CertificateOperation> createCertificate(String name, CertificatePolicy policy, Map<String, String> tags) {
    // Fix: the activation supplier is named "activationOperation" (the original call
    // referenced a misspelled "actvationOperation", which does not exist).
    // Polls the create-certificate operation status every second.
    return new Poller<CertificateOperation>(Duration.ofSeconds(1), createPollOperation(name),
        activationOperation(name, policy, tags), cancelOperation(name));
}
// Fix: call the correctly-spelled activationOperation (the original referenced a
// nonexistent "actvationOperation"). Polls the operation status every second.
return new Poller<CertificateOperation>(Duration.ofSeconds(1), createPollOperation(name),
    activationOperation(name, policy, tags), cancelOperation(name));
/**
 * Creates a new certificate. If this is the first version, the certificate resource is created.
 * This operation requires the certificates/create permission.
 *
 * <p>Returns a {@link Poller} that polls the create-certificate operation status every second,
 * using {@code activationOperation} to start the operation and {@code cancelOperation} to
 * cancel it if the caller requests cancellation.</p>
 *
 * @param name The name of the certificate to be created.
 * @param policy The policy of the certificate to be created.
 * @param tags The application specific metadata to set.
 * @return A {@link Poller} polling on the create certificate operation status.
 */
public Poller<CertificateOperation> createCertificate(String name, CertificatePolicy policy, Map<String, String> tags) { return new Poller<CertificateOperation>(Duration.ofSeconds(1), createPollOperation(name), activationOperation(name, policy, tags), cancelOperation(name)); }
class CertificateAsyncClient { static final String API_VERSION = "7.0"; static final String ACCEPT_LANGUAGE = "en-US"; static final int DEFAULT_MAX_PAGE_RESULTS = 25; static final String CONTENT_TYPE_HEADER_VALUE = "application/json"; private String endpoint; private final CertificateService service; private final ClientLogger logger = new ClientLogger(CertificateAsyncClient.class); /** * Creates a CertificateAsyncClient that uses {@code pipeline} to service requests * * @param endpoint URL for the Azure KeyVault service. * @param pipeline HttpPipeline that the HTTP requests and responses flow through. */ CertificateAsyncClient(URL endpoint, HttpPipeline pipeline) { Objects.requireNonNull(endpoint, KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.VAULT_END_POINT_REQUIRED)); this.endpoint = endpoint.toString(); this.service = RestProxy.create(CertificateService.class, pipeline); } /** * Creates a new certificate. If this is the first version, the certificate resource is created. This operation requires * the certificates/create permission. * * <p><strong>Code Samples</strong></p> * <p>Create certificate is a long running operation. The {@link Poller poller} allows to automatically poll on crate certificate * operation status. It is possible to monitor each intermediate poll response during the poll operation.</p> * * {@codesnippet com.azure.security.keyvault.certificates.async.certificateclient.createCertificate * * @param name The name of the certificate to be created. * @param policy The policy of the certificate to be created. * @param tags The application specific metadata to set. * @throws ResourceModifiedException when invalid certificate policy configuration is provided. * @return A {@link Poller} polling on the create certificate operation status. 
*/ private Consumer<Poller<CertificateOperation>> cancelOperation(String name) { return poller -> { service.updateCertificateOperation(endpoint, name, API_VERSION, ACCEPT_LANGUAGE, new CertificateOperationUpdateParameter().cancellationRequested(true), CONTENT_TYPE_HEADER_VALUE, Context.NONE); }; } private Supplier<Mono<CertificateOperation>> actvationOperation(String name, CertificatePolicy policy, Map<String, String> tags) { return () -> createCertificateWithResponse(name, policy, tags) .flatMap(certificateOperationResponse -> { System.out.println("Activation function"); return Mono.just(certificateOperationResponse.value()); }); } /** * Creates a new certificate. If this is the first version, the certificate resource is created. This operation requires * the certificates/create permission. * * <p><strong>Code Samples</strong></p> * <p>Create certificate is a long running operation. The {@link Poller poller} allows to automatically poll on crate certificate * operation status. It is possible to monitor each intermediate poll response during the poll operation.</p> * * {@codesnippet com.azure.security.keyvault.certificates.async.certificateclient.createCertificate} * * @param name The name of the certificate to be created. * @param policy The policy of the certificate to be created. * @throws ResourceModifiedException when invalid certificate policy configuration is provided. * @return A {@link Poller} polling on the create certificate operation status. */ public Poller<CertificateOperation> createCertificate(String name, CertificatePolicy policy) { return createCertificate(name, policy, null); } /* Polling operation to poll on create certificate operation status. 
*/ private Function<PollResponse<CertificateOperation>, Mono<PollResponse<CertificateOperation>>> createPollOperation(String certificateName) { return prePollResponse -> { try { System.out.println("Calling Polling Function"); return service.getCertificateOperation(endpoint, certificateName, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE) .flatMap(this::processCertificateOperationResponse); } catch (HttpRequestException e) { logger.logExceptionAsError(e); return Mono.just(new PollResponse<>(PollResponse.OperationStatus.FAILED, null)); } }; } private Mono<PollResponse<CertificateOperation>> processCertificateOperationResponse(Response<CertificateOperation> certificateOperationResponse) { PollResponse.OperationStatus status = null; switch (certificateOperationResponse.value().status()) { case "inProgress": status = PollResponse.OperationStatus.IN_PROGRESS; break; case "completed": status = PollResponse.OperationStatus.SUCCESSFULLY_COMPLETED; break; case "failed": status = PollResponse.OperationStatus.FAILED; break; } return Mono.just(new PollResponse<>(status, certificateOperationResponse.value())); } Mono<Response<CertificateOperation>> createCertificateWithResponse(String name, CertificatePolicy certificatePolicy, Map<String, String> tags) { CertificateRequestParameters certificateRequestParameters = new CertificateRequestParameters() .certificatePolicy(new CertificatePolicyRequest(certificatePolicy)) .tags(tags); return service.createCertificate(endpoint, name, API_VERSION, ACCEPT_LANGUAGE, certificateRequestParameters, CONTENT_TYPE_HEADER_VALUE); } /** * Gets information about the latest version of the specified certificate. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the key in the key vault. 
Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.async.certificateclient.getCertificateWithPolicy} * * @param name The name of the certificate to retrieve, cannot be null * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException if {@code name} is empty string. * @return A {@link Mono} containing the requested {@link Certificate certificate}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Certificate> getCertificateWithPolicy(String name) { return withContext(context -> getCertificateWithResponse(name, "", context)).flatMap(FluxUtil::toMono); } Mono<Response<Certificate>> getCertificateWithResponse(String name, String version, Context context) { return service.getCertificate(endpoint, name, version, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context) .doOnRequest(ignored -> logger.info("Retrieving certificate - {}", name)) .doOnSuccess(response -> logger.info("Retrieved the certificate - {}", response.value().name())) .doOnError(error -> logger.warning("Failed to Retrieve the certificate - {}", name, error)); } /** * Gets information about the latest version of the specified certificate. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the key in the key vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.async.certificateclient.getCertificateWithResponse * * @param name The name of the certificate to retrieve, cannot be null * @param version The version of the certificate to retrieve. If this is an empty String or null, this call is equivalent to calling {@link CertificateAsyncClient * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. 
* @throws HttpRequestException if {@code name} is empty string. * @return A {@link Mono} containing a {@link Response} whose {@link Response */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Certificate>> getCertificateWithResponse(String name, String version) { return withContext(context -> getCertificateWithResponse(name, version == null ? "" : version, context)); } /** * Gets information about the specified version of the specified certificate. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the key in the key vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.async.certificateclient.getCertificate} * * @param name The name of the certificate to retrieve, cannot be null * @param version The version of the certificate to retrieve. If this is an empty String or null, this call is equivalent to calling {@link CertificateAsyncClient * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException if {@code name} is empty string. * @return A {@link Mono} containing the requested {@link Certificate certificate}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Certificate> getCertificate(String name, String version) { return withContext(context -> getCertificateWithResponse(name, version == null ? "" : version, context)).flatMap(FluxUtil::toMono); } /** * Updates the specified attributes associated with the specified certificate. The update operation changes specified attributes of an existing * stored certificate and attributes that are not specified in the request are left unchanged. This operation requires the certificates/update permission. 
* * <p><strong>Code Samples</strong></p> * <p>Gets latest version of the certificate, changes its tags and enabled status and then updates it in the Azure Key Vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.async.certificateclient.updateCertificate * * @param certificate The {@link CertificateBase base certificate} object with updated properties. * @throws NullPointerException if {@code certificate} is {@code null}. * @throws ResourceNotFoundException when a certificate with {@link CertificateBase * @throws HttpRequestException if {@link CertificateBase * @return A {@link Mono} containing the {@link CertificateBase updated certificate}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Certificate> updateCertificate(CertificateBase certificate) { return withContext(context -> updateCertificateWithResponse(certificate, context)).flatMap(FluxUtil::toMono); } /** * Updates the specified attributes associated with the specified certificate. The update operation changes specified attributes of an existing * stored certificate and attributes that are not specified in the request are left unchanged.. This operation requires the certificates/update permission. * * <p><strong>Code Samples</strong></p> * <p>Gets latest version of the certificate, changes its enabled status and then updates it in the Azure Key Vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.async.certificateclient.updateCertificateWithResponse * * @param certificate The {@link CertificateBase base certificate} object with updated properties. * @throws NullPointerException if {@code certificate} is {@code null}. 
* @throws ResourceNotFoundException when a certificate with {@link CertificateBase * @throws HttpRequestException if {@link CertificateBase * @return A {@link Mono} containing a {@link Response} whose {@link Response */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Certificate>> updateCertificateWithResponse(CertificateBase certificate) { return withContext(context -> updateCertificateWithResponse(certificate, context)); } Mono<Response<Certificate>> updateCertificateWithResponse(CertificateBase certificateBase, Context context) { Objects.requireNonNull(certificateBase, "The certificate input parameter cannot be null"); CertificateUpdateParameters parameters = new CertificateUpdateParameters() .tags(certificateBase.tags()) .certificateAttributes(new CertificateRequestAttributes(certificateBase)); return service.updateCertificate(endpoint, certificateBase.name(), API_VERSION, ACCEPT_LANGUAGE, parameters, CONTENT_TYPE_HEADER_VALUE, context) .doOnRequest(ignored -> logger.info("Updating certificate - {}", certificateBase.name())) .doOnSuccess(response -> logger.info("Updated the certificate - {}", certificateBase.name())) .doOnError(error -> logger.warning("Failed to update the certificate - {}", certificateBase.name(), error)); } /** * Gets information about the certificate which represents {@link CertificateBase certificateBase} from the key vault. This * operation requires the certificates/get permission. * * <p>The list operations {@link CertificateAsyncClient * the {@link Flux} containing {@link CertificateBase base certificate} as output excluding the properties like secretId and keyId of the certificate. 
* This operation can then be used to get the full certificate with its properties excluding the policy from {@code certificateBase}.</p> * * {@codesnippet com.azure.security.keyvault.certificates.async.certificateclient.getCertificate * * @param certificateBase The {@link CertificateBase base certificate} holding attributes of the certificate being requested. * @throws ResourceNotFoundException when a certificate with {@link CertificateBase * @throws HttpRequestException if {@link CertificateBase
class CertificateAsyncClient { static final String API_VERSION = "7.0"; static final String ACCEPT_LANGUAGE = "en-US"; static final int DEFAULT_MAX_PAGE_RESULTS = 25; static final String CONTENT_TYPE_HEADER_VALUE = "application/json"; private final String endpoint; private final CertificateService service; private final ClientLogger logger = new ClientLogger(CertificateAsyncClient.class); /** * Creates a CertificateAsyncClient that uses {@code pipeline} to service requests * * @param endpoint URL for the Azure KeyVault service. * @param pipeline HttpPipeline that the HTTP requests and responses flow through. */ CertificateAsyncClient(URL endpoint, HttpPipeline pipeline) { Objects.requireNonNull(endpoint, KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.VAULT_END_POINT_REQUIRED)); this.endpoint = endpoint.toString(); this.service = RestProxy.create(CertificateService.class, pipeline); } /** * Creates a new certificate. If this is the first version, the certificate resource is created. This operation requires * the certificates/create permission. * * <p><strong>Code Samples</strong></p> * <p>Create certificate is a long running operation. The {@link Poller poller} allows users to automatically poll on the create certificate * operation status. It is possible to monitor each intermediate poll response during the poll operation.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.createCertificate * * @param name The name of the certificate to be created. * @param policy The policy of the certificate to be created. * @param tags The application specific metadata to set. * @throws ResourceModifiedException when invalid certificate policy configuration is provided. * @return A {@link Poller} polling on the create certificate operation status. 
*/ private Consumer<Poller<CertificateOperation>> cancelOperation(String name) { return poller -> withContext(context -> cancelCertificateOperationWithResponse(name, context)); } private Supplier<Mono<CertificateOperation>> activationOperation(String name, CertificatePolicy policy, Map<String, String> tags) { return () -> withContext(context -> createCertificateWithResponse(name, policy, tags, context) .flatMap(certificateOperationResponse -> Mono.just(certificateOperationResponse.value()))); } /** * Creates a new certificate. If this is the first version, the certificate resource is created. This operation requires * the certificates/create permission. * * <p><strong>Code Samples</strong></p> * <p>Create certificate is a long running operation. The {@link Poller poller} allows users to automatically poll on the create certificate * operation status. It is possible to monitor each intermediate poll response during the poll operation.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.createCertificate * * @param name The name of the certificate to be created. * @param policy The policy of the certificate to be created. * @throws ResourceModifiedException when invalid certificate policy configuration is provided. * @return A {@link Poller} polling on the create certificate operation status. */ public Poller<CertificateOperation> createCertificate(String name, CertificatePolicy policy) { return createCertificate(name, policy, null); } /* Polling operation to poll on create certificate operation status. 
*/ private Function<PollResponse<CertificateOperation>, Mono<PollResponse<CertificateOperation>>> createPollOperation(String certificateName) { return prePollResponse -> { try { return withContext(context -> service.getCertificateOperation(endpoint, certificateName, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context) .flatMap(this::processCertificateOperationResponse)); } catch (HttpRequestException e) { logger.logExceptionAsError(e); return Mono.just(new PollResponse<>(PollResponse.OperationStatus.FAILED, null)); } }; } private Mono<PollResponse<CertificateOperation>> processCertificateOperationResponse(Response<CertificateOperation> certificateOperationResponse) { PollResponse.OperationStatus status = null; switch (certificateOperationResponse.value().status()) { case "inProgress": status = PollResponse.OperationStatus.IN_PROGRESS; break; case "completed": status = PollResponse.OperationStatus.SUCCESSFULLY_COMPLETED; break; case "failed": status = PollResponse.OperationStatus.FAILED; break; default: break; } return Mono.just(new PollResponse<>(status, certificateOperationResponse.value())); } Mono<Response<CertificateOperation>> createCertificateWithResponse(String name, CertificatePolicy certificatePolicy, Map<String, String> tags, Context context) { CertificateRequestParameters certificateRequestParameters = new CertificateRequestParameters() .certificatePolicy(new CertificatePolicyRequest(certificatePolicy)) .tags(tags); return service.createCertificate(endpoint, name, API_VERSION, ACCEPT_LANGUAGE, certificateRequestParameters, CONTENT_TYPE_HEADER_VALUE, context); } /** * Gets information about the latest version of the specified certificate. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the key in the key vault. 
Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificateWithPolicy * * @param name The name of the certificate to retrieve, cannot be null * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException if {@code name} is empty string. * @return A {@link Mono} containing the requested {@link Certificate certificate}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Certificate> getCertificateWithPolicy(String name) { return withContext(context -> getCertificateWithResponse(name, "", context)).flatMap(FluxUtil::toMono); } Mono<Response<Certificate>> getCertificateWithResponse(String name, String version, Context context) { return service.getCertificate(endpoint, name, version, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context) .doOnRequest(ignored -> logger.info("Retrieving certificate - {}", name)) .doOnSuccess(response -> logger.info("Retrieved the certificate - {}", response.value().name())) .doOnError(error -> logger.warning("Failed to Retrieve the certificate - {}", name, error)); } /** * Gets information about the latest version of the specified certificate. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the key in the key vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificateWithResponse * * @param name The name of the certificate to retrieve, cannot be null * @param version The version of the certificate to retrieve. If this is an empty String or null, this call is equivalent to calling {@link CertificateAsyncClient * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. 
* @throws HttpRequestException if {@code name} is empty string. * @return A {@link Mono} containing a {@link Response} whose {@link Response */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Certificate>> getCertificateWithResponse(String name, String version) { return withContext(context -> getCertificateWithResponse(name, version == null ? "" : version, context)); } /** * Gets information about the specified version of the specified certificate. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the key in the key vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificate * * @param name The name of the certificate to retrieve, cannot be null * @param version The version of the certificate to retrieve. If this is an empty String or null, this call is equivalent to calling {@link CertificateAsyncClient * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException if {@code name} is empty string. * @return A {@link Mono} containing the requested {@link Certificate certificate}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Certificate> getCertificate(String name, String version) { return withContext(context -> getCertificateWithResponse(name, version == null ? "" : version, context)).flatMap(FluxUtil::toMono); } /** * Updates the specified attributes associated with the specified certificate. The update operation changes specified attributes of an existing * stored certificate and attributes that are not specified in the request are left unchanged. This operation requires the certificates/update permission. 
* * <p><strong>Code Samples</strong></p> * <p>Gets latest version of the certificate, changes its tags and enabled status and then updates it in the Azure Key Vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.updateCertificate * * @param certificate The {@link CertificateBase} object with updated properties. * @throws NullPointerException if {@code certificate} is {@code null}. * @throws ResourceNotFoundException when a certificate with {@link CertificateBase * @throws HttpRequestException if {@link CertificateBase * @return A {@link Mono} containing the {@link CertificateBase updated certificate}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Certificate> updateCertificate(CertificateBase certificate) { return withContext(context -> updateCertificateWithResponse(certificate, context)).flatMap(FluxUtil::toMono); } /** * Updates the specified attributes associated with the specified certificate. The update operation changes specified attributes of an existing * stored certificate and attributes that are not specified in the request are left unchanged. This operation requires the certificates/update permission. * * <p><strong>Code Samples</strong></p> * <p>Gets latest version of the certificate, changes its enabled status and then updates it in the Azure Key Vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.updateCertificateWithResponse * * @param certificate The {@link CertificateBase} object with updated properties. * @throws NullPointerException if {@code certificate} is {@code null}. 
* @throws ResourceNotFoundException when a certificate with {@link CertificateBase * @throws HttpRequestException if {@link CertificateBase * @return A {@link Mono} containing a {@link Response} whose {@link Response */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Certificate>> updateCertificateWithResponse(CertificateBase certificate) { return withContext(context -> updateCertificateWithResponse(certificate, context)); } Mono<Response<Certificate>> updateCertificateWithResponse(CertificateBase certificateBase, Context context) { Objects.requireNonNull(certificateBase, "The certificate input parameter cannot be null"); CertificateUpdateParameters parameters = new CertificateUpdateParameters() .tags(certificateBase.tags()) .certificateAttributes(new CertificateRequestAttributes(certificateBase)); return service.updateCertificate(endpoint, certificateBase.name(), API_VERSION, ACCEPT_LANGUAGE, parameters, CONTENT_TYPE_HEADER_VALUE, context) .doOnRequest(ignored -> logger.info("Updating certificate - {}", certificateBase.name())) .doOnSuccess(response -> logger.info("Updated the certificate - {}", certificateBase.name())) .doOnError(error -> logger.warning("Failed to update the certificate - {}", certificateBase.name(), error)); } /** * Gets information about the certificate which represents the {@link CertificateBase} from the key vault. This * operation requires the certificates/get permission. * * <p>The list operations {@link CertificateAsyncClient * the {@link Flux} containing {@link CertificateBase} as output excluding the properties like secretId and keyId of the certificate. * This operation can then be used to get the full certificate with its properties excluding the policy from {@code certificateBase}.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificate * * @param certificateBase The {@link CertificateBase} holding attributes of the certificate being requested. 
* @throws ResourceNotFoundException when a certificate with {@link CertificateBase * @throws HttpRequestException if {@link CertificateBase
This is causing Javadoc generation issues, since the line is over 120 characters. Also, for all of these existence calls, can we just use `.value()`? `Boolean`'s `toString` will implicitly use the boolean value.
/**
 * Code snippet for {@link ContainerAsyncClient#existsWithResponse()}
 */
public void existsWithResponse() {
    // Response.value() is a Boolean; printf's %b conversion handles it directly,
    // so the explicit booleanValue() unboxing is redundant (and would NPE on a
    // null value). Dropping it also keeps the line under the 120-char limit.
    client.existsWithResponse().subscribe(response ->
        System.out.printf("Exists? %b%n", response.value()));
}
client.existsWithResponse().subscribe(response -> System.out.printf("Exists? %b%n", response.value().booleanValue()));
/**
 * Code snippet for {@link ContainerAsyncClient#existsWithResponse()}
 */
public void existsWithResponse() {
    client.existsWithResponse()
        .subscribe(response -> System.out.printf("Exists? %b%n", response.value()));
}
class ContainerAsyncClientJavaDocCodeSnippets { private ContainerAsyncClient client = JavaDocCodeSnippetsHelpers.getContainerAsyncClient(); private String blobName = "blobName"; private String snapshot = "snapshot"; private String leaseId = "leaseId"; private String proposedId = "proposedId"; private int leaseDuration = (int) Duration.ofSeconds(30).getSeconds(); /** * Code snippet for {@link ContainerAsyncClient * ContainerSASPermission, OffsetDateTime, OffsetDateTime, String, SASProtocol, IPRange, String, String, String, * String, String)} */ public void generateUserDelegationSASCodeSnippets() { ContainerSASPermission permissions = new ContainerSASPermission() .read(true) .write(true) .create(true) .delete(true) .add(true) .list(true); OffsetDateTime startTime = OffsetDateTime.now().minusDays(1); OffsetDateTime expiryTime = OffsetDateTime.now().plusDays(1); IPRange ipRange = new IPRange() .ipMin("0.0.0.0") .ipMax("255.255.255.255"); SASProtocol sasProtocol = SASProtocol.HTTPS_HTTP; String cacheControl = "cache"; String contentDisposition = "disposition"; String contentEncoding = "encoding"; String contentLanguage = "language"; String contentType = "type"; String version = Constants.HeaderConstants.TARGET_STORAGE_VERSION; String accountName = "accountName"; UserDelegationKey userDelegationKey = new UserDelegationKey(); String sas = client.generateUserDelegationSAS(userDelegationKey, accountName, permissions, expiryTime, startTime, version, sasProtocol, ipRange, cacheControl, contentDisposition, contentEncoding, contentLanguage, contentType); } /** * Code snippet for {@link ContainerAsyncClient * OffsetDateTime, String, SASProtocol, IPRange, String, String, String, String, String)} */ public void generateSASCodeSnippets() { ContainerSASPermission permissions = new ContainerSASPermission() .read(true) .write(true) .create(true) .delete(true) .add(true) .list(true); OffsetDateTime startTime = OffsetDateTime.now().minusDays(1); OffsetDateTime expiryTime = 
OffsetDateTime.now().plusDays(1); IPRange ipRange = new IPRange() .ipMin("0.0.0.0") .ipMax("255.255.255.255"); SASProtocol sasProtocol = SASProtocol.HTTPS_HTTP; String cacheControl = "cache"; String contentDisposition = "disposition"; String contentEncoding = "encoding"; String contentLanguage = "language"; String contentType = "type"; String identifier = "identifier"; String version = Constants.HeaderConstants.TARGET_STORAGE_VERSION; String sas = client.generateSAS(identifier, permissions, expiryTime, startTime, version, sasProtocol, ipRange, cacheControl, contentDisposition, contentEncoding, contentLanguage, contentType); } /** * Code snippet for {@link ContainerAsyncClient */ public void getBlobAsyncClient() { BlobAsyncClient blobAsyncClient = client.getBlobAsyncClient(blobName); } /** * Code snippet for {@link ContainerAsyncClient */ public void getSnapshotBlobAsyncClient() { BlobAsyncClient blobAsyncClient = client.getBlobAsyncClient(blobName, snapshot); } /** * Code snippet for {@link ContainerAsyncClient */ public void getAppendBlobAsyncClient() { AppendBlobAsyncClient appendBlobAsyncClient = client.getAppendBlobAsyncClient(blobName); } /** * Code snippet for {@link ContainerAsyncClient */ public void getSnapshotAppendBlobAsyncClient() { AppendBlobAsyncClient appendBlobAsyncClient = client.getAppendBlobAsyncClient(blobName, snapshot); } /** * Code snippet for {@link ContainerAsyncClient */ public void getBlockBlobAsyncClient() { BlockBlobAsyncClient blockBlobAsyncClient = client.getBlockBlobAsyncClient(blobName); } /** * Code snippet for {@link ContainerAsyncClient */ public void getSnapshotBlockBlobAsyncClient() { BlockBlobAsyncClient blockBlobAsyncClient = client.getBlockBlobAsyncClient(blobName, snapshot); } /** * Code snippet for {@link ContainerAsyncClient */ public void getPageBlobAsyncClient() { PageBlobAsyncClient pageBlobAsyncClient = client.getPageBlobAsyncClient(blobName); } /** * Code snippet for {@link ContainerAsyncClient */ public void 
getSnapshotPageBlobAsyncClient() { PageBlobAsyncClient pageBlobAsyncClient = client.getPageBlobAsyncClient(blobName, snapshot); } /** * Code snippet for {@link ContainerAsyncClient */ public void exists() { client.exists().subscribe(response -> System.out.printf("Exists? %b%n", response.booleanValue())); } /** * Code snippet for {@link ContainerAsyncClient */ /** * Code snippet for {@link ContainerAsyncClient */ public void existsWithResponse2() { Context context = new Context("key", "value"); client.existsWithResponse(context).subscribe(response -> System.out.printf("Exists? %b%n", response.value().booleanValue())); } /** * Code snippet for {@link ContainerAsyncClient */ public void create() { client.create().subscribe( response -> System.out.printf("Create completed%n"), error -> System.out.printf("Error while creating container %s%n", error)); } /** * Code snippet for {@link ContainerAsyncClient */ public void create2() { Metadata metadata = new Metadata(Collections.singletonMap("metadata", "value")); client.createWithResponse(metadata, PublicAccessType.CONTAINER).subscribe(response -> System.out.printf("Create completed with status %d%n", response.statusCode())); } /** * Code snippet for {@link ContainerAsyncClient */ public void delete() { client.delete().subscribe( response -> System.out.printf("Delete completed%n"), error -> System.out.printf("Delete failed: %s%n", error)); } /** * Code snippet for {@link ContainerAsyncClient */ public void delete2() { ContainerAccessConditions accessConditions = new ContainerAccessConditions() .leaseAccessConditions(new LeaseAccessConditions().leaseId(leaseId)) .modifiedAccessConditions(new ModifiedAccessConditions() .ifUnmodifiedSince(OffsetDateTime.now().minusDays(3))); client.deleteWithResponse(accessConditions).subscribe(response -> System.out.printf("Delete completed with status %d%n", response.statusCode())); } /** * Code snippet for {@link ContainerAsyncClient */ public void getProperties() { 
client.getProperties().subscribe(response -> System.out.printf("Public Access Type: %s, Legal Hold? %b, Immutable? %b%n", response.blobPublicAccess(), response.hasLegalHold(), response.hasImmutabilityPolicy())); } /** * Code snippet for {@link ContainerAsyncClient */ public void getProperties2() { LeaseAccessConditions accessConditions = new LeaseAccessConditions().leaseId(leaseId); client.getPropertiesWithResponse(accessConditions).subscribe(response -> System.out.printf("Public Access Type: %s, Legal Hold? %b, Immutable? %b%n", response.value().blobPublicAccess(), response.value().hasLegalHold(), response.value().hasImmutabilityPolicy())); } /** * Code snippet for {@link ContainerAsyncClient */ public void setMetadata() { Metadata metadata = new Metadata(Collections.singletonMap("metadata", "value")); client.setMetadata(metadata).subscribe( response -> System.out.printf("Set metadata completed%n"), error -> System.out.printf("Set metadata failed: %s%n", error)); } /** * Code snippet for {@link ContainerAsyncClient */ public void setMetadata2() { Metadata metadata = new Metadata(Collections.singletonMap("metadata", "value")); ContainerAccessConditions accessConditions = new ContainerAccessConditions() .leaseAccessConditions(new LeaseAccessConditions().leaseId(leaseId)) .modifiedAccessConditions(new ModifiedAccessConditions() .ifUnmodifiedSince(OffsetDateTime.now().minusDays(3))); client.setMetadataWithResponse(metadata, accessConditions).subscribe(response -> System.out.printf("Set metadata completed with status %d%n", response.statusCode())); } /** * Code snippet for {@link ContainerAsyncClient */ public void getAccessPolicy() { client.getAccessPolicy().subscribe(response -> { System.out.printf("Blob Access Type: %s%n", response.getBlobAccessType()); for (SignedIdentifier identifier : response.getIdentifiers()) { System.out.printf("Identifier Name: %s, Permissions %s%n", identifier.id(), identifier.accessPolicy().permission()); } }); } /** * Code snippet for 
{@link ContainerAsyncClient */ public void getAccessPolicy2() { LeaseAccessConditions accessConditions = new LeaseAccessConditions().leaseId(leaseId); client.getAccessPolicyWithResponse(accessConditions).subscribe(response -> { System.out.printf("Blob Access Type: %s%n", response.value().getBlobAccessType()); for (SignedIdentifier identifier : response.value().getIdentifiers()) { System.out.printf("Identifier Name: %s, Permissions %s%n", identifier.id(), identifier.accessPolicy().permission()); } }); } /** * Code snippet for {@link ContainerAsyncClient */ public void setAccessPolicy() { SignedIdentifier identifier = new SignedIdentifier() .id("name") .accessPolicy(new AccessPolicy() .start(OffsetDateTime.now()) .expiry(OffsetDateTime.now().plusDays(7)) .permission("permissionString")); client.setAccessPolicy(PublicAccessType.CONTAINER, Collections.singletonList(identifier)).subscribe( response -> System.out.printf("Set access policy completed%n"), error -> System.out.printf("Set access policy failed: %s%n", error)); } /** * Code snippet for {@link ContainerAsyncClient */ public void setAccessPolicy2() { SignedIdentifier identifier = new SignedIdentifier() .id("name") .accessPolicy(new AccessPolicy() .start(OffsetDateTime.now()) .expiry(OffsetDateTime.now().plusDays(7)) .permission("permissionString")); ContainerAccessConditions accessConditions = new ContainerAccessConditions() .leaseAccessConditions(new LeaseAccessConditions().leaseId(leaseId)) .modifiedAccessConditions(new ModifiedAccessConditions() .ifUnmodifiedSince(OffsetDateTime.now().minusDays(3))); client.setAccessPolicyWithResponse(PublicAccessType.CONTAINER, Collections.singletonList(identifier), accessConditions) .subscribe(response -> System.out.printf("Set access policy completed with status %d%n", response.statusCode())); } /** * Code snippet for {@link ContainerAsyncClient */ public void listBlobsFlat() { client.listBlobsFlat().subscribe(blob -> System.out.printf("Name: %s, Directory? 
%b%n", blob.name(), blob.isPrefix())); } /** * Code snippet for {@link ContainerAsyncClient */ public void listBlobsFlat2() { ListBlobsOptions options = new ListBlobsOptions() .prefix("prefixToMatch") .details(new BlobListDetails() .deletedBlobs(true) .snapshots(true)); client.listBlobsFlat(options).subscribe(blob -> System.out.printf("Name: %s, Directory? %b, Deleted? %b, Snapshot ID: %s%n", blob.name(), blob.isPrefix(), blob.deleted(), blob.snapshot())); } /** * Code snippet for {@link ContainerAsyncClient */ public void listBlobsHierarchy() { client.listBlobsHierarchy("directoryName").subscribe(blob -> System.out.printf("Name: %s, Directory? %b%n", blob.name(), blob.isPrefix())); } /** * Code snippet for {@link ContainerAsyncClient */ public void listBlobsHierarchy2() { ListBlobsOptions options = new ListBlobsOptions() .prefix("directoryName") .details(new BlobListDetails() .deletedBlobs(true) .snapshots(true)); client.listBlobsHierarchy("/", options).subscribe(blob -> System.out.printf("Name: %s, Directory? %b, Deleted? 
%b, Snapshot ID: %s%n", blob.name(), blob.isPrefix(), blob.deleted(), blob.snapshot())); } /** * Code snippet for {@link ContainerAsyncClient */ public void acquireLease() { client.acquireLease(proposedId, leaseDuration).subscribe(response -> System.out.printf("Lease ID: %s%n", response)); } /** * Code snippet for {@link ContainerAsyncClient */ public void acquireLease2() { ModifiedAccessConditions accessConditions = new ModifiedAccessConditions() .ifUnmodifiedSince(OffsetDateTime.now().minusDays(3)); client.acquireLeaseWithResponse(proposedId, leaseDuration, accessConditions).subscribe(response -> System.out.printf("Lease ID: %s%n", response.value())); } /** * Code snippet for {@link ContainerAsyncClient */ public void renewLease() { client.renewLease(leaseId).subscribe(response -> System.out.printf("Renewed Lease ID: %s%n", response)); } /** * Code snippet for {@link ContainerAsyncClient */ public void renewLease2() { ModifiedAccessConditions accessConditions = new ModifiedAccessConditions() .ifUnmodifiedSince(OffsetDateTime.now().minusDays(3)); client.renewLeaseWithResponse(leaseId, accessConditions).subscribe(response -> System.out.printf("Renewed Lease ID: %s%n", response.value())); } /** * Code snippet for {@link ContainerAsyncClient */ public void releaseLease() { client.releaseLease(leaseId).subscribe( response -> System.out.printf("Release lease completed%n"), error -> System.out.printf("Release lease failed: %n", error)); } /** * Code snippet for {@link ContainerAsyncClient */ public void releaseLease2() { ModifiedAccessConditions accessConditions = new ModifiedAccessConditions() .ifUnmodifiedSince(OffsetDateTime.now().minusDays(3)); client.releaseLeaseWithResponse(leaseId, accessConditions).subscribe(response -> System.out.printf("Release lease completed with status %d%n", response.statusCode())); } /** * Code snippet for {@link ContainerAsyncClient */ public void breakLease() { client.breakLease().subscribe(response -> System.out.printf("Broken lease 
had %d seconds remaining on the lease%n", response.getSeconds())); } /** * Code snippet for {@link ContainerAsyncClient */ public void breakLease2() { ModifiedAccessConditions accessConditions = new ModifiedAccessConditions() .ifUnmodifiedSince(OffsetDateTime.now().minusDays(3)); client.breakLeaseWithResponse(10, accessConditions).subscribe(response -> System.out.printf("Broken lease had %d seconds remaining on the lease%n", response.value().getSeconds())); } /** * Code snippet for {@link ContainerAsyncClient */ public void changeLease() { client.changeLease(leaseId, proposedId).subscribe(response -> System.out.printf("Changed Lease ID: %s%n", response)); } /** * Code snippet for {@link ContainerAsyncClient */ public void changeLease2() { ModifiedAccessConditions accessConditions = new ModifiedAccessConditions() .ifUnmodifiedSince(OffsetDateTime.now().minusDays(3)); client.changeLeaseWithResponse(leaseId, proposedId, accessConditions).subscribe(response -> System.out.printf("Changed Lease ID: %s%n", response.value())); } /** * Code snippet for {@link ContainerAsyncClient */ public void getAccountInfo() { client.getAccountInfo().subscribe(response -> System.out.printf("Account Kind: %s, SKU: %s%n", response.accountKind(), response.skuName())); } /** * Code snippet for {@link ContainerAsyncClient */ public void getAccountInfo2() { client.getAccountInfoWithResponse().subscribe(response -> System.out.printf("Account Kind: %s, SKU: %s%n", response.value().accountKind(), response.value().skuName())); } }
class ContainerAsyncClientJavaDocCodeSnippets { private ContainerAsyncClient client = JavaDocCodeSnippetsHelpers.getContainerAsyncClient(); private String blobName = "blobName"; private String snapshot = "snapshot"; private String leaseId = "leaseId"; private String proposedId = "proposedId"; private int leaseDuration = (int) Duration.ofSeconds(30).getSeconds(); /** * Code snippet for {@link ContainerAsyncClient * ContainerSASPermission, OffsetDateTime, OffsetDateTime, String, SASProtocol, IPRange, String, String, String, * String, String)} */ public void generateUserDelegationSASCodeSnippets() { ContainerSASPermission permissions = new ContainerSASPermission() .read(true) .write(true) .create(true) .delete(true) .add(true) .list(true); OffsetDateTime startTime = OffsetDateTime.now().minusDays(1); OffsetDateTime expiryTime = OffsetDateTime.now().plusDays(1); IPRange ipRange = new IPRange() .ipMin("0.0.0.0") .ipMax("255.255.255.255"); SASProtocol sasProtocol = SASProtocol.HTTPS_HTTP; String cacheControl = "cache"; String contentDisposition = "disposition"; String contentEncoding = "encoding"; String contentLanguage = "language"; String contentType = "type"; String version = Constants.HeaderConstants.TARGET_STORAGE_VERSION; String accountName = "accountName"; UserDelegationKey userDelegationKey = new UserDelegationKey(); String sas = client.generateUserDelegationSAS(userDelegationKey, accountName, permissions, expiryTime, startTime, version, sasProtocol, ipRange, cacheControl, contentDisposition, contentEncoding, contentLanguage, contentType); } /** * Code snippet for {@link ContainerAsyncClient * OffsetDateTime, String, SASProtocol, IPRange, String, String, String, String, String)} */ public void generateSASCodeSnippets() { ContainerSASPermission permissions = new ContainerSASPermission() .read(true) .write(true) .create(true) .delete(true) .add(true) .list(true); OffsetDateTime startTime = OffsetDateTime.now().minusDays(1); OffsetDateTime expiryTime = 
OffsetDateTime.now().plusDays(1); IPRange ipRange = new IPRange() .ipMin("0.0.0.0") .ipMax("255.255.255.255"); SASProtocol sasProtocol = SASProtocol.HTTPS_HTTP; String cacheControl = "cache"; String contentDisposition = "disposition"; String contentEncoding = "encoding"; String contentLanguage = "language"; String contentType = "type"; String identifier = "identifier"; String version = Constants.HeaderConstants.TARGET_STORAGE_VERSION; String sas = client.generateSAS(identifier, permissions, expiryTime, startTime, version, sasProtocol, ipRange, cacheControl, contentDisposition, contentEncoding, contentLanguage, contentType); } /** * Code snippet for {@link ContainerAsyncClient */ public void getBlobAsyncClient() { BlobAsyncClient blobAsyncClient = client.getBlobAsyncClient(blobName); } /** * Code snippet for {@link ContainerAsyncClient */ public void getSnapshotBlobAsyncClient() { BlobAsyncClient blobAsyncClient = client.getBlobAsyncClient(blobName, snapshot); } /** * Code snippet for {@link ContainerAsyncClient */ public void getAppendBlobAsyncClient() { AppendBlobAsyncClient appendBlobAsyncClient = client.getAppendBlobAsyncClient(blobName); } /** * Code snippet for {@link ContainerAsyncClient */ public void getSnapshotAppendBlobAsyncClient() { AppendBlobAsyncClient appendBlobAsyncClient = client.getAppendBlobAsyncClient(blobName, snapshot); } /** * Code snippet for {@link ContainerAsyncClient */ public void getBlockBlobAsyncClient() { BlockBlobAsyncClient blockBlobAsyncClient = client.getBlockBlobAsyncClient(blobName); } /** * Code snippet for {@link ContainerAsyncClient */ public void getSnapshotBlockBlobAsyncClient() { BlockBlobAsyncClient blockBlobAsyncClient = client.getBlockBlobAsyncClient(blobName, snapshot); } /** * Code snippet for {@link ContainerAsyncClient */ public void getPageBlobAsyncClient() { PageBlobAsyncClient pageBlobAsyncClient = client.getPageBlobAsyncClient(blobName); } /** * Code snippet for {@link ContainerAsyncClient */ public void 
getSnapshotPageBlobAsyncClient() { PageBlobAsyncClient pageBlobAsyncClient = client.getPageBlobAsyncClient(blobName, snapshot); } /** * Code snippet for {@link ContainerAsyncClient */ public void exists() { client.exists().subscribe(response -> System.out.printf("Exists? %b%n", response)); } /** * Code snippet for {@link ContainerAsyncClient */ /** * Code snippet for {@link ContainerAsyncClient */ public void existsWithResponse2() { Context context = new Context("key", "value"); client.existsWithResponse(context).subscribe(response -> System.out.printf("Exists? %b%n", response.value())); } /** * Code snippet for {@link ContainerAsyncClient */ public void create() { client.create().subscribe( response -> System.out.printf("Create completed%n"), error -> System.out.printf("Error while creating container %s%n", error)); } /** * Code snippet for {@link ContainerAsyncClient */ public void create2() { Metadata metadata = new Metadata(Collections.singletonMap("metadata", "value")); client.createWithResponse(metadata, PublicAccessType.CONTAINER).subscribe(response -> System.out.printf("Create completed with status %d%n", response.statusCode())); } /** * Code snippet for {@link ContainerAsyncClient */ public void delete() { client.delete().subscribe( response -> System.out.printf("Delete completed%n"), error -> System.out.printf("Delete failed: %s%n", error)); } /** * Code snippet for {@link ContainerAsyncClient */ public void delete2() { ContainerAccessConditions accessConditions = new ContainerAccessConditions() .leaseAccessConditions(new LeaseAccessConditions().leaseId(leaseId)) .modifiedAccessConditions(new ModifiedAccessConditions() .ifUnmodifiedSince(OffsetDateTime.now().minusDays(3))); client.deleteWithResponse(accessConditions).subscribe(response -> System.out.printf("Delete completed with status %d%n", response.statusCode())); } /** * Code snippet for {@link ContainerAsyncClient */ public void getProperties() { client.getProperties().subscribe(response -> 
System.out.printf("Public Access Type: %s, Legal Hold? %b, Immutable? %b%n", response.blobPublicAccess(), response.hasLegalHold(), response.hasImmutabilityPolicy())); } /** * Code snippet for {@link ContainerAsyncClient */ public void getProperties2() { LeaseAccessConditions accessConditions = new LeaseAccessConditions().leaseId(leaseId); client.getPropertiesWithResponse(accessConditions).subscribe(response -> System.out.printf("Public Access Type: %s, Legal Hold? %b, Immutable? %b%n", response.value().blobPublicAccess(), response.value().hasLegalHold(), response.value().hasImmutabilityPolicy())); } /** * Code snippet for {@link ContainerAsyncClient */ public void setMetadata() { Metadata metadata = new Metadata(Collections.singletonMap("metadata", "value")); client.setMetadata(metadata).subscribe( response -> System.out.printf("Set metadata completed%n"), error -> System.out.printf("Set metadata failed: %s%n", error)); } /** * Code snippet for {@link ContainerAsyncClient */ public void setMetadata2() { Metadata metadata = new Metadata(Collections.singletonMap("metadata", "value")); ContainerAccessConditions accessConditions = new ContainerAccessConditions() .leaseAccessConditions(new LeaseAccessConditions().leaseId(leaseId)) .modifiedAccessConditions(new ModifiedAccessConditions() .ifUnmodifiedSince(OffsetDateTime.now().minusDays(3))); client.setMetadataWithResponse(metadata, accessConditions).subscribe(response -> System.out.printf("Set metadata completed with status %d%n", response.statusCode())); } /** * Code snippet for {@link ContainerAsyncClient */ public void getAccessPolicy() { client.getAccessPolicy().subscribe(response -> { System.out.printf("Blob Access Type: %s%n", response.getBlobAccessType()); for (SignedIdentifier identifier : response.getIdentifiers()) { System.out.printf("Identifier Name: %s, Permissions %s%n", identifier.id(), identifier.accessPolicy().permission()); } }); } /** * Code snippet for {@link ContainerAsyncClient */ public void 
getAccessPolicy2() { LeaseAccessConditions accessConditions = new LeaseAccessConditions().leaseId(leaseId); client.getAccessPolicyWithResponse(accessConditions).subscribe(response -> { System.out.printf("Blob Access Type: %s%n", response.value().getBlobAccessType()); for (SignedIdentifier identifier : response.value().getIdentifiers()) { System.out.printf("Identifier Name: %s, Permissions %s%n", identifier.id(), identifier.accessPolicy().permission()); } }); } /** * Code snippet for {@link ContainerAsyncClient */ public void setAccessPolicy() { SignedIdentifier identifier = new SignedIdentifier() .id("name") .accessPolicy(new AccessPolicy() .start(OffsetDateTime.now()) .expiry(OffsetDateTime.now().plusDays(7)) .permission("permissionString")); client.setAccessPolicy(PublicAccessType.CONTAINER, Collections.singletonList(identifier)).subscribe( response -> System.out.printf("Set access policy completed%n"), error -> System.out.printf("Set access policy failed: %s%n", error)); } /** * Code snippet for {@link ContainerAsyncClient */ public void setAccessPolicy2() { SignedIdentifier identifier = new SignedIdentifier() .id("name") .accessPolicy(new AccessPolicy() .start(OffsetDateTime.now()) .expiry(OffsetDateTime.now().plusDays(7)) .permission("permissionString")); ContainerAccessConditions accessConditions = new ContainerAccessConditions() .leaseAccessConditions(new LeaseAccessConditions().leaseId(leaseId)) .modifiedAccessConditions(new ModifiedAccessConditions() .ifUnmodifiedSince(OffsetDateTime.now().minusDays(3))); client.setAccessPolicyWithResponse(PublicAccessType.CONTAINER, Collections.singletonList(identifier), accessConditions) .subscribe(response -> System.out.printf("Set access policy completed with status %d%n", response.statusCode())); } /** * Code snippet for {@link ContainerAsyncClient */ public void listBlobsFlat() { client.listBlobsFlat().subscribe(blob -> System.out.printf("Name: %s, Directory? 
%b%n", blob.name(), blob.isPrefix())); } /** * Code snippet for {@link ContainerAsyncClient */ public void listBlobsFlat2() { ListBlobsOptions options = new ListBlobsOptions() .prefix("prefixToMatch") .details(new BlobListDetails() .deletedBlobs(true) .snapshots(true)); client.listBlobsFlat(options).subscribe(blob -> System.out.printf("Name: %s, Directory? %b, Deleted? %b, Snapshot ID: %s%n", blob.name(), blob.isPrefix(), blob.deleted(), blob.snapshot())); } /** * Code snippet for {@link ContainerAsyncClient */ public void listBlobsHierarchy() { client.listBlobsHierarchy("directoryName").subscribe(blob -> System.out.printf("Name: %s, Directory? %b%n", blob.name(), blob.isPrefix())); } /** * Code snippet for {@link ContainerAsyncClient */ public void listBlobsHierarchy2() { ListBlobsOptions options = new ListBlobsOptions() .prefix("directoryName") .details(new BlobListDetails() .deletedBlobs(true) .snapshots(true)); client.listBlobsHierarchy("/", options).subscribe(blob -> System.out.printf("Name: %s, Directory? %b, Deleted? 
%b, Snapshot ID: %s%n", blob.name(), blob.isPrefix(), blob.deleted(), blob.snapshot())); } /** * Code snippet for {@link ContainerAsyncClient */ public void acquireLease() { client.acquireLease(proposedId, leaseDuration).subscribe(response -> System.out.printf("Lease ID: %s%n", response)); } /** * Code snippet for {@link ContainerAsyncClient */ public void acquireLease2() { ModifiedAccessConditions accessConditions = new ModifiedAccessConditions() .ifUnmodifiedSince(OffsetDateTime.now().minusDays(3)); client.acquireLeaseWithResponse(proposedId, leaseDuration, accessConditions).subscribe(response -> System.out.printf("Lease ID: %s%n", response.value())); } /** * Code snippet for {@link ContainerAsyncClient */ public void renewLease() { client.renewLease(leaseId).subscribe(response -> System.out.printf("Renewed Lease ID: %s%n", response)); } /** * Code snippet for {@link ContainerAsyncClient */ public void renewLease2() { ModifiedAccessConditions accessConditions = new ModifiedAccessConditions() .ifUnmodifiedSince(OffsetDateTime.now().minusDays(3)); client.renewLeaseWithResponse(leaseId, accessConditions).subscribe(response -> System.out.printf("Renewed Lease ID: %s%n", response.value())); } /** * Code snippet for {@link ContainerAsyncClient */ public void releaseLease() { client.releaseLease(leaseId).subscribe( response -> System.out.printf("Release lease completed%n"), error -> System.out.printf("Release lease failed: %n", error)); } /** * Code snippet for {@link ContainerAsyncClient */ public void releaseLease2() { ModifiedAccessConditions accessConditions = new ModifiedAccessConditions() .ifUnmodifiedSince(OffsetDateTime.now().minusDays(3)); client.releaseLeaseWithResponse(leaseId, accessConditions).subscribe(response -> System.out.printf("Release lease completed with status %d%n", response.statusCode())); } /** * Code snippet for {@link ContainerAsyncClient */ public void breakLease() { client.breakLease().subscribe(response -> System.out.printf("Broken lease 
had %d seconds remaining on the lease%n", response.getSeconds())); } /** * Code snippet for {@link ContainerAsyncClient */ public void breakLease2() { ModifiedAccessConditions accessConditions = new ModifiedAccessConditions() .ifUnmodifiedSince(OffsetDateTime.now().minusDays(3)); client.breakLeaseWithResponse(10, accessConditions).subscribe(response -> System.out.printf("Broken lease had %d seconds remaining on the lease%n", response.value().getSeconds())); } /** * Code snippet for {@link ContainerAsyncClient */ public void changeLease() { client.changeLease(leaseId, proposedId).subscribe(response -> System.out.printf("Changed Lease ID: %s%n", response)); } /** * Code snippet for {@link ContainerAsyncClient */ public void changeLease2() { ModifiedAccessConditions accessConditions = new ModifiedAccessConditions() .ifUnmodifiedSince(OffsetDateTime.now().minusDays(3)); client.changeLeaseWithResponse(leaseId, proposedId, accessConditions).subscribe(response -> System.out.printf("Changed Lease ID: %s%n", response.value())); } /** * Code snippet for {@link ContainerAsyncClient */ public void getAccountInfo() { client.getAccountInfo().subscribe(response -> System.out.printf("Account Kind: %s, SKU: %s%n", response.accountKind(), response.skuName())); } /** * Code snippet for {@link ContainerAsyncClient */ public void getAccountInfo2() { client.getAccountInfoWithResponse().subscribe(response -> System.out.printf("Account Kind: %s, SKU: %s%n", response.value().accountKind(), response.value().skuName())); } }
updated
public void existsWithResponse() { client.existsWithResponse().subscribe(response -> System.out.printf("Exists? %b%n", response.value().booleanValue())); }
client.existsWithResponse().subscribe(response -> System.out.printf("Exists? %b%n", response.value().booleanValue()));
public void existsWithResponse() { client.existsWithResponse().subscribe(response -> System.out.printf("Exists? %b%n", response.value())); }
class ContainerAsyncClientJavaDocCodeSnippets { private ContainerAsyncClient client = JavaDocCodeSnippetsHelpers.getContainerAsyncClient(); private String blobName = "blobName"; private String snapshot = "snapshot"; private String leaseId = "leaseId"; private String proposedId = "proposedId"; private int leaseDuration = (int) Duration.ofSeconds(30).getSeconds(); /** * Code snippet for {@link ContainerAsyncClient * ContainerSASPermission, OffsetDateTime, OffsetDateTime, String, SASProtocol, IPRange, String, String, String, * String, String)} */ public void generateUserDelegationSASCodeSnippets() { ContainerSASPermission permissions = new ContainerSASPermission() .read(true) .write(true) .create(true) .delete(true) .add(true) .list(true); OffsetDateTime startTime = OffsetDateTime.now().minusDays(1); OffsetDateTime expiryTime = OffsetDateTime.now().plusDays(1); IPRange ipRange = new IPRange() .ipMin("0.0.0.0") .ipMax("255.255.255.255"); SASProtocol sasProtocol = SASProtocol.HTTPS_HTTP; String cacheControl = "cache"; String contentDisposition = "disposition"; String contentEncoding = "encoding"; String contentLanguage = "language"; String contentType = "type"; String version = Constants.HeaderConstants.TARGET_STORAGE_VERSION; String accountName = "accountName"; UserDelegationKey userDelegationKey = new UserDelegationKey(); String sas = client.generateUserDelegationSAS(userDelegationKey, accountName, permissions, expiryTime, startTime, version, sasProtocol, ipRange, cacheControl, contentDisposition, contentEncoding, contentLanguage, contentType); } /** * Code snippet for {@link ContainerAsyncClient * OffsetDateTime, String, SASProtocol, IPRange, String, String, String, String, String)} */ public void generateSASCodeSnippets() { ContainerSASPermission permissions = new ContainerSASPermission() .read(true) .write(true) .create(true) .delete(true) .add(true) .list(true); OffsetDateTime startTime = OffsetDateTime.now().minusDays(1); OffsetDateTime expiryTime = 
OffsetDateTime.now().plusDays(1); IPRange ipRange = new IPRange() .ipMin("0.0.0.0") .ipMax("255.255.255.255"); SASProtocol sasProtocol = SASProtocol.HTTPS_HTTP; String cacheControl = "cache"; String contentDisposition = "disposition"; String contentEncoding = "encoding"; String contentLanguage = "language"; String contentType = "type"; String identifier = "identifier"; String version = Constants.HeaderConstants.TARGET_STORAGE_VERSION; String sas = client.generateSAS(identifier, permissions, expiryTime, startTime, version, sasProtocol, ipRange, cacheControl, contentDisposition, contentEncoding, contentLanguage, contentType); } /** * Code snippet for {@link ContainerAsyncClient */ public void getBlobAsyncClient() { BlobAsyncClient blobAsyncClient = client.getBlobAsyncClient(blobName); } /** * Code snippet for {@link ContainerAsyncClient */ public void getSnapshotBlobAsyncClient() { BlobAsyncClient blobAsyncClient = client.getBlobAsyncClient(blobName, snapshot); } /** * Code snippet for {@link ContainerAsyncClient */ public void getAppendBlobAsyncClient() { AppendBlobAsyncClient appendBlobAsyncClient = client.getAppendBlobAsyncClient(blobName); } /** * Code snippet for {@link ContainerAsyncClient */ public void getSnapshotAppendBlobAsyncClient() { AppendBlobAsyncClient appendBlobAsyncClient = client.getAppendBlobAsyncClient(blobName, snapshot); } /** * Code snippet for {@link ContainerAsyncClient */ public void getBlockBlobAsyncClient() { BlockBlobAsyncClient blockBlobAsyncClient = client.getBlockBlobAsyncClient(blobName); } /** * Code snippet for {@link ContainerAsyncClient */ public void getSnapshotBlockBlobAsyncClient() { BlockBlobAsyncClient blockBlobAsyncClient = client.getBlockBlobAsyncClient(blobName, snapshot); } /** * Code snippet for {@link ContainerAsyncClient */ public void getPageBlobAsyncClient() { PageBlobAsyncClient pageBlobAsyncClient = client.getPageBlobAsyncClient(blobName); } /** * Code snippet for {@link ContainerAsyncClient */ public void 
getSnapshotPageBlobAsyncClient() { PageBlobAsyncClient pageBlobAsyncClient = client.getPageBlobAsyncClient(blobName, snapshot); } /** * Code snippet for {@link ContainerAsyncClient */ public void exists() { client.exists().subscribe(response -> System.out.printf("Exists? %b%n", response.booleanValue())); } /** * Code snippet for {@link ContainerAsyncClient */ /** * Code snippet for {@link ContainerAsyncClient */ public void existsWithResponse2() { Context context = new Context("key", "value"); client.existsWithResponse(context).subscribe(response -> System.out.printf("Exists? %b%n", response.value().booleanValue())); } /** * Code snippet for {@link ContainerAsyncClient */ public void create() { client.create().subscribe( response -> System.out.printf("Create completed%n"), error -> System.out.printf("Error while creating container %s%n", error)); } /** * Code snippet for {@link ContainerAsyncClient */ public void create2() { Metadata metadata = new Metadata(Collections.singletonMap("metadata", "value")); client.createWithResponse(metadata, PublicAccessType.CONTAINER).subscribe(response -> System.out.printf("Create completed with status %d%n", response.statusCode())); } /** * Code snippet for {@link ContainerAsyncClient */ public void delete() { client.delete().subscribe( response -> System.out.printf("Delete completed%n"), error -> System.out.printf("Delete failed: %s%n", error)); } /** * Code snippet for {@link ContainerAsyncClient */ public void delete2() { ContainerAccessConditions accessConditions = new ContainerAccessConditions() .leaseAccessConditions(new LeaseAccessConditions().leaseId(leaseId)) .modifiedAccessConditions(new ModifiedAccessConditions() .ifUnmodifiedSince(OffsetDateTime.now().minusDays(3))); client.deleteWithResponse(accessConditions).subscribe(response -> System.out.printf("Delete completed with status %d%n", response.statusCode())); } /** * Code snippet for {@link ContainerAsyncClient */ public void getProperties() { 
client.getProperties().subscribe(response -> System.out.printf("Public Access Type: %s, Legal Hold? %b, Immutable? %b%n", response.blobPublicAccess(), response.hasLegalHold(), response.hasImmutabilityPolicy())); } /** * Code snippet for {@link ContainerAsyncClient */ public void getProperties2() { LeaseAccessConditions accessConditions = new LeaseAccessConditions().leaseId(leaseId); client.getPropertiesWithResponse(accessConditions).subscribe(response -> System.out.printf("Public Access Type: %s, Legal Hold? %b, Immutable? %b%n", response.value().blobPublicAccess(), response.value().hasLegalHold(), response.value().hasImmutabilityPolicy())); } /** * Code snippet for {@link ContainerAsyncClient */ public void setMetadata() { Metadata metadata = new Metadata(Collections.singletonMap("metadata", "value")); client.setMetadata(metadata).subscribe( response -> System.out.printf("Set metadata completed%n"), error -> System.out.printf("Set metadata failed: %s%n", error)); } /** * Code snippet for {@link ContainerAsyncClient */ public void setMetadata2() { Metadata metadata = new Metadata(Collections.singletonMap("metadata", "value")); ContainerAccessConditions accessConditions = new ContainerAccessConditions() .leaseAccessConditions(new LeaseAccessConditions().leaseId(leaseId)) .modifiedAccessConditions(new ModifiedAccessConditions() .ifUnmodifiedSince(OffsetDateTime.now().minusDays(3))); client.setMetadataWithResponse(metadata, accessConditions).subscribe(response -> System.out.printf("Set metadata completed with status %d%n", response.statusCode())); } /** * Code snippet for {@link ContainerAsyncClient */ public void getAccessPolicy() { client.getAccessPolicy().subscribe(response -> { System.out.printf("Blob Access Type: %s%n", response.getBlobAccessType()); for (SignedIdentifier identifier : response.getIdentifiers()) { System.out.printf("Identifier Name: %s, Permissions %s%n", identifier.id(), identifier.accessPolicy().permission()); } }); } /** * Code snippet for 
{@link ContainerAsyncClient */ public void getAccessPolicy2() { LeaseAccessConditions accessConditions = new LeaseAccessConditions().leaseId(leaseId); client.getAccessPolicyWithResponse(accessConditions).subscribe(response -> { System.out.printf("Blob Access Type: %s%n", response.value().getBlobAccessType()); for (SignedIdentifier identifier : response.value().getIdentifiers()) { System.out.printf("Identifier Name: %s, Permissions %s%n", identifier.id(), identifier.accessPolicy().permission()); } }); } /** * Code snippet for {@link ContainerAsyncClient */ public void setAccessPolicy() { SignedIdentifier identifier = new SignedIdentifier() .id("name") .accessPolicy(new AccessPolicy() .start(OffsetDateTime.now()) .expiry(OffsetDateTime.now().plusDays(7)) .permission("permissionString")); client.setAccessPolicy(PublicAccessType.CONTAINER, Collections.singletonList(identifier)).subscribe( response -> System.out.printf("Set access policy completed%n"), error -> System.out.printf("Set access policy failed: %s%n", error)); } /** * Code snippet for {@link ContainerAsyncClient */ public void setAccessPolicy2() { SignedIdentifier identifier = new SignedIdentifier() .id("name") .accessPolicy(new AccessPolicy() .start(OffsetDateTime.now()) .expiry(OffsetDateTime.now().plusDays(7)) .permission("permissionString")); ContainerAccessConditions accessConditions = new ContainerAccessConditions() .leaseAccessConditions(new LeaseAccessConditions().leaseId(leaseId)) .modifiedAccessConditions(new ModifiedAccessConditions() .ifUnmodifiedSince(OffsetDateTime.now().minusDays(3))); client.setAccessPolicyWithResponse(PublicAccessType.CONTAINER, Collections.singletonList(identifier), accessConditions) .subscribe(response -> System.out.printf("Set access policy completed with status %d%n", response.statusCode())); } /** * Code snippet for {@link ContainerAsyncClient */ public void listBlobsFlat() { client.listBlobsFlat().subscribe(blob -> System.out.printf("Name: %s, Directory? 
%b%n", blob.name(), blob.isPrefix())); } /** * Code snippet for {@link ContainerAsyncClient */ public void listBlobsFlat2() { ListBlobsOptions options = new ListBlobsOptions() .prefix("prefixToMatch") .details(new BlobListDetails() .deletedBlobs(true) .snapshots(true)); client.listBlobsFlat(options).subscribe(blob -> System.out.printf("Name: %s, Directory? %b, Deleted? %b, Snapshot ID: %s%n", blob.name(), blob.isPrefix(), blob.deleted(), blob.snapshot())); } /** * Code snippet for {@link ContainerAsyncClient */ public void listBlobsHierarchy() { client.listBlobsHierarchy("directoryName").subscribe(blob -> System.out.printf("Name: %s, Directory? %b%n", blob.name(), blob.isPrefix())); } /** * Code snippet for {@link ContainerAsyncClient */ public void listBlobsHierarchy2() { ListBlobsOptions options = new ListBlobsOptions() .prefix("directoryName") .details(new BlobListDetails() .deletedBlobs(true) .snapshots(true)); client.listBlobsHierarchy("/", options).subscribe(blob -> System.out.printf("Name: %s, Directory? %b, Deleted? 
%b, Snapshot ID: %s%n", blob.name(), blob.isPrefix(), blob.deleted(), blob.snapshot())); } /** * Code snippet for {@link ContainerAsyncClient */ public void acquireLease() { client.acquireLease(proposedId, leaseDuration).subscribe(response -> System.out.printf("Lease ID: %s%n", response)); } /** * Code snippet for {@link ContainerAsyncClient */ public void acquireLease2() { ModifiedAccessConditions accessConditions = new ModifiedAccessConditions() .ifUnmodifiedSince(OffsetDateTime.now().minusDays(3)); client.acquireLeaseWithResponse(proposedId, leaseDuration, accessConditions).subscribe(response -> System.out.printf("Lease ID: %s%n", response.value())); } /** * Code snippet for {@link ContainerAsyncClient */ public void renewLease() { client.renewLease(leaseId).subscribe(response -> System.out.printf("Renewed Lease ID: %s%n", response)); } /** * Code snippet for {@link ContainerAsyncClient */ public void renewLease2() { ModifiedAccessConditions accessConditions = new ModifiedAccessConditions() .ifUnmodifiedSince(OffsetDateTime.now().minusDays(3)); client.renewLeaseWithResponse(leaseId, accessConditions).subscribe(response -> System.out.printf("Renewed Lease ID: %s%n", response.value())); } /** * Code snippet for {@link ContainerAsyncClient */ public void releaseLease() { client.releaseLease(leaseId).subscribe( response -> System.out.printf("Release lease completed%n"), error -> System.out.printf("Release lease failed: %n", error)); } /** * Code snippet for {@link ContainerAsyncClient */ public void releaseLease2() { ModifiedAccessConditions accessConditions = new ModifiedAccessConditions() .ifUnmodifiedSince(OffsetDateTime.now().minusDays(3)); client.releaseLeaseWithResponse(leaseId, accessConditions).subscribe(response -> System.out.printf("Release lease completed with status %d%n", response.statusCode())); } /** * Code snippet for {@link ContainerAsyncClient */ public void breakLease() { client.breakLease().subscribe(response -> System.out.printf("Broken lease 
had %d seconds remaining on the lease%n", response.getSeconds())); } /** * Code snippet for {@link ContainerAsyncClient */ public void breakLease2() { ModifiedAccessConditions accessConditions = new ModifiedAccessConditions() .ifUnmodifiedSince(OffsetDateTime.now().minusDays(3)); client.breakLeaseWithResponse(10, accessConditions).subscribe(response -> System.out.printf("Broken lease had %d seconds remaining on the lease%n", response.value().getSeconds())); } /** * Code snippet for {@link ContainerAsyncClient */ public void changeLease() { client.changeLease(leaseId, proposedId).subscribe(response -> System.out.printf("Changed Lease ID: %s%n", response)); } /** * Code snippet for {@link ContainerAsyncClient */ public void changeLease2() { ModifiedAccessConditions accessConditions = new ModifiedAccessConditions() .ifUnmodifiedSince(OffsetDateTime.now().minusDays(3)); client.changeLeaseWithResponse(leaseId, proposedId, accessConditions).subscribe(response -> System.out.printf("Changed Lease ID: %s%n", response.value())); } /** * Code snippet for {@link ContainerAsyncClient */ public void getAccountInfo() { client.getAccountInfo().subscribe(response -> System.out.printf("Account Kind: %s, SKU: %s%n", response.accountKind(), response.skuName())); } /** * Code snippet for {@link ContainerAsyncClient */ public void getAccountInfo2() { client.getAccountInfoWithResponse().subscribe(response -> System.out.printf("Account Kind: %s, SKU: %s%n", response.value().accountKind(), response.value().skuName())); } }
class ContainerAsyncClientJavaDocCodeSnippets { private ContainerAsyncClient client = JavaDocCodeSnippetsHelpers.getContainerAsyncClient(); private String blobName = "blobName"; private String snapshot = "snapshot"; private String leaseId = "leaseId"; private String proposedId = "proposedId"; private int leaseDuration = (int) Duration.ofSeconds(30).getSeconds(); /** * Code snippet for {@link ContainerAsyncClient * ContainerSASPermission, OffsetDateTime, OffsetDateTime, String, SASProtocol, IPRange, String, String, String, * String, String)} */ public void generateUserDelegationSASCodeSnippets() { ContainerSASPermission permissions = new ContainerSASPermission() .read(true) .write(true) .create(true) .delete(true) .add(true) .list(true); OffsetDateTime startTime = OffsetDateTime.now().minusDays(1); OffsetDateTime expiryTime = OffsetDateTime.now().plusDays(1); IPRange ipRange = new IPRange() .ipMin("0.0.0.0") .ipMax("255.255.255.255"); SASProtocol sasProtocol = SASProtocol.HTTPS_HTTP; String cacheControl = "cache"; String contentDisposition = "disposition"; String contentEncoding = "encoding"; String contentLanguage = "language"; String contentType = "type"; String version = Constants.HeaderConstants.TARGET_STORAGE_VERSION; String accountName = "accountName"; UserDelegationKey userDelegationKey = new UserDelegationKey(); String sas = client.generateUserDelegationSAS(userDelegationKey, accountName, permissions, expiryTime, startTime, version, sasProtocol, ipRange, cacheControl, contentDisposition, contentEncoding, contentLanguage, contentType); } /** * Code snippet for {@link ContainerAsyncClient * OffsetDateTime, String, SASProtocol, IPRange, String, String, String, String, String)} */ public void generateSASCodeSnippets() { ContainerSASPermission permissions = new ContainerSASPermission() .read(true) .write(true) .create(true) .delete(true) .add(true) .list(true); OffsetDateTime startTime = OffsetDateTime.now().minusDays(1); OffsetDateTime expiryTime = 
OffsetDateTime.now().plusDays(1); IPRange ipRange = new IPRange() .ipMin("0.0.0.0") .ipMax("255.255.255.255"); SASProtocol sasProtocol = SASProtocol.HTTPS_HTTP; String cacheControl = "cache"; String contentDisposition = "disposition"; String contentEncoding = "encoding"; String contentLanguage = "language"; String contentType = "type"; String identifier = "identifier"; String version = Constants.HeaderConstants.TARGET_STORAGE_VERSION; String sas = client.generateSAS(identifier, permissions, expiryTime, startTime, version, sasProtocol, ipRange, cacheControl, contentDisposition, contentEncoding, contentLanguage, contentType); } /** * Code snippet for {@link ContainerAsyncClient */ public void getBlobAsyncClient() { BlobAsyncClient blobAsyncClient = client.getBlobAsyncClient(blobName); } /** * Code snippet for {@link ContainerAsyncClient */ public void getSnapshotBlobAsyncClient() { BlobAsyncClient blobAsyncClient = client.getBlobAsyncClient(blobName, snapshot); } /** * Code snippet for {@link ContainerAsyncClient */ public void getAppendBlobAsyncClient() { AppendBlobAsyncClient appendBlobAsyncClient = client.getAppendBlobAsyncClient(blobName); } /** * Code snippet for {@link ContainerAsyncClient */ public void getSnapshotAppendBlobAsyncClient() { AppendBlobAsyncClient appendBlobAsyncClient = client.getAppendBlobAsyncClient(blobName, snapshot); } /** * Code snippet for {@link ContainerAsyncClient */ public void getBlockBlobAsyncClient() { BlockBlobAsyncClient blockBlobAsyncClient = client.getBlockBlobAsyncClient(blobName); } /** * Code snippet for {@link ContainerAsyncClient */ public void getSnapshotBlockBlobAsyncClient() { BlockBlobAsyncClient blockBlobAsyncClient = client.getBlockBlobAsyncClient(blobName, snapshot); } /** * Code snippet for {@link ContainerAsyncClient */ public void getPageBlobAsyncClient() { PageBlobAsyncClient pageBlobAsyncClient = client.getPageBlobAsyncClient(blobName); } /** * Code snippet for {@link ContainerAsyncClient */ public void 
getSnapshotPageBlobAsyncClient() { PageBlobAsyncClient pageBlobAsyncClient = client.getPageBlobAsyncClient(blobName, snapshot); } /** * Code snippet for {@link ContainerAsyncClient */ public void exists() { client.exists().subscribe(response -> System.out.printf("Exists? %b%n", response)); } /** * Code snippet for {@link ContainerAsyncClient */ /** * Code snippet for {@link ContainerAsyncClient */ public void existsWithResponse2() { Context context = new Context("key", "value"); client.existsWithResponse(context).subscribe(response -> System.out.printf("Exists? %b%n", response.value())); } /** * Code snippet for {@link ContainerAsyncClient */ public void create() { client.create().subscribe( response -> System.out.printf("Create completed%n"), error -> System.out.printf("Error while creating container %s%n", error)); } /** * Code snippet for {@link ContainerAsyncClient */ public void create2() { Metadata metadata = new Metadata(Collections.singletonMap("metadata", "value")); client.createWithResponse(metadata, PublicAccessType.CONTAINER).subscribe(response -> System.out.printf("Create completed with status %d%n", response.statusCode())); } /** * Code snippet for {@link ContainerAsyncClient */ public void delete() { client.delete().subscribe( response -> System.out.printf("Delete completed%n"), error -> System.out.printf("Delete failed: %s%n", error)); } /** * Code snippet for {@link ContainerAsyncClient */ public void delete2() { ContainerAccessConditions accessConditions = new ContainerAccessConditions() .leaseAccessConditions(new LeaseAccessConditions().leaseId(leaseId)) .modifiedAccessConditions(new ModifiedAccessConditions() .ifUnmodifiedSince(OffsetDateTime.now().minusDays(3))); client.deleteWithResponse(accessConditions).subscribe(response -> System.out.printf("Delete completed with status %d%n", response.statusCode())); } /** * Code snippet for {@link ContainerAsyncClient */ public void getProperties() { client.getProperties().subscribe(response -> 
System.out.printf("Public Access Type: %s, Legal Hold? %b, Immutable? %b%n", response.blobPublicAccess(), response.hasLegalHold(), response.hasImmutabilityPolicy())); } /** * Code snippet for {@link ContainerAsyncClient */ public void getProperties2() { LeaseAccessConditions accessConditions = new LeaseAccessConditions().leaseId(leaseId); client.getPropertiesWithResponse(accessConditions).subscribe(response -> System.out.printf("Public Access Type: %s, Legal Hold? %b, Immutable? %b%n", response.value().blobPublicAccess(), response.value().hasLegalHold(), response.value().hasImmutabilityPolicy())); } /** * Code snippet for {@link ContainerAsyncClient */ public void setMetadata() { Metadata metadata = new Metadata(Collections.singletonMap("metadata", "value")); client.setMetadata(metadata).subscribe( response -> System.out.printf("Set metadata completed%n"), error -> System.out.printf("Set metadata failed: %s%n", error)); } /** * Code snippet for {@link ContainerAsyncClient */ public void setMetadata2() { Metadata metadata = new Metadata(Collections.singletonMap("metadata", "value")); ContainerAccessConditions accessConditions = new ContainerAccessConditions() .leaseAccessConditions(new LeaseAccessConditions().leaseId(leaseId)) .modifiedAccessConditions(new ModifiedAccessConditions() .ifUnmodifiedSince(OffsetDateTime.now().minusDays(3))); client.setMetadataWithResponse(metadata, accessConditions).subscribe(response -> System.out.printf("Set metadata completed with status %d%n", response.statusCode())); } /** * Code snippet for {@link ContainerAsyncClient */ public void getAccessPolicy() { client.getAccessPolicy().subscribe(response -> { System.out.printf("Blob Access Type: %s%n", response.getBlobAccessType()); for (SignedIdentifier identifier : response.getIdentifiers()) { System.out.printf("Identifier Name: %s, Permissions %s%n", identifier.id(), identifier.accessPolicy().permission()); } }); } /** * Code snippet for {@link ContainerAsyncClient */ public void 
getAccessPolicy2() { LeaseAccessConditions accessConditions = new LeaseAccessConditions().leaseId(leaseId); client.getAccessPolicyWithResponse(accessConditions).subscribe(response -> { System.out.printf("Blob Access Type: %s%n", response.value().getBlobAccessType()); for (SignedIdentifier identifier : response.value().getIdentifiers()) { System.out.printf("Identifier Name: %s, Permissions %s%n", identifier.id(), identifier.accessPolicy().permission()); } }); } /** * Code snippet for {@link ContainerAsyncClient */ public void setAccessPolicy() { SignedIdentifier identifier = new SignedIdentifier() .id("name") .accessPolicy(new AccessPolicy() .start(OffsetDateTime.now()) .expiry(OffsetDateTime.now().plusDays(7)) .permission("permissionString")); client.setAccessPolicy(PublicAccessType.CONTAINER, Collections.singletonList(identifier)).subscribe( response -> System.out.printf("Set access policy completed%n"), error -> System.out.printf("Set access policy failed: %s%n", error)); } /** * Code snippet for {@link ContainerAsyncClient */ public void setAccessPolicy2() { SignedIdentifier identifier = new SignedIdentifier() .id("name") .accessPolicy(new AccessPolicy() .start(OffsetDateTime.now()) .expiry(OffsetDateTime.now().plusDays(7)) .permission("permissionString")); ContainerAccessConditions accessConditions = new ContainerAccessConditions() .leaseAccessConditions(new LeaseAccessConditions().leaseId(leaseId)) .modifiedAccessConditions(new ModifiedAccessConditions() .ifUnmodifiedSince(OffsetDateTime.now().minusDays(3))); client.setAccessPolicyWithResponse(PublicAccessType.CONTAINER, Collections.singletonList(identifier), accessConditions) .subscribe(response -> System.out.printf("Set access policy completed with status %d%n", response.statusCode())); } /** * Code snippet for {@link ContainerAsyncClient */ public void listBlobsFlat() { client.listBlobsFlat().subscribe(blob -> System.out.printf("Name: %s, Directory? 
%b%n", blob.name(), blob.isPrefix())); } /** * Code snippet for {@link ContainerAsyncClient */ public void listBlobsFlat2() { ListBlobsOptions options = new ListBlobsOptions() .prefix("prefixToMatch") .details(new BlobListDetails() .deletedBlobs(true) .snapshots(true)); client.listBlobsFlat(options).subscribe(blob -> System.out.printf("Name: %s, Directory? %b, Deleted? %b, Snapshot ID: %s%n", blob.name(), blob.isPrefix(), blob.deleted(), blob.snapshot())); } /** * Code snippet for {@link ContainerAsyncClient */ public void listBlobsHierarchy() { client.listBlobsHierarchy("directoryName").subscribe(blob -> System.out.printf("Name: %s, Directory? %b%n", blob.name(), blob.isPrefix())); } /** * Code snippet for {@link ContainerAsyncClient */ public void listBlobsHierarchy2() { ListBlobsOptions options = new ListBlobsOptions() .prefix("directoryName") .details(new BlobListDetails() .deletedBlobs(true) .snapshots(true)); client.listBlobsHierarchy("/", options).subscribe(blob -> System.out.printf("Name: %s, Directory? %b, Deleted? 
%b, Snapshot ID: %s%n", blob.name(), blob.isPrefix(), blob.deleted(), blob.snapshot())); } /** * Code snippet for {@link ContainerAsyncClient */ public void acquireLease() { client.acquireLease(proposedId, leaseDuration).subscribe(response -> System.out.printf("Lease ID: %s%n", response)); } /** * Code snippet for {@link ContainerAsyncClient */ public void acquireLease2() { ModifiedAccessConditions accessConditions = new ModifiedAccessConditions() .ifUnmodifiedSince(OffsetDateTime.now().minusDays(3)); client.acquireLeaseWithResponse(proposedId, leaseDuration, accessConditions).subscribe(response -> System.out.printf("Lease ID: %s%n", response.value())); } /** * Code snippet for {@link ContainerAsyncClient */ public void renewLease() { client.renewLease(leaseId).subscribe(response -> System.out.printf("Renewed Lease ID: %s%n", response)); } /** * Code snippet for {@link ContainerAsyncClient */ public void renewLease2() { ModifiedAccessConditions accessConditions = new ModifiedAccessConditions() .ifUnmodifiedSince(OffsetDateTime.now().minusDays(3)); client.renewLeaseWithResponse(leaseId, accessConditions).subscribe(response -> System.out.printf("Renewed Lease ID: %s%n", response.value())); } /** * Code snippet for {@link ContainerAsyncClient */ public void releaseLease() { client.releaseLease(leaseId).subscribe( response -> System.out.printf("Release lease completed%n"), error -> System.out.printf("Release lease failed: %n", error)); } /** * Code snippet for {@link ContainerAsyncClient */ public void releaseLease2() { ModifiedAccessConditions accessConditions = new ModifiedAccessConditions() .ifUnmodifiedSince(OffsetDateTime.now().minusDays(3)); client.releaseLeaseWithResponse(leaseId, accessConditions).subscribe(response -> System.out.printf("Release lease completed with status %d%n", response.statusCode())); } /** * Code snippet for {@link ContainerAsyncClient */ public void breakLease() { client.breakLease().subscribe(response -> System.out.printf("Broken lease 
had %d seconds remaining on the lease%n", response.getSeconds())); } /** * Code snippet for {@link ContainerAsyncClient */ public void breakLease2() { ModifiedAccessConditions accessConditions = new ModifiedAccessConditions() .ifUnmodifiedSince(OffsetDateTime.now().minusDays(3)); client.breakLeaseWithResponse(10, accessConditions).subscribe(response -> System.out.printf("Broken lease had %d seconds remaining on the lease%n", response.value().getSeconds())); } /** * Code snippet for {@link ContainerAsyncClient */ public void changeLease() { client.changeLease(leaseId, proposedId).subscribe(response -> System.out.printf("Changed Lease ID: %s%n", response)); } /** * Code snippet for {@link ContainerAsyncClient */ public void changeLease2() { ModifiedAccessConditions accessConditions = new ModifiedAccessConditions() .ifUnmodifiedSince(OffsetDateTime.now().minusDays(3)); client.changeLeaseWithResponse(leaseId, proposedId, accessConditions).subscribe(response -> System.out.printf("Changed Lease ID: %s%n", response.value())); } /** * Code snippet for {@link ContainerAsyncClient */ public void getAccountInfo() { client.getAccountInfo().subscribe(response -> System.out.printf("Account Kind: %s, SKU: %s%n", response.accountKind(), response.skuName())); } /** * Code snippet for {@link ContainerAsyncClient */ public void getAccountInfo2() { client.getAccountInfoWithResponse().subscribe(response -> System.out.printf("Account Kind: %s, SKU: %s%n", response.value().accountKind(), response.value().skuName())); } }
Does this fail Checkstyle? I thought the `:` would need to be on a new line.
/**
 * Persists the given checkpoint as metadata on the partition's blob, using an If-Match
 * condition on the checkpoint's ETag so a stale owner cannot overwrite a newer checkpoint.
 *
 * @param checkpoint Checkpoint information (sequence number and/or offset) to store.
 * @return A {@link Mono} emitting the new ETag returned by the successful metadata update.
 * @throws IllegalStateException (propagated) if both sequence number and offset are null.
 */
public Mono<String> updateCheckpoint(Checkpoint checkpoint) {
    if (checkpoint.sequenceNumber() == null && checkpoint.offset() == null) {
        throw logger.logExceptionAsWarning(Exceptions
            .propagate(new IllegalStateException(
                "Both sequence number and offset cannot be null when updating a checkpoint")));
    }

    String partitionId = checkpoint.partitionId();
    String blobName = getBlobName(checkpoint.eventHubName(), checkpoint.consumerGroupName(), partitionId);
    // computeIfAbsent is atomic on ConcurrentHashMap; the previous containsKey/put pair
    // could race and create two clients for the same blob under concurrent callers.
    BlobAsyncClient blobAsyncClient =
        blobClients.computeIfAbsent(blobName, containerAsyncClient::getBlobAsyncClient);

    Metadata metadata = new Metadata();
    String sequenceNumber = checkpoint.sequenceNumber() == null ? null
        : String.valueOf(checkpoint.sequenceNumber());
    metadata.put(SEQUENCE_NUMBER, sequenceNumber);
    metadata.put(OFFSET, checkpoint.offset());
    metadata.put(OWNER_ID, checkpoint.ownerId());

    BlobAccessConditions blobAccessConditions = new BlobAccessConditions()
        .modifiedAccessConditions(new ModifiedAccessConditions().ifMatch(checkpoint.eTag()));
    return blobAsyncClient.setMetadataWithResponse(metadata, blobAccessConditions)
        .map(response -> response.headers().get(ETAG).value());
}
String sequenceNumber = checkpoint.sequenceNumber() == null ? null :
/**
 * Persists the given checkpoint as metadata on the partition's blob, using an If-Match
 * condition on the checkpoint's ETag so a stale owner cannot overwrite a newer checkpoint.
 * Both sequence number and offset are stored as strings; null values are preserved as null.
 *
 * @param checkpoint Checkpoint information (sequence number and/or offset) to store.
 * @return A {@link Mono} emitting the new ETag returned by the successful metadata update.
 * @throws IllegalStateException (propagated) if both sequence number and offset are null.
 */
public Mono<String> updateCheckpoint(Checkpoint checkpoint) {
    if (checkpoint.sequenceNumber() == null && checkpoint.offset() == null) {
        throw logger.logExceptionAsWarning(Exceptions
            .propagate(new IllegalStateException(
                "Both sequence number and offset cannot be null when updating a checkpoint")));
    }

    String partitionId = checkpoint.partitionId();
    String blobName = getBlobName(checkpoint.eventHubName(), checkpoint.consumerGroupName(), partitionId);
    // computeIfAbsent is atomic on ConcurrentHashMap; the previous containsKey/put pair
    // could race and create two clients for the same blob under concurrent callers.
    BlobAsyncClient blobAsyncClient =
        blobClients.computeIfAbsent(blobName, containerAsyncClient::getBlobAsyncClient);

    Metadata metadata = new Metadata();
    String sequenceNumber = checkpoint.sequenceNumber() == null ? null
        : String.valueOf(checkpoint.sequenceNumber());
    String offset = checkpoint.offset() == null ? null
        : String.valueOf(checkpoint.offset());
    metadata.put(SEQUENCE_NUMBER, sequenceNumber);
    metadata.put(OFFSET, offset);
    metadata.put(OWNER_ID, checkpoint.ownerId());

    BlobAccessConditions blobAccessConditions = new BlobAccessConditions()
        .modifiedAccessConditions(new ModifiedAccessConditions().ifMatch(checkpoint.eTag()));
    return blobAsyncClient.setMetadataWithResponse(metadata, blobAccessConditions)
        .map(response -> response.headers().get(ETAG).value());
}
/**
 * A {@link PartitionManager} backed by Azure Storage Blobs. Each Event Hub partition's
 * ownership/checkpoint state is stored as metadata on a zero-length blob named
 * {@code <eventHubName>/<consumerGroupName>/<partitionId>}; the blob's ETag provides
 * optimistic concurrency for ownership claims.
 */
class BlobPartitionManager implements PartitionManager {
    // Blob-metadata keys holding the checkpoint/ownership fields.
    private static final String SEQUENCE_NUMBER = "sequenceNumber";
    private static final String OFFSET = "offset";
    private static final String OWNER_ID = "ownerId";
    // Response-header name used to read back the blob's ETag after a write.
    private static final String ETAG = "eTag";
    private static final String BLOB_PATH_SEPARATOR = "/";
    // Zero-length payload: the blob exists only to carry metadata.
    private static final ByteBuffer UPLOAD_DATA = ByteBuffer.wrap("".getBytes(UTF_8));
    private final ContainerAsyncClient containerAsyncClient;
    private final ClientLogger logger = new ClientLogger(BlobPartitionManager.class);
    // Cache of per-blob clients, keyed by blob name.
    private final Map<String, BlobAsyncClient> blobClients = new ConcurrentHashMap<>();

    /**
     * Creates an instance of BlobPartitionManager.
     *
     * @param containerAsyncClient The {@link ContainerAsyncClient} this instance will use to read and update blobs in
     * the storage container.
     */
    public BlobPartitionManager(ContainerAsyncClient containerAsyncClient) {
        this.containerAsyncClient = containerAsyncClient;
    }

    /**
     * {@inheritDoc}
     *
     * Lists the blobs under the {@code eventHubName/consumerGroupName} prefix and maps each
     * one to a {@link PartitionOwnership} built from its metadata.
     *
     * @param eventHubName The Event Hub name to get ownership information.
     * @param consumerGroupName The consumer group name to get ownership information.
     * @return A {@link Flux} of current partition ownership information.
     */
    @Override
    public Flux<PartitionOwnership> listOwnership(String eventHubName, String consumerGroupName) {
        String prefix = getBlobPrefix(eventHubName, consumerGroupName);
        BlobListDetails details = new BlobListDetails().metadata(true);
        ListBlobsOptions options = new ListBlobsOptions().prefix(prefix).details(details);
        return containerAsyncClient.listBlobsFlat(options)
            // Only names shaped like eventHub/consumerGroup/partitionId are ownership records.
            .filter(blobItem -> blobItem.name().split(BLOB_PATH_SEPARATOR).length == 3)
            .map(this::convertToPartitionOwnership);
    }

    /**
     * {@inheritDoc}
     *
     * Attempts each requested claim independently; a claim that loses the ETag race is
     * logged and dropped from the result rather than failing the whole Flux.
     *
     * @param requestedPartitionOwnerships Array of partition ownerships this instance is requesting to own.
     * @return A {@link Flux} of successfully claimed ownership of partitions.
     */
    @Override
    public Flux<PartitionOwnership> claimOwnership(PartitionOwnership... requestedPartitionOwnerships) {
        return Flux.fromArray(requestedPartitionOwnerships).flatMap(
            partitionOwnership -> {
                String partitionId = partitionOwnership.partitionId();
                String blobName = getBlobName(partitionOwnership.eventHubName(),
                    partitionOwnership.consumerGroupName(), partitionId);
                if (!blobClients.containsKey(blobName)) {
                    blobClients.put(blobName, containerAsyncClient.getBlobAsyncClient(blobName));
                }
                BlobAsyncClient blobAsyncClient = blobClients.get(blobName);

                Metadata metadata = new Metadata();
                metadata.put(OWNER_ID, partitionOwnership.ownerId());
                metadata.put(OFFSET, partitionOwnership.offset());
                Long sequenceNumber = partitionOwnership.sequenceNumber();
                metadata.put(SEQUENCE_NUMBER, sequenceNumber == null ? null : String.valueOf(sequenceNumber));
                BlobAccessConditions blobAccessConditions = new BlobAccessConditions();
                if (ImplUtils.isNullOrEmpty(partitionOwnership.eTag())) {
                    // No ETag yet: create the blob, failing if it already exists (If-None-Match: *).
                    blobAccessConditions.modifiedAccessConditions(new ModifiedAccessConditions()
                        .ifNoneMatch("*"));
                    return blobAsyncClient.asBlockBlobAsyncClient()
                        .uploadWithResponse(Flux.just(UPLOAD_DATA), 0, null, metadata, blobAccessConditions)
                        .flatMapMany(response -> {
                            // Record the new ETag so subsequent updates can use If-Match.
                            partitionOwnership.eTag(response.headers().get(ETAG).value());
                            return Mono.just(partitionOwnership);
                        }, error -> {
                            // Losing the creation race is expected under contention; swallow it.
                            logger.info("Couldn't claim ownership of partition {}, error {}", partitionId,
                                error.getMessage());
                            return Mono.empty();
                        }, Mono::empty);
                } else {
                    // Existing record: update metadata only while our ETag is still current (If-Match).
                    blobAccessConditions.modifiedAccessConditions(new ModifiedAccessConditions()
                        .ifMatch(partitionOwnership.eTag()));
                    return blobAsyncClient.setMetadataWithResponse(metadata, blobAccessConditions)
                        .flatMapMany(response -> {
                            partitionOwnership.eTag(response.headers().get(ETAG).value());
                            return Mono.just(partitionOwnership);
                        }, error -> {
                            logger.info("Couldn't claim ownership of partition {}, error {}", partitionId,
                                error.getMessage());
                            return Mono.empty();
                        }, () -> Mono.empty());
                }
            }
        );
    }

    /**
     * {@inheritDoc}
     *
     * NOTE(review): the {@code updateCheckpoint} body this javadoc and {@code @Override}
     * belong to appears to have been excised from this chunk — the annotation currently
     * dangles before a private helper. Preserved as-is; confirm against the full file.
     *
     * @param checkpoint Checkpoint information containing sequence number and offset to be stored for this partition.
     * @return A {@link Mono} containing the new ETag generated from a successful checkpoint update.
     */
    @Override

    // Prefix shared by all of this consumer group's ownership blobs.
    private String getBlobPrefix(String eventHubName, String consumerGroupName) {
        return eventHubName + BLOB_PATH_SEPARATOR + consumerGroupName;
    }

    // Full blob name for a single partition's ownership record.
    private String getBlobName(String eventHubName, String consumerGroupName, String partitionId) {
        return eventHubName + BLOB_PATH_SEPARATOR + consumerGroupName + BLOB_PATH_SEPARATOR + partitionId;
    }

    // Rebuilds a PartitionOwnership from a listed blob's name, metadata and properties.
    private PartitionOwnership convertToPartitionOwnership(BlobItem blobItem) {
        PartitionOwnership partitionOwnership = new PartitionOwnership();
        logger.info("Found blob for partition {}", blobItem.name());
        // Name layout is eventHub/consumerGroup/partitionId (guaranteed by listOwnership's filter).
        String[] names = blobItem.name().split(BLOB_PATH_SEPARATOR);
        partitionOwnership.eventHubName(names[0]);
        partitionOwnership.consumerGroupName(names[1]);
        partitionOwnership.partitionId(names[2]);

        if (ImplUtils.isNullOrEmpty(blobItem.metadata())) {
            logger.warning("No metadata available for blob {}", blobItem.name());
            return partitionOwnership;
        }

        blobItem.metadata().entrySet().stream()
            .forEach(entry -> {
                switch (entry.getKey()) {
                    case OWNER_ID:
                        partitionOwnership.ownerId(entry.getValue());
                        break;
                    case SEQUENCE_NUMBER:
                        partitionOwnership.sequenceNumber(Long.valueOf(entry.getValue()));
                        break;
                    case OFFSET:
                        partitionOwnership.offset(entry.getValue());
                        break;
                    default:
                        // Unknown metadata keys are ignored.
                        break;
                }
            });
        BlobProperties blobProperties = blobItem.properties();
        partitionOwnership.lastModifiedTime(blobProperties.lastModified().toInstant().toEpochMilli());
        partitionOwnership.eTag(blobProperties.etag());
        return partitionOwnership;
    }
}
/**
 * A {@link PartitionManager} backed by Azure Storage Blobs. Each Event Hub partition's
 * ownership/checkpoint state is stored as metadata on a zero-length blob named
 * {@code <eventHubName>/<consumerGroupName>/<partitionId>}; the blob's ETag provides
 * optimistic concurrency for ownership claims.
 */
class BlobPartitionManager implements PartitionManager {
    // Blob-metadata keys holding the checkpoint/ownership fields (PascalCase for
    // cross-language interop with the other SDK implementations).
    private static final String SEQUENCE_NUMBER = "SequenceNumber";
    private static final String OFFSET = "Offset";
    private static final String OWNER_ID = "OwnerId";
    // Response-header name used to read back the blob's ETag after a write.
    private static final String ETAG = "eTag";
    private static final String BLOB_PATH_SEPARATOR = "/";
    // Zero-length payload: the blob exists only to carry metadata.
    private static final ByteBuffer UPLOAD_DATA = ByteBuffer.wrap("".getBytes(UTF_8));
    private final ContainerAsyncClient containerAsyncClient;
    private final ClientLogger logger = new ClientLogger(BlobPartitionManager.class);
    // Cache of per-blob clients, keyed by blob name.
    private final Map<String, BlobAsyncClient> blobClients = new ConcurrentHashMap<>();

    /**
     * Creates an instance of BlobPartitionManager.
     *
     * @param containerAsyncClient The {@link ContainerAsyncClient} this instance will use to read and update blobs in
     * the storage container.
     */
    public BlobPartitionManager(ContainerAsyncClient containerAsyncClient) {
        this.containerAsyncClient = containerAsyncClient;
    }

    /**
     * This method is called by the {@link EventProcessor} to get the list of all existing partition ownership from the
     * Storage Blobs. Could return empty results if there is no existing ownership information.
     *
     * @param eventHubName The Event Hub name to get ownership information.
     * @param consumerGroupName The consumer group name.
     * @return A flux of partition ownership details of all the partitions that have/had an owner.
     */
    @Override
    public Flux<PartitionOwnership> listOwnership(String eventHubName, String consumerGroupName) {
        String prefix = getBlobPrefix(eventHubName, consumerGroupName);
        BlobListDetails details = new BlobListDetails().metadata(true);
        ListBlobsOptions options = new ListBlobsOptions().prefix(prefix).details(details);
        return containerAsyncClient.listBlobsFlat(options)
            // Only names shaped like eventHub/consumerGroup/partitionId are ownership records.
            .filter(blobItem -> blobItem.name().split(BLOB_PATH_SEPARATOR).length == 3)
            .map(this::convertToPartitionOwnership);
    }

    /**
     * This method is called by the {@link EventProcessor} to claim ownership of a list of partitions. This will return
     * the list of partitions that were owned successfully; a claim that loses the ETag race is
     * logged and dropped from the result rather than failing the whole Flux.
     *
     * @param requestedPartitionOwnerships Array of partition ownerships this instance is requesting to own.
     * @return A flux of partitions this instance successfully claimed ownership.
     */
    @Override
    public Flux<PartitionOwnership> claimOwnership(PartitionOwnership... requestedPartitionOwnerships) {
        return Flux.fromArray(requestedPartitionOwnerships).flatMap(
            partitionOwnership -> {
                String partitionId = partitionOwnership.partitionId();
                String blobName = getBlobName(partitionOwnership.eventHubName(),
                    partitionOwnership.consumerGroupName(), partitionId);
                if (!blobClients.containsKey(blobName)) {
                    blobClients.put(blobName, containerAsyncClient.getBlobAsyncClient(blobName));
                }
                BlobAsyncClient blobAsyncClient = blobClients.get(blobName);

                Metadata metadata = new Metadata();
                metadata.put(OWNER_ID, partitionOwnership.ownerId());
                // Offset and sequence number are Longs here; store as strings, preserving null.
                Long offset = partitionOwnership.offset();
                metadata.put(OFFSET, offset == null ? null : String.valueOf(offset));
                Long sequenceNumber = partitionOwnership.sequenceNumber();
                metadata.put(SEQUENCE_NUMBER, sequenceNumber == null ? null : String.valueOf(sequenceNumber));
                BlobAccessConditions blobAccessConditions = new BlobAccessConditions();
                if (ImplUtils.isNullOrEmpty(partitionOwnership.eTag())) {
                    // No ETag yet: create the blob, failing if it already exists (If-None-Match: *).
                    blobAccessConditions.modifiedAccessConditions(new ModifiedAccessConditions()
                        .ifNoneMatch("*"));
                    return blobAsyncClient.asBlockBlobAsyncClient()
                        .uploadWithResponse(Flux.just(UPLOAD_DATA), 0, null, metadata, blobAccessConditions)
                        .flatMapMany(response -> {
                            // Record the new ETag so subsequent updates can use If-Match.
                            partitionOwnership.eTag(response.headers().get(ETAG).value());
                            return Mono.just(partitionOwnership);
                        }, error -> {
                            // Losing the creation race is expected under contention; swallow it.
                            logger.info("Couldn't claim ownership of partition {}, error {}", partitionId,
                                error.getMessage());
                            return Mono.empty();
                        }, Mono::empty);
                } else {
                    // Existing record: update metadata only while our ETag is still current (If-Match).
                    blobAccessConditions.modifiedAccessConditions(new ModifiedAccessConditions()
                        .ifMatch(partitionOwnership.eTag()));
                    return blobAsyncClient.setMetadataWithResponse(metadata, blobAccessConditions)
                        .flatMapMany(response -> {
                            partitionOwnership.eTag(response.headers().get(ETAG).value());
                            return Mono.just(partitionOwnership);
                        }, error -> {
                            logger.info("Couldn't claim ownership of partition {}, error {}", partitionId,
                                error.getMessage());
                            return Mono.empty();
                        }, () -> Mono.empty());
                }
            }
        );
    }

    /**
     * Updates the checkpoint in Storage Blobs for a partition.
     *
     * NOTE(review): the {@code updateCheckpoint} body this javadoc and {@code @Override}
     * belong to appears to have been excised from this chunk — the annotation currently
     * dangles before a private helper. Preserved as-is; confirm against the full file.
     *
     * @param checkpoint Checkpoint information containing sequence number and offset to be stored for this partition.
     * @return The new ETag on successful update.
     */
    @Override

    // Prefix shared by all of this consumer group's ownership blobs.
    private String getBlobPrefix(String eventHubName, String consumerGroupName) {
        return eventHubName + BLOB_PATH_SEPARATOR + consumerGroupName;
    }

    // Full blob name for a single partition's ownership record.
    private String getBlobName(String eventHubName, String consumerGroupName, String partitionId) {
        return eventHubName + BLOB_PATH_SEPARATOR + consumerGroupName + BLOB_PATH_SEPARATOR + partitionId;
    }

    // Rebuilds a PartitionOwnership from a listed blob's name, metadata and properties.
    private PartitionOwnership convertToPartitionOwnership(BlobItem blobItem) {
        PartitionOwnership partitionOwnership = new PartitionOwnership();
        logger.info("Found blob for partition {}", blobItem.name());
        // Name layout is eventHub/consumerGroup/partitionId (guaranteed by listOwnership's filter).
        String[] names = blobItem.name().split(BLOB_PATH_SEPARATOR);
        partitionOwnership.eventHubName(names[0]);
        partitionOwnership.consumerGroupName(names[1]);
        partitionOwnership.partitionId(names[2]);

        if (ImplUtils.isNullOrEmpty(blobItem.metadata())) {
            logger.warning("No metadata available for blob {}", blobItem.name());
            return partitionOwnership;
        }

        blobItem.metadata().entrySet().stream()
            .forEach(entry -> {
                switch (entry.getKey()) {
                    case OWNER_ID:
                        partitionOwnership.ownerId(entry.getValue());
                        break;
                    case SEQUENCE_NUMBER:
                        partitionOwnership.sequenceNumber(Long.valueOf(entry.getValue()));
                        break;
                    case OFFSET:
                        // Offset is numeric in this version; parse back to Long.
                        partitionOwnership.offset(Long.valueOf(entry.getValue()));
                        break;
                    default:
                        // Unknown metadata keys are ignored.
                        break;
                }
            });
        BlobProperties blobProperties = blobItem.properties();
        partitionOwnership.lastModifiedTime(blobProperties.lastModified().toInstant().toEpochMilli());
        partitionOwnership.eTag(blobProperties.etag());
        return partitionOwnership;
    }
}
Just to make sure, is blob metadata case-sensitive? @chradek mentioned a bug related to this, and it would be nice for the language libraries to interoperate.
/**
 * Builds the blob metadata describing a partition's owner and checkpoint position.
 *
 * @param owner the owner id to store under {@code ownerId}
 * @param sequenceNumber the checkpoint sequence number, as a string
 * @param offset the checkpoint offset, as a string
 * @return the populated {@code Metadata} instance
 */
private Metadata getMetadata(String owner, String sequenceNumber, String offset) {
    final Metadata result = new Metadata();
    result.put("ownerId", owner);
    result.put("sequenceNumber", sequenceNumber);
    result.put("offset", offset);
    return result;
}
Metadata metadata = new Metadata();
/**
 * Builds the blob metadata describing a partition's owner and checkpoint position,
 * using the PascalCase key names.
 *
 * @param owner the owner id to store under {@code OwnerId}
 * @param sequenceNumber the checkpoint sequence number, as a string
 * @param offset the checkpoint offset, as a string
 * @return the populated {@code Metadata} instance
 */
private Metadata getMetadata(String owner, String sequenceNumber, String offset) {
    final Metadata result = new Metadata();
    result.put("OwnerId", owner);
    result.put("SequenceNumber", sequenceNumber);
    result.put("Offset", offset);
    return result;
}
/**
 * Unit tests for {@code BlobPartitionManager}, exercising list/claim/update flows
 * against mocked Storage Blob clients.
 */
class BlobPartitionManagerTest {
    @Mock
    private ContainerAsyncClient containerAsyncClient;
    @Mock
    private BlockBlobAsyncClient blockBlobAsyncClient;
    @Mock
    private BlobAsyncClient blobAsyncClient;

    @Before
    public void setup() {
        MockitoAnnotations.initMocks(this);
    }

    /** Listing ownership maps blob name/metadata back into a PartitionOwnership. */
    @Test
    public void testListOwnerShip() {
        BlobPartitionManager blobPartitionManager = new BlobPartitionManager(containerAsyncClient);
        BlobItem blobItem = getBlobItem("owner1", "1", "230", "etag", "eh/cg/0");
        // Diamond operator instead of the redundant explicit type argument.
        PagedFlux<BlobItem> response = new PagedFlux<>(() ->
            Mono.just(new PagedResponseBase<HttpHeaders, BlobItem>(null, 200, null,
                Arrays.asList(blobItem), null, null)));
        when(containerAsyncClient.listBlobsFlat(any(ListBlobsOptions.class))).thenReturn(response);
        StepVerifier.create(blobPartitionManager.listOwnership("eh", "cg"))
            .assertNext(partitionOwnership -> {
                assertEquals("owner1", partitionOwnership.ownerId());
                assertEquals("0", partitionOwnership.partitionId());
                assertEquals(1, (long) partitionOwnership.sequenceNumber());
                assertEquals("230", partitionOwnership.offset());
                assertEquals("eh", partitionOwnership.eventHubName());
                assertEquals("cg", partitionOwnership.consumerGroupName());
                assertEquals("etag", partitionOwnership.eTag());
            }).verifyComplete();
    }

    /** Updating a checkpoint emits the new ETag returned by the metadata write. */
    @Test
    public void testUpdateCheckpoint() {
        Checkpoint checkpoint = new Checkpoint()
            .eventHubName("eh")
            .consumerGroupName("cg")
            .ownerId("owner1")
            .partitionId("0")
            .eTag("etag")
            .sequenceNumber(2L)
            .offset("100");
        Map<String, String> headers = new HashMap<>();
        headers.put("eTag", "etag2");
        when(containerAsyncClient.getBlobAsyncClient("eh/cg/0")).thenReturn(blobAsyncClient);
        when(blobAsyncClient.setMetadataWithResponse(any(Metadata.class), any(BlobAccessConditions.class)))
            .thenReturn(Mono.just(new VoidResponse(null, 200, new HttpHeaders(headers))));
        BlobPartitionManager blobPartitionManager = new BlobPartitionManager(containerAsyncClient);
        StepVerifier.create(blobPartitionManager.updateCheckpoint(checkpoint))
            .assertNext(etag -> assertEquals("etag2", etag)).verifyComplete();
    }

    /**
     * Claiming a fresh partition (no ETag) uploads a new blob and echoes back the new ETag.
     * The raw {@code any(Flux.class)} matcher requires suppressing the unchecked warning.
     */
    @SuppressWarnings("unchecked")
    @Test
    public void testClaimOwnership() {
        PartitionOwnership po = createPartitionOwnership("eh", "cg", "0", "owner1");
        HttpHeaders httpHeaders = new HttpHeaders();
        httpHeaders.put("eTag", "etag2");
        when(containerAsyncClient.getBlobAsyncClient("eh/cg/0")).thenReturn(blobAsyncClient);
        when(blobAsyncClient.asBlockBlobAsyncClient()).thenReturn(blockBlobAsyncClient);
        // Diamond operator fixes the raw-type ResponseBase construction.
        when(blockBlobAsyncClient.uploadWithResponse(any(Flux.class), eq(0L), isNull(), any(Metadata.class),
            any(BlobAccessConditions.class)))
            .thenReturn(Mono.just(new ResponseBase<>(null, 200, httpHeaders, null, null)));
        BlobPartitionManager blobPartitionManager = new BlobPartitionManager(containerAsyncClient);
        StepVerifier.create(blobPartitionManager.claimOwnership(po))
            .assertNext(partitionOwnership -> {
                assertEquals("owner1", partitionOwnership.ownerId());
                assertEquals("0", partitionOwnership.partitionId());
                assertEquals("eh", partitionOwnership.eventHubName());
                assertEquals("cg", partitionOwnership.consumerGroupName());
                assertEquals("etag2", partitionOwnership.eTag());
                assertNull(partitionOwnership.sequenceNumber());
                assertNull(partitionOwnership.offset());
            }).verifyComplete();
    }

    // Builds an ownership request with no ETag/offset/sequence number (a fresh claim).
    private PartitionOwnership createPartitionOwnership(String eventHubName, String consumerGroupName,
        String partitionId, String ownerId) {
        return new PartitionOwnership()
            .eventHubName(eventHubName)
            .consumerGroupName(consumerGroupName)
            .partitionId(partitionId)
            .ownerId(ownerId);
    }

    // Builds a listed BlobItem carrying ownership metadata and properties for tests.
    private BlobItem getBlobItem(String owner, String sequenceNumber, String offset, String etag, String blobName) {
        Metadata metadata = getMetadata(owner, sequenceNumber, offset);
        BlobProperties properties = new BlobProperties()
            .lastModified(OffsetDateTime.now())
            .etag(etag);
        return new BlobItem()
            .name(blobName)
            .metadata(metadata)
            .properties(properties);
    }
}
class BlobPartitionManagerTest { @Mock private ContainerAsyncClient containerAsyncClient; @Mock private BlockBlobAsyncClient blockBlobAsyncClient; @Mock private BlobAsyncClient blobAsyncClient; @Before public void setup() { MockitoAnnotations.initMocks(this); } @Test public void testListOwnerShip() { BlobPartitionManager blobPartitionManager = new BlobPartitionManager(containerAsyncClient); BlobItem blobItem = getBlobItem("owner1", "1", "230", "etag", "eh/cg/0"); PagedFlux<BlobItem> response = new PagedFlux<BlobItem>(() -> Mono.just(new PagedResponseBase<HttpHeaders, BlobItem>(null, 200, null, Arrays.asList(blobItem), null, null))); when(containerAsyncClient.listBlobsFlat(any(ListBlobsOptions.class))).thenReturn(response); StepVerifier.create(blobPartitionManager.listOwnership("eh", "cg")) .assertNext(partitionOwnership -> { assertEquals("owner1", partitionOwnership.ownerId()); assertEquals("0", partitionOwnership.partitionId()); assertEquals(1, (long) partitionOwnership.sequenceNumber()); assertEquals(230, (long) partitionOwnership.offset()); assertEquals("eh", partitionOwnership.eventHubName()); assertEquals("cg", partitionOwnership.consumerGroupName()); assertEquals("etag", partitionOwnership.eTag()); }).verifyComplete(); } @Test public void testUpdateCheckpoint() { Checkpoint checkpoint = new Checkpoint() .eventHubName("eh") .consumerGroupName("cg") .ownerId("owner1") .partitionId("0") .eTag("etag") .sequenceNumber(2L) .offset(100L); Map<String, String> headers = new HashMap<>(); headers.put("eTag", "etag2"); when(containerAsyncClient.getBlobAsyncClient("eh/cg/0")).thenReturn(blobAsyncClient); when(blobAsyncClient.setMetadataWithResponse(any(Metadata.class), any(BlobAccessConditions.class))) .thenReturn(Mono.just(new VoidResponse(null, 200, new HttpHeaders(headers)))); BlobPartitionManager blobPartitionManager = new BlobPartitionManager(containerAsyncClient); StepVerifier.create(blobPartitionManager.updateCheckpoint(checkpoint)) .assertNext(etag -> 
assertEquals("etag2", etag)).verifyComplete(); } @SuppressWarnings("unchecked") @Test public void testClaimOwnership() { PartitionOwnership po = createPartitionOwnership("eh", "cg", "0", "owner1"); HttpHeaders httpHeaders = new HttpHeaders(); httpHeaders.put("eTag", "etag2"); when(containerAsyncClient.getBlobAsyncClient("eh/cg/0")).thenReturn(blobAsyncClient); when(blobAsyncClient.asBlockBlobAsyncClient()).thenReturn(blockBlobAsyncClient); when(blockBlobAsyncClient.uploadWithResponse(ArgumentMatchers.<Flux<ByteBuffer>>any(), eq(0L), isNull(), any(Metadata.class), any(BlobAccessConditions.class))) .thenReturn(Mono.just(new ResponseBase<>(null, 200, httpHeaders, null, null))); BlobPartitionManager blobPartitionManager = new BlobPartitionManager(containerAsyncClient); StepVerifier.create(blobPartitionManager.claimOwnership(po)) .assertNext(partitionOwnership -> { assertEquals("owner1", partitionOwnership.ownerId()); assertEquals("0", partitionOwnership.partitionId()); assertEquals("eh", partitionOwnership.eventHubName()); assertEquals("cg", partitionOwnership.consumerGroupName()); assertEquals("etag2", partitionOwnership.eTag()); assertNull(partitionOwnership.sequenceNumber()); assertNull(partitionOwnership.offset()); }).verifyComplete(); } @Test public void testListOwnershipError() { BlobPartitionManager blobPartitionManager = new BlobPartitionManager(containerAsyncClient); PagedFlux<BlobItem> response = new PagedFlux<>(() -> Mono.error(new SocketTimeoutException())); when(containerAsyncClient.listBlobsFlat(any(ListBlobsOptions.class))).thenReturn(response); StepVerifier.create(blobPartitionManager.listOwnership("eh", "cg")) .expectError(SocketTimeoutException.class).verify(); } @Test public void testUpdateCheckpointError() { Checkpoint checkpoint = new Checkpoint() .eventHubName("eh") .consumerGroupName("cg") .ownerId("owner1") .partitionId("0") .eTag("etag") .sequenceNumber(2L) .offset(100L); Map<String, String> headers = new HashMap<>(); headers.put("eTag", 
"etag2"); when(containerAsyncClient.getBlobAsyncClient("eh/cg/0")).thenReturn(blobAsyncClient); when(blobAsyncClient.setMetadataWithResponse(any(Metadata.class), any(BlobAccessConditions.class))) .thenReturn(Mono.error(new SocketTimeoutException())); BlobPartitionManager blobPartitionManager = new BlobPartitionManager(containerAsyncClient); StepVerifier.create(blobPartitionManager.updateCheckpoint(checkpoint)) .expectError(SocketTimeoutException.class).verify(); } @Test public void testFailedOwnershipClaim() { PartitionOwnership po = createPartitionOwnership("eh", "cg", "0", "owner1"); HttpHeaders httpHeaders = new HttpHeaders(); httpHeaders.put("eTag", "etag2"); when(containerAsyncClient.getBlobAsyncClient("eh/cg/0")).thenReturn(blobAsyncClient); when(blobAsyncClient.asBlockBlobAsyncClient()).thenReturn(blockBlobAsyncClient); when(blockBlobAsyncClient.uploadWithResponse(ArgumentMatchers.<Flux<ByteBuffer>>any(), eq(0L), isNull(), any(Metadata.class), any(BlobAccessConditions.class))) .thenReturn(Mono.error(new ResourceModifiedException("Etag did not match", null))); BlobPartitionManager blobPartitionManager = new BlobPartitionManager(containerAsyncClient); StepVerifier.create(blobPartitionManager.claimOwnership(po)).verifyComplete(); } private PartitionOwnership createPartitionOwnership(String eventHubName, String consumerGroupName, String partitionId, String ownerId) { return new PartitionOwnership() .eventHubName(eventHubName) .consumerGroupName(consumerGroupName) .partitionId(partitionId) .ownerId(ownerId); } private BlobItem getBlobItem(String owner, String sequenceNumber, String offset, String etag, String blobName) { Metadata metadata = getMetadata(owner, sequenceNumber, offset); BlobProperties properties = new BlobProperties() .lastModified(OffsetDateTime.now()) .etag(etag); return new BlobItem() .name(blobName) .metadata(metadata) .properties(properties); } }
Yeah, I discussed this with Shivangi and we will all use the same metadata key names for interoperability. I have a comment in the source class to use the same keys in all languages.
/**
 * Builds the blob {@code Metadata} used by the tests, carrying the checkpoint
 * fields ("ownerId", "sequenceNumber", "offset") that the partition manager
 * reads back when listing ownership.
 */
private Metadata getMetadata(String owner, String sequenceNumber, String offset) {
    Metadata blobMetadata = new Metadata();
    // Keys mirror the constants used by BlobPartitionManager; values are raw strings.
    blobMetadata.put("ownerId", owner);
    blobMetadata.put("sequenceNumber", sequenceNumber);
    blobMetadata.put("offset", offset);
    return blobMetadata;
}
Metadata metadata = new Metadata();
/**
 * Creates blob {@code Metadata} for the tests with the checkpoint keys
 * ("OwnerId", "SequenceNumber", "Offset") that BlobPartitionManager expects.
 */
private Metadata getMetadata(String owner, String sequenceNumber, String offset) {
    Metadata checkpointMetadata = new Metadata();
    // PascalCase keys match the cross-language naming agreed for interoperability.
    checkpointMetadata.put("OwnerId", owner);
    checkpointMetadata.put("SequenceNumber", sequenceNumber);
    checkpointMetadata.put("Offset", offset);
    return checkpointMetadata;
}
class BlobPartitionManagerTest { @Mock private ContainerAsyncClient containerAsyncClient; @Mock private BlockBlobAsyncClient blockBlobAsyncClient; @Mock private BlobAsyncClient blobAsyncClient; @Before public void setup() { MockitoAnnotations.initMocks(this); } @Test public void testListOwnerShip() { BlobPartitionManager blobPartitionManager = new BlobPartitionManager(containerAsyncClient); BlobItem blobItem = getBlobItem("owner1", "1", "230", "etag", "eh/cg/0"); PagedFlux<BlobItem> response = new PagedFlux<BlobItem>(() -> Mono.just(new PagedResponseBase<HttpHeaders, BlobItem>(null, 200, null, Arrays.asList(blobItem), null, null))); when(containerAsyncClient.listBlobsFlat(any(ListBlobsOptions.class))).thenReturn(response); StepVerifier.create(blobPartitionManager.listOwnership("eh", "cg")) .assertNext(partitionOwnership -> { assertEquals("owner1", partitionOwnership.ownerId()); assertEquals("0", partitionOwnership.partitionId()); assertEquals(1, (long) partitionOwnership.sequenceNumber()); assertEquals("230", partitionOwnership.offset()); assertEquals("eh", partitionOwnership.eventHubName()); assertEquals("cg", partitionOwnership.consumerGroupName()); assertEquals("etag", partitionOwnership.eTag()); }).verifyComplete(); } @Test public void testUpdateCheckpoint() { Checkpoint checkpoint = new Checkpoint() .eventHubName("eh") .consumerGroupName("cg") .ownerId("owner1") .partitionId("0") .eTag("etag") .sequenceNumber(2L) .offset("100"); Map<String, String> headers = new HashMap<>(); headers.put("eTag", "etag2"); when(containerAsyncClient.getBlobAsyncClient("eh/cg/0")).thenReturn(blobAsyncClient); when(blobAsyncClient.setMetadataWithResponse(any(Metadata.class), any(BlobAccessConditions.class))) .thenReturn(Mono.just(new VoidResponse(null, 200, new HttpHeaders(headers)))); BlobPartitionManager blobPartitionManager = new BlobPartitionManager(containerAsyncClient); StepVerifier.create(blobPartitionManager.updateCheckpoint(checkpoint)) .assertNext(etag -> 
assertEquals("etag2", etag)).verifyComplete(); } @Test public void testClaimOwnership() { PartitionOwnership po = createPartitionOwnership("eh", "cg", "0", "owner1"); HttpHeaders httpHeaders = new HttpHeaders(); httpHeaders.put("eTag", "etag2"); when(containerAsyncClient.getBlobAsyncClient("eh/cg/0")).thenReturn(blobAsyncClient); when(blobAsyncClient.asBlockBlobAsyncClient()).thenReturn(blockBlobAsyncClient); when(blockBlobAsyncClient.uploadWithResponse(any(Flux.class), eq(0L), isNull(), any(Metadata.class), any(BlobAccessConditions.class))) .thenReturn(Mono.just(new ResponseBase(null, 200, httpHeaders, null, null))); BlobPartitionManager blobPartitionManager = new BlobPartitionManager(containerAsyncClient); StepVerifier.create(blobPartitionManager.claimOwnership(po)) .assertNext(partitionOwnership -> { assertEquals("owner1", partitionOwnership.ownerId()); assertEquals("0", partitionOwnership.partitionId()); assertEquals("eh", partitionOwnership.eventHubName()); assertEquals("cg", partitionOwnership.consumerGroupName()); assertEquals("etag2", partitionOwnership.eTag()); assertNull(partitionOwnership.sequenceNumber()); assertNull(partitionOwnership.offset()); }).verifyComplete(); } private PartitionOwnership createPartitionOwnership(String eventHubName, String consumerGroupName, String partitionId, String ownerId) { return new PartitionOwnership() .eventHubName(eventHubName) .consumerGroupName(consumerGroupName) .partitionId(partitionId) .ownerId(ownerId); } private BlobItem getBlobItem(String owner, String sequenceNumber, String offset, String etag, String blobName) { Metadata metadata = getMetadata(owner, sequenceNumber, offset); BlobProperties properties = new BlobProperties() .lastModified(OffsetDateTime.now()) .etag(etag); return new BlobItem() .name(blobName) .metadata(metadata) .properties(properties); } }
class BlobPartitionManagerTest { @Mock private ContainerAsyncClient containerAsyncClient; @Mock private BlockBlobAsyncClient blockBlobAsyncClient; @Mock private BlobAsyncClient blobAsyncClient; @Before public void setup() { MockitoAnnotations.initMocks(this); } @Test public void testListOwnerShip() { BlobPartitionManager blobPartitionManager = new BlobPartitionManager(containerAsyncClient); BlobItem blobItem = getBlobItem("owner1", "1", "230", "etag", "eh/cg/0"); PagedFlux<BlobItem> response = new PagedFlux<BlobItem>(() -> Mono.just(new PagedResponseBase<HttpHeaders, BlobItem>(null, 200, null, Arrays.asList(blobItem), null, null))); when(containerAsyncClient.listBlobsFlat(any(ListBlobsOptions.class))).thenReturn(response); StepVerifier.create(blobPartitionManager.listOwnership("eh", "cg")) .assertNext(partitionOwnership -> { assertEquals("owner1", partitionOwnership.ownerId()); assertEquals("0", partitionOwnership.partitionId()); assertEquals(1, (long) partitionOwnership.sequenceNumber()); assertEquals(230, (long) partitionOwnership.offset()); assertEquals("eh", partitionOwnership.eventHubName()); assertEquals("cg", partitionOwnership.consumerGroupName()); assertEquals("etag", partitionOwnership.eTag()); }).verifyComplete(); } @Test public void testUpdateCheckpoint() { Checkpoint checkpoint = new Checkpoint() .eventHubName("eh") .consumerGroupName("cg") .ownerId("owner1") .partitionId("0") .eTag("etag") .sequenceNumber(2L) .offset(100L); Map<String, String> headers = new HashMap<>(); headers.put("eTag", "etag2"); when(containerAsyncClient.getBlobAsyncClient("eh/cg/0")).thenReturn(blobAsyncClient); when(blobAsyncClient.setMetadataWithResponse(any(Metadata.class), any(BlobAccessConditions.class))) .thenReturn(Mono.just(new VoidResponse(null, 200, new HttpHeaders(headers)))); BlobPartitionManager blobPartitionManager = new BlobPartitionManager(containerAsyncClient); StepVerifier.create(blobPartitionManager.updateCheckpoint(checkpoint)) .assertNext(etag -> 
assertEquals("etag2", etag)).verifyComplete(); } @SuppressWarnings("unchecked") @Test public void testClaimOwnership() { PartitionOwnership po = createPartitionOwnership("eh", "cg", "0", "owner1"); HttpHeaders httpHeaders = new HttpHeaders(); httpHeaders.put("eTag", "etag2"); when(containerAsyncClient.getBlobAsyncClient("eh/cg/0")).thenReturn(blobAsyncClient); when(blobAsyncClient.asBlockBlobAsyncClient()).thenReturn(blockBlobAsyncClient); when(blockBlobAsyncClient.uploadWithResponse(ArgumentMatchers.<Flux<ByteBuffer>>any(), eq(0L), isNull(), any(Metadata.class), any(BlobAccessConditions.class))) .thenReturn(Mono.just(new ResponseBase<>(null, 200, httpHeaders, null, null))); BlobPartitionManager blobPartitionManager = new BlobPartitionManager(containerAsyncClient); StepVerifier.create(blobPartitionManager.claimOwnership(po)) .assertNext(partitionOwnership -> { assertEquals("owner1", partitionOwnership.ownerId()); assertEquals("0", partitionOwnership.partitionId()); assertEquals("eh", partitionOwnership.eventHubName()); assertEquals("cg", partitionOwnership.consumerGroupName()); assertEquals("etag2", partitionOwnership.eTag()); assertNull(partitionOwnership.sequenceNumber()); assertNull(partitionOwnership.offset()); }).verifyComplete(); } @Test public void testListOwnershipError() { BlobPartitionManager blobPartitionManager = new BlobPartitionManager(containerAsyncClient); PagedFlux<BlobItem> response = new PagedFlux<>(() -> Mono.error(new SocketTimeoutException())); when(containerAsyncClient.listBlobsFlat(any(ListBlobsOptions.class))).thenReturn(response); StepVerifier.create(blobPartitionManager.listOwnership("eh", "cg")) .expectError(SocketTimeoutException.class).verify(); } @Test public void testUpdateCheckpointError() { Checkpoint checkpoint = new Checkpoint() .eventHubName("eh") .consumerGroupName("cg") .ownerId("owner1") .partitionId("0") .eTag("etag") .sequenceNumber(2L) .offset(100L); Map<String, String> headers = new HashMap<>(); headers.put("eTag", 
"etag2"); when(containerAsyncClient.getBlobAsyncClient("eh/cg/0")).thenReturn(blobAsyncClient); when(blobAsyncClient.setMetadataWithResponse(any(Metadata.class), any(BlobAccessConditions.class))) .thenReturn(Mono.error(new SocketTimeoutException())); BlobPartitionManager blobPartitionManager = new BlobPartitionManager(containerAsyncClient); StepVerifier.create(blobPartitionManager.updateCheckpoint(checkpoint)) .expectError(SocketTimeoutException.class).verify(); } @Test public void testFailedOwnershipClaim() { PartitionOwnership po = createPartitionOwnership("eh", "cg", "0", "owner1"); HttpHeaders httpHeaders = new HttpHeaders(); httpHeaders.put("eTag", "etag2"); when(containerAsyncClient.getBlobAsyncClient("eh/cg/0")).thenReturn(blobAsyncClient); when(blobAsyncClient.asBlockBlobAsyncClient()).thenReturn(blockBlobAsyncClient); when(blockBlobAsyncClient.uploadWithResponse(ArgumentMatchers.<Flux<ByteBuffer>>any(), eq(0L), isNull(), any(Metadata.class), any(BlobAccessConditions.class))) .thenReturn(Mono.error(new ResourceModifiedException("Etag did not match", null))); BlobPartitionManager blobPartitionManager = new BlobPartitionManager(containerAsyncClient); StepVerifier.create(blobPartitionManager.claimOwnership(po)).verifyComplete(); } private PartitionOwnership createPartitionOwnership(String eventHubName, String consumerGroupName, String partitionId, String ownerId) { return new PartitionOwnership() .eventHubName(eventHubName) .consumerGroupName(consumerGroupName) .partitionId(partitionId) .ownerId(ownerId); } private BlobItem getBlobItem(String owner, String sequenceNumber, String offset, String etag, String blobName) { Metadata metadata = getMetadata(owner, sequenceNumber, offset); BlobProperties properties = new BlobProperties() .lastModified(OffsetDateTime.now()) .etag(etag); return new BlobItem() .name(blobName) .metadata(metadata) .properties(properties); } }
Yeah, it did. Fixed now.
/**
 * Persists the given checkpoint (sequence number / offset) into the metadata of the
 * blob that backs this partition, conditioned on the checkpoint's current ETag.
 *
 * @param checkpoint Checkpoint to persist; at least one of sequence number or offset
 *     must be non-null.
 * @return A {@link Mono} emitting the new ETag returned by the blob service on a
 *     successful metadata update.
 * @throws IllegalStateException (propagated) if both sequence number and offset are null.
 */
public Mono<String> updateCheckpoint(Checkpoint checkpoint) {
    if (checkpoint.sequenceNumber() == null && checkpoint.offset() == null) {
        throw logger.logExceptionAsWarning(Exceptions
            .propagate(new IllegalStateException(
                "Both sequence number and offset cannot be null when updating a checkpoint")));
    }
    String partitionId = checkpoint.partitionId();
    String blobName = getBlobName(checkpoint.eventHubName(), checkpoint.consumerGroupName(), partitionId);
    // computeIfAbsent is atomic on a ConcurrentHashMap; the previous containsKey/put
    // check-then-act sequence could create duplicate clients under concurrent calls.
    BlobAsyncClient blobAsyncClient =
        blobClients.computeIfAbsent(blobName, containerAsyncClient::getBlobAsyncClient);
    Metadata metadata = new Metadata();
    // Metadata values are strings; sequenceNumber may legitimately be null here.
    // NOTE(review): confirm Metadata tolerates null values for omitted fields.
    String sequenceNumber = checkpoint.sequenceNumber() == null ? null
        : String.valueOf(checkpoint.sequenceNumber());
    metadata.put(SEQUENCE_NUMBER, sequenceNumber);
    metadata.put(OFFSET, checkpoint.offset());
    metadata.put(OWNER_ID, checkpoint.ownerId());
    // Conditional update (If-Match on the last-seen ETag) so a competing owner that
    // already moved the checkpoint causes this update to fail instead of clobbering it.
    BlobAccessConditions blobAccessConditions = new BlobAccessConditions()
        .modifiedAccessConditions(new ModifiedAccessConditions().ifMatch(checkpoint.eTag()));
    return blobAsyncClient.setMetadataWithResponse(metadata, blobAccessConditions)
        .map(response -> response.headers().get(ETAG).value());
}
String sequenceNumber = checkpoint.sequenceNumber() == null ? null :
/**
 * Persists the given checkpoint (sequence number / offset) into the metadata of the
 * blob that backs this partition, conditioned on the checkpoint's current ETag.
 *
 * @param checkpoint Checkpoint to persist; at least one of sequence number or offset
 *     must be non-null.
 * @return A {@link Mono} emitting the new ETag returned by the blob service on a
 *     successful metadata update.
 * @throws IllegalStateException (propagated) if both sequence number and offset are null.
 */
public Mono<String> updateCheckpoint(Checkpoint checkpoint) {
    if (checkpoint.sequenceNumber() == null && checkpoint.offset() == null) {
        throw logger.logExceptionAsWarning(Exceptions
            .propagate(new IllegalStateException(
                "Both sequence number and offset cannot be null when updating a checkpoint")));
    }
    String partitionId = checkpoint.partitionId();
    String blobName = getBlobName(checkpoint.eventHubName(), checkpoint.consumerGroupName(), partitionId);
    // computeIfAbsent is atomic on a ConcurrentHashMap; the previous containsKey/put
    // check-then-act sequence could create duplicate clients under concurrent calls.
    BlobAsyncClient blobAsyncClient =
        blobClients.computeIfAbsent(blobName, containerAsyncClient::getBlobAsyncClient);
    Metadata metadata = new Metadata();
    // Metadata values are strings; either field may legitimately be null here.
    // NOTE(review): confirm Metadata tolerates null values for omitted fields.
    String sequenceNumber = checkpoint.sequenceNumber() == null ? null
        : String.valueOf(checkpoint.sequenceNumber());
    String offset = checkpoint.offset() == null ? null : String.valueOf(checkpoint.offset());
    metadata.put(SEQUENCE_NUMBER, sequenceNumber);
    metadata.put(OFFSET, offset);
    metadata.put(OWNER_ID, checkpoint.ownerId());
    // Conditional update (If-Match on the last-seen ETag) so a competing owner that
    // already moved the checkpoint causes this update to fail instead of clobbering it.
    BlobAccessConditions blobAccessConditions = new BlobAccessConditions()
        .modifiedAccessConditions(new ModifiedAccessConditions().ifMatch(checkpoint.eTag()));
    return blobAsyncClient.setMetadataWithResponse(metadata, blobAccessConditions)
        .map(response -> response.headers().get(ETAG).value());
}
class BlobPartitionManager implements PartitionManager { private static final String SEQUENCE_NUMBER = "sequenceNumber"; private static final String OFFSET = "offset"; private static final String OWNER_ID = "ownerId"; private static final String ETAG = "eTag"; private static final String BLOB_PATH_SEPARATOR = "/"; private static final ByteBuffer UPLOAD_DATA = ByteBuffer.wrap("" .getBytes(UTF_8)); private final ContainerAsyncClient containerAsyncClient; private final ClientLogger logger = new ClientLogger(BlobPartitionManager.class); private final Map<String, BlobAsyncClient> blobClients = new ConcurrentHashMap<>(); /** * Creates an instance of BlobPartitionManager. * * @param containerAsyncClient The {@link ContainerAsyncClient} this instance will use to read and update blobs in * the storage container. */ public BlobPartitionManager(ContainerAsyncClient containerAsyncClient) { this.containerAsyncClient = containerAsyncClient; } /** * {@inheritDoc} * * @param eventHubName The Event Hub name to get ownership information. * @param consumerGroupName The consumer group name to get ownership information. * @return A {@link Flux} of current partition ownership information. */ @Override public Flux<PartitionOwnership> listOwnership(String eventHubName, String consumerGroupName) { String prefix = getBlobPrefix(eventHubName, consumerGroupName); BlobListDetails details = new BlobListDetails().metadata(true); ListBlobsOptions options = new ListBlobsOptions().prefix(prefix).details(details); return containerAsyncClient.listBlobsFlat(options) .filter(blobItem -> blobItem.name().split(BLOB_PATH_SEPARATOR).length == 3) .map(this::convertToPartitionOwnership); } /** * {@inheritDoc} * * @param requestedPartitionOwnerships Array of partition ownerships this instance is requesting to own. * @return A {@link Flux} of successfully claimed ownership of partitions. */ @Override public Flux<PartitionOwnership> claimOwnership(PartitionOwnership... 
requestedPartitionOwnerships) { return Flux.fromArray(requestedPartitionOwnerships).flatMap( partitionOwnership -> { String partitionId = partitionOwnership.partitionId(); String blobName = getBlobName(partitionOwnership.eventHubName(), partitionOwnership.consumerGroupName(), partitionId); if (!blobClients.containsKey(blobName)) { blobClients.put(blobName, containerAsyncClient.getBlobAsyncClient(blobName)); } BlobAsyncClient blobAsyncClient = blobClients.get(blobName); Metadata metadata = new Metadata(); metadata.put(OWNER_ID, partitionOwnership.ownerId()); metadata.put(OFFSET, partitionOwnership.offset()); Long sequenceNumber = partitionOwnership.sequenceNumber(); metadata.put(SEQUENCE_NUMBER, sequenceNumber == null ? null : String.valueOf(sequenceNumber)); BlobAccessConditions blobAccessConditions = new BlobAccessConditions(); if (ImplUtils.isNullOrEmpty(partitionOwnership.eTag())) { blobAccessConditions.modifiedAccessConditions(new ModifiedAccessConditions() .ifNoneMatch("*")); return blobAsyncClient.asBlockBlobAsyncClient() .uploadWithResponse(Flux.just(UPLOAD_DATA), 0, null, metadata, blobAccessConditions) .flatMapMany(response -> { partitionOwnership.eTag(response.headers().get(ETAG).value()); return Mono.just(partitionOwnership); }, error -> { logger.info("Couldn't claim ownership of partition {}, error {}", partitionId, error.getMessage()); return Mono.empty(); }, Mono::empty); } else { blobAccessConditions.modifiedAccessConditions(new ModifiedAccessConditions() .ifMatch(partitionOwnership.eTag())); return blobAsyncClient.setMetadataWithResponse(metadata, blobAccessConditions) .flatMapMany(response -> { partitionOwnership.eTag(response.headers().get(ETAG).value()); return Mono.just(partitionOwnership); }, error -> { logger.info("Couldn't claim ownership of partition {}, error {}", partitionId, error.getMessage()); return Mono.empty(); }, () -> Mono.empty()); } } ); } /** * {@inheritDoc} * * @param checkpoint Checkpoint information containing sequence number 
and offset to be stored for this partition. * @return A {@link Mono} containing the new ETag generated from a successful checkpoint update. */ @Override private String getBlobPrefix(String eventHubName, String consumerGroupName) { return eventHubName + BLOB_PATH_SEPARATOR + consumerGroupName; } private String getBlobName(String eventHubName, String consumerGroupName, String partitionId) { return eventHubName + BLOB_PATH_SEPARATOR + consumerGroupName + BLOB_PATH_SEPARATOR + partitionId; } private PartitionOwnership convertToPartitionOwnership(BlobItem blobItem) { PartitionOwnership partitionOwnership = new PartitionOwnership(); logger.info("Found blob for partition {}", blobItem.name()); String[] names = blobItem.name().split(BLOB_PATH_SEPARATOR); partitionOwnership.eventHubName(names[0]); partitionOwnership.consumerGroupName(names[1]); partitionOwnership.partitionId(names[2]); if (ImplUtils.isNullOrEmpty(blobItem.metadata())) { logger.warning("No metadata available for blob {}", blobItem.name()); return partitionOwnership; } blobItem.metadata().entrySet().stream() .forEach(entry -> { switch (entry.getKey()) { case OWNER_ID: partitionOwnership.ownerId(entry.getValue()); break; case SEQUENCE_NUMBER: partitionOwnership.sequenceNumber(Long.valueOf(entry.getValue())); break; case OFFSET: partitionOwnership.offset(entry.getValue()); break; default: break; } }); BlobProperties blobProperties = blobItem.properties(); partitionOwnership.lastModifiedTime(blobProperties.lastModified().toInstant().toEpochMilli()); partitionOwnership.eTag(blobProperties.etag()); return partitionOwnership; } }
class BlobPartitionManager implements PartitionManager { private static final String SEQUENCE_NUMBER = "SequenceNumber"; private static final String OFFSET = "Offset"; private static final String OWNER_ID = "OwnerId"; private static final String ETAG = "eTag"; private static final String BLOB_PATH_SEPARATOR = "/"; private static final ByteBuffer UPLOAD_DATA = ByteBuffer.wrap("" .getBytes(UTF_8)); private final ContainerAsyncClient containerAsyncClient; private final ClientLogger logger = new ClientLogger(BlobPartitionManager.class); private final Map<String, BlobAsyncClient> blobClients = new ConcurrentHashMap<>(); /** * Creates an instance of BlobPartitionManager. * * @param containerAsyncClient The {@link ContainerAsyncClient} this instance will use to read and update blobs in * the storage container. */ public BlobPartitionManager(ContainerAsyncClient containerAsyncClient) { this.containerAsyncClient = containerAsyncClient; } /** * This method is called by the {@link EventProcessor} to get the list of all existing partition ownership from the * Storage Blobs. Could return empty results if there are is no existing ownership information. * * @param eventHubName The Event Hub name to get ownership information. * @param consumerGroupName The consumer group name. * @return A flux of partition ownership details of all the partitions that have/had an owner. */ @Override public Flux<PartitionOwnership> listOwnership(String eventHubName, String consumerGroupName) { String prefix = getBlobPrefix(eventHubName, consumerGroupName); BlobListDetails details = new BlobListDetails().metadata(true); ListBlobsOptions options = new ListBlobsOptions().prefix(prefix).details(details); return containerAsyncClient.listBlobsFlat(options) .filter(blobItem -> blobItem.name().split(BLOB_PATH_SEPARATOR).length == 3) .map(this::convertToPartitionOwnership); } /** * This method is called by the {@link EventProcessor} to claim ownership of a list of partitions. 
This will return * the list of partitions that were owned successfully. * * @param requestedPartitionOwnerships Array of partition ownerships this instance is requesting to own. * @return A flux of partitions this instance successfully claimed ownership. */ @Override public Flux<PartitionOwnership> claimOwnership(PartitionOwnership... requestedPartitionOwnerships) { return Flux.fromArray(requestedPartitionOwnerships).flatMap( partitionOwnership -> { String partitionId = partitionOwnership.partitionId(); String blobName = getBlobName(partitionOwnership.eventHubName(), partitionOwnership.consumerGroupName(), partitionId); if (!blobClients.containsKey(blobName)) { blobClients.put(blobName, containerAsyncClient.getBlobAsyncClient(blobName)); } BlobAsyncClient blobAsyncClient = blobClients.get(blobName); Metadata metadata = new Metadata(); metadata.put(OWNER_ID, partitionOwnership.ownerId()); Long offset = partitionOwnership.offset(); metadata.put(OFFSET, offset == null ? null : String.valueOf(offset)); Long sequenceNumber = partitionOwnership.sequenceNumber(); metadata.put(SEQUENCE_NUMBER, sequenceNumber == null ? 
null : String.valueOf(sequenceNumber)); BlobAccessConditions blobAccessConditions = new BlobAccessConditions(); if (ImplUtils.isNullOrEmpty(partitionOwnership.eTag())) { blobAccessConditions.modifiedAccessConditions(new ModifiedAccessConditions() .ifNoneMatch("*")); return blobAsyncClient.asBlockBlobAsyncClient() .uploadWithResponse(Flux.just(UPLOAD_DATA), 0, null, metadata, blobAccessConditions) .flatMapMany(response -> { partitionOwnership.eTag(response.headers().get(ETAG).value()); return Mono.just(partitionOwnership); }, error -> { logger.info("Couldn't claim ownership of partition {}, error {}", partitionId, error.getMessage()); return Mono.empty(); }, Mono::empty); } else { blobAccessConditions.modifiedAccessConditions(new ModifiedAccessConditions() .ifMatch(partitionOwnership.eTag())); return blobAsyncClient.setMetadataWithResponse(metadata, blobAccessConditions) .flatMapMany(response -> { partitionOwnership.eTag(response.headers().get(ETAG).value()); return Mono.just(partitionOwnership); }, error -> { logger.info("Couldn't claim ownership of partition {}, error {}", partitionId, error.getMessage()); return Mono.empty(); }, () -> Mono.empty()); } } ); } /** * Updates the checkpoint in Storage Blobs for a partition. * * @param checkpoint Checkpoint information containing sequence number and offset to be stored for this partition. * @return The new ETag on successful update. 
*/ @Override private String getBlobPrefix(String eventHubName, String consumerGroupName) { return eventHubName + BLOB_PATH_SEPARATOR + consumerGroupName; } private String getBlobName(String eventHubName, String consumerGroupName, String partitionId) { return eventHubName + BLOB_PATH_SEPARATOR + consumerGroupName + BLOB_PATH_SEPARATOR + partitionId; } private PartitionOwnership convertToPartitionOwnership(BlobItem blobItem) { PartitionOwnership partitionOwnership = new PartitionOwnership(); logger.info("Found blob for partition {}", blobItem.name()); String[] names = blobItem.name().split(BLOB_PATH_SEPARATOR); partitionOwnership.eventHubName(names[0]); partitionOwnership.consumerGroupName(names[1]); partitionOwnership.partitionId(names[2]); if (ImplUtils.isNullOrEmpty(blobItem.metadata())) { logger.warning("No metadata available for blob {}", blobItem.name()); return partitionOwnership; } blobItem.metadata().entrySet().stream() .forEach(entry -> { switch (entry.getKey()) { case OWNER_ID: partitionOwnership.ownerId(entry.getValue()); break; case SEQUENCE_NUMBER: partitionOwnership.sequenceNumber(Long.valueOf(entry.getValue())); break; case OFFSET: partitionOwnership.offset(Long.valueOf(entry.getValue())); break; default: break; } }); BlobProperties blobProperties = blobItem.properties(); partitionOwnership.lastModifiedTime(blobProperties.lastModified().toInstant().toEpochMilli()); partitionOwnership.eTag(blobProperties.etag()); return partitionOwnership; } }
Also, started a thread on Teams to finalize the name.
/**
 * Assembles test blob {@code Metadata} holding the checkpoint fields
 * ("ownerId", "sequenceNumber", "offset") read back by the partition manager.
 */
private Metadata getMetadata(String owner, String sequenceNumber, String offset) {
    Metadata md = new Metadata();
    // Keys must match the constants BlobPartitionManager looks for.
    md.put("ownerId", owner);
    md.put("sequenceNumber", sequenceNumber);
    md.put("offset", offset);
    return md;
}
Metadata metadata = new Metadata();
/**
 * Assembles test blob {@code Metadata} with the PascalCase checkpoint keys
 * ("OwnerId", "SequenceNumber", "Offset") used across language SDKs.
 */
private Metadata getMetadata(String owner, String sequenceNumber, String offset) {
    Metadata md = new Metadata();
    // Keys must match the constants BlobPartitionManager looks for.
    md.put("OwnerId", owner);
    md.put("SequenceNumber", sequenceNumber);
    md.put("Offset", offset);
    return md;
}
class BlobPartitionManagerTest { @Mock private ContainerAsyncClient containerAsyncClient; @Mock private BlockBlobAsyncClient blockBlobAsyncClient; @Mock private BlobAsyncClient blobAsyncClient; @Before public void setup() { MockitoAnnotations.initMocks(this); } @Test public void testListOwnerShip() { BlobPartitionManager blobPartitionManager = new BlobPartitionManager(containerAsyncClient); BlobItem blobItem = getBlobItem("owner1", "1", "230", "etag", "eh/cg/0"); PagedFlux<BlobItem> response = new PagedFlux<BlobItem>(() -> Mono.just(new PagedResponseBase<HttpHeaders, BlobItem>(null, 200, null, Arrays.asList(blobItem), null, null))); when(containerAsyncClient.listBlobsFlat(any(ListBlobsOptions.class))).thenReturn(response); StepVerifier.create(blobPartitionManager.listOwnership("eh", "cg")) .assertNext(partitionOwnership -> { assertEquals("owner1", partitionOwnership.ownerId()); assertEquals("0", partitionOwnership.partitionId()); assertEquals(1, (long) partitionOwnership.sequenceNumber()); assertEquals("230", partitionOwnership.offset()); assertEquals("eh", partitionOwnership.eventHubName()); assertEquals("cg", partitionOwnership.consumerGroupName()); assertEquals("etag", partitionOwnership.eTag()); }).verifyComplete(); } @Test public void testUpdateCheckpoint() { Checkpoint checkpoint = new Checkpoint() .eventHubName("eh") .consumerGroupName("cg") .ownerId("owner1") .partitionId("0") .eTag("etag") .sequenceNumber(2L) .offset("100"); Map<String, String> headers = new HashMap<>(); headers.put("eTag", "etag2"); when(containerAsyncClient.getBlobAsyncClient("eh/cg/0")).thenReturn(blobAsyncClient); when(blobAsyncClient.setMetadataWithResponse(any(Metadata.class), any(BlobAccessConditions.class))) .thenReturn(Mono.just(new VoidResponse(null, 200, new HttpHeaders(headers)))); BlobPartitionManager blobPartitionManager = new BlobPartitionManager(containerAsyncClient); StepVerifier.create(blobPartitionManager.updateCheckpoint(checkpoint)) .assertNext(etag -> 
assertEquals("etag2", etag)).verifyComplete(); } @Test public void testClaimOwnership() { PartitionOwnership po = createPartitionOwnership("eh", "cg", "0", "owner1"); HttpHeaders httpHeaders = new HttpHeaders(); httpHeaders.put("eTag", "etag2"); when(containerAsyncClient.getBlobAsyncClient("eh/cg/0")).thenReturn(blobAsyncClient); when(blobAsyncClient.asBlockBlobAsyncClient()).thenReturn(blockBlobAsyncClient); when(blockBlobAsyncClient.uploadWithResponse(any(Flux.class), eq(0L), isNull(), any(Metadata.class), any(BlobAccessConditions.class))) .thenReturn(Mono.just(new ResponseBase(null, 200, httpHeaders, null, null))); BlobPartitionManager blobPartitionManager = new BlobPartitionManager(containerAsyncClient); StepVerifier.create(blobPartitionManager.claimOwnership(po)) .assertNext(partitionOwnership -> { assertEquals("owner1", partitionOwnership.ownerId()); assertEquals("0", partitionOwnership.partitionId()); assertEquals("eh", partitionOwnership.eventHubName()); assertEquals("cg", partitionOwnership.consumerGroupName()); assertEquals("etag2", partitionOwnership.eTag()); assertNull(partitionOwnership.sequenceNumber()); assertNull(partitionOwnership.offset()); }).verifyComplete(); } private PartitionOwnership createPartitionOwnership(String eventHubName, String consumerGroupName, String partitionId, String ownerId) { return new PartitionOwnership() .eventHubName(eventHubName) .consumerGroupName(consumerGroupName) .partitionId(partitionId) .ownerId(ownerId); } private BlobItem getBlobItem(String owner, String sequenceNumber, String offset, String etag, String blobName) { Metadata metadata = getMetadata(owner, sequenceNumber, offset); BlobProperties properties = new BlobProperties() .lastModified(OffsetDateTime.now()) .etag(etag); return new BlobItem() .name(blobName) .metadata(metadata) .properties(properties); } }
class BlobPartitionManagerTest { @Mock private ContainerAsyncClient containerAsyncClient; @Mock private BlockBlobAsyncClient blockBlobAsyncClient; @Mock private BlobAsyncClient blobAsyncClient; @Before public void setup() { MockitoAnnotations.initMocks(this); } @Test public void testListOwnerShip() { BlobPartitionManager blobPartitionManager = new BlobPartitionManager(containerAsyncClient); BlobItem blobItem = getBlobItem("owner1", "1", "230", "etag", "eh/cg/0"); PagedFlux<BlobItem> response = new PagedFlux<BlobItem>(() -> Mono.just(new PagedResponseBase<HttpHeaders, BlobItem>(null, 200, null, Arrays.asList(blobItem), null, null))); when(containerAsyncClient.listBlobsFlat(any(ListBlobsOptions.class))).thenReturn(response); StepVerifier.create(blobPartitionManager.listOwnership("eh", "cg")) .assertNext(partitionOwnership -> { assertEquals("owner1", partitionOwnership.ownerId()); assertEquals("0", partitionOwnership.partitionId()); assertEquals(1, (long) partitionOwnership.sequenceNumber()); assertEquals(230, (long) partitionOwnership.offset()); assertEquals("eh", partitionOwnership.eventHubName()); assertEquals("cg", partitionOwnership.consumerGroupName()); assertEquals("etag", partitionOwnership.eTag()); }).verifyComplete(); } @Test public void testUpdateCheckpoint() { Checkpoint checkpoint = new Checkpoint() .eventHubName("eh") .consumerGroupName("cg") .ownerId("owner1") .partitionId("0") .eTag("etag") .sequenceNumber(2L) .offset(100L); Map<String, String> headers = new HashMap<>(); headers.put("eTag", "etag2"); when(containerAsyncClient.getBlobAsyncClient("eh/cg/0")).thenReturn(blobAsyncClient); when(blobAsyncClient.setMetadataWithResponse(any(Metadata.class), any(BlobAccessConditions.class))) .thenReturn(Mono.just(new VoidResponse(null, 200, new HttpHeaders(headers)))); BlobPartitionManager blobPartitionManager = new BlobPartitionManager(containerAsyncClient); StepVerifier.create(blobPartitionManager.updateCheckpoint(checkpoint)) .assertNext(etag -> 
assertEquals("etag2", etag)).verifyComplete(); } @SuppressWarnings("unchecked") @Test public void testClaimOwnership() { PartitionOwnership po = createPartitionOwnership("eh", "cg", "0", "owner1"); HttpHeaders httpHeaders = new HttpHeaders(); httpHeaders.put("eTag", "etag2"); when(containerAsyncClient.getBlobAsyncClient("eh/cg/0")).thenReturn(blobAsyncClient); when(blobAsyncClient.asBlockBlobAsyncClient()).thenReturn(blockBlobAsyncClient); when(blockBlobAsyncClient.uploadWithResponse(ArgumentMatchers.<Flux<ByteBuffer>>any(), eq(0L), isNull(), any(Metadata.class), any(BlobAccessConditions.class))) .thenReturn(Mono.just(new ResponseBase<>(null, 200, httpHeaders, null, null))); BlobPartitionManager blobPartitionManager = new BlobPartitionManager(containerAsyncClient); StepVerifier.create(blobPartitionManager.claimOwnership(po)) .assertNext(partitionOwnership -> { assertEquals("owner1", partitionOwnership.ownerId()); assertEquals("0", partitionOwnership.partitionId()); assertEquals("eh", partitionOwnership.eventHubName()); assertEquals("cg", partitionOwnership.consumerGroupName()); assertEquals("etag2", partitionOwnership.eTag()); assertNull(partitionOwnership.sequenceNumber()); assertNull(partitionOwnership.offset()); }).verifyComplete(); } @Test public void testListOwnershipError() { BlobPartitionManager blobPartitionManager = new BlobPartitionManager(containerAsyncClient); PagedFlux<BlobItem> response = new PagedFlux<>(() -> Mono.error(new SocketTimeoutException())); when(containerAsyncClient.listBlobsFlat(any(ListBlobsOptions.class))).thenReturn(response); StepVerifier.create(blobPartitionManager.listOwnership("eh", "cg")) .expectError(SocketTimeoutException.class).verify(); } @Test public void testUpdateCheckpointError() { Checkpoint checkpoint = new Checkpoint() .eventHubName("eh") .consumerGroupName("cg") .ownerId("owner1") .partitionId("0") .eTag("etag") .sequenceNumber(2L) .offset(100L); Map<String, String> headers = new HashMap<>(); headers.put("eTag", 
"etag2"); when(containerAsyncClient.getBlobAsyncClient("eh/cg/0")).thenReturn(blobAsyncClient); when(blobAsyncClient.setMetadataWithResponse(any(Metadata.class), any(BlobAccessConditions.class))) .thenReturn(Mono.error(new SocketTimeoutException())); BlobPartitionManager blobPartitionManager = new BlobPartitionManager(containerAsyncClient); StepVerifier.create(blobPartitionManager.updateCheckpoint(checkpoint)) .expectError(SocketTimeoutException.class).verify(); } @Test public void testFailedOwnershipClaim() { PartitionOwnership po = createPartitionOwnership("eh", "cg", "0", "owner1"); HttpHeaders httpHeaders = new HttpHeaders(); httpHeaders.put("eTag", "etag2"); when(containerAsyncClient.getBlobAsyncClient("eh/cg/0")).thenReturn(blobAsyncClient); when(blobAsyncClient.asBlockBlobAsyncClient()).thenReturn(blockBlobAsyncClient); when(blockBlobAsyncClient.uploadWithResponse(ArgumentMatchers.<Flux<ByteBuffer>>any(), eq(0L), isNull(), any(Metadata.class), any(BlobAccessConditions.class))) .thenReturn(Mono.error(new ResourceModifiedException("Etag did not match", null))); BlobPartitionManager blobPartitionManager = new BlobPartitionManager(containerAsyncClient); StepVerifier.create(blobPartitionManager.claimOwnership(po)).verifyComplete(); } private PartitionOwnership createPartitionOwnership(String eventHubName, String consumerGroupName, String partitionId, String ownerId) { return new PartitionOwnership() .eventHubName(eventHubName) .consumerGroupName(consumerGroupName) .partitionId(partitionId) .ownerId(ownerId); } private BlobItem getBlobItem(String owner, String sequenceNumber, String offset, String etag, String blobName) { Metadata metadata = getMetadata(owner, sequenceNumber, offset); BlobProperties properties = new BlobProperties() .lastModified(OffsetDateTime.now()) .etag(etag); return new BlobItem() .name(blobName) .metadata(metadata) .properties(properties); } }
Does that compile?
public static void main(String[] args) throws IllegalArgumentException { ZipkinTraceExporter.createAndRegister("http: TraceConfig traceConfig = Tracing.getTraceConfig(); TraceParams activeTraceParams = traceConfig.getActiveTraceParams(); traceConfig.updateActiveTraceParams(activeTraceParams.toBuilder().setSampler(Samplers.alwaysSample()).build()); Tracer tracer = Tracing.getTracer(); SecretAsyncClient client = new SecretClientBuilder() .endpoint("https: .credential(new DefaultAzureCredentialBuilder().build()) .buildAsyncClient(); try (Scope scope = tracer.spanBuilder("user-parent-span").startScopedSpan()) { Context traceContext = Context.of(OPENCENSUS_SPAN_KEY, tracer.getCurrentSpan()); client.setSecret(new Secret("StorageAccountPassword", "f4G34fMh8v-fdsgjsk2323=-asdsdfsdf") .expires(OffsetDateTime.now().plusYears(1))) .then(client.setSecret(new Secret("BankAccountPassword", "f4G34fMh8v") .expires(OffsetDateTime.now().plusYears(1)))) .subscriberContext(traceContext) .block(); client.listSecrets() .subscriberContext(traceContext) .subscribe(secretBase -> client.getSecret(secretBase) .subscriberContext(traceContext) .subscribe(secret -> System.out.printf("Received secret with name %s and value %s%n", secret.name(), secret.value()))); client.setSecret(, "sskdjfsdasdjsd") .subscriberContext(traceContext) .block(); client.listSecretVersions("BankAccountPassword") .subscriberContext(traceContext) .subscribe(secretBase -> System.out.printf("Received secret's version with name %s%n", secretBase.name())); } Tracing.getExportComponent().shutdown(); }
client.setSecret(, "sskdjfsdasdjsd")
public static void main(String[] args) throws IllegalArgumentException, InterruptedException { ZipkinTraceExporter.createAndRegister("http: TraceConfig traceConfig = Tracing.getTraceConfig(); TraceParams activeTraceParams = traceConfig.getActiveTraceParams(); traceConfig.updateActiveTraceParams(activeTraceParams.toBuilder().setSampler(Samplers.alwaysSample()).build()); Tracer tracer = Tracing.getTracer(); SecretAsyncClient client = new SecretClientBuilder() .endpoint("https: .credential(new DefaultAzureCredentialBuilder().build()) .buildAsyncClient(); Semaphore semaphore = new Semaphore(1); Scope scope = tracer.spanBuilder("user-parent-span").startScopedSpan(); semaphore.acquire(); Context traceContext = Context.of(OPENCENSUS_SPAN_KEY, tracer.getCurrentSpan()); client.setSecret(new Secret("StorageAccountPassword", "password")) .then(client.setSecret(new Secret("BankAccountPassword", "password"))) .subscriberContext(traceContext) .subscribe(secretResponse -> System.out.printf("Secret is created with name %s and value %s %n", secretResponse.getName(), secretResponse.getValue()), err -> { System.out.printf("Error thrown when enqueue the message. Error message: %s%n", err.getMessage()); scope.close(); semaphore.release(); }, () -> { semaphore.release(); }); semaphore.acquire(); client.listSecrets() .subscriberContext(traceContext) .subscribe(secretBase -> client.getSecret(secretBase) .subscriberContext(traceContext) .subscribe(secret -> System.out.printf("Received secret with name %s and value %s%n", secret.getName(), secret.getValue()))); client.setSecret("BankAccountPassword", "new password") .subscriberContext(traceContext) .subscribe(secretResponse -> System.out.printf("Secret is created with name %s and value %s %n", secretResponse.getName(), secretResponse.getValue()), err -> { System.out.printf("Error thrown when enqueue the message. 
Error message: %s%n", err.getMessage()); scope.close(); semaphore.release(); }, () -> { semaphore.release(); }); semaphore.acquire(); client.listSecretVersions("BankAccountPassword") .subscriberContext(traceContext) .subscribe(secretBase -> System.out.printf("Received secret's version with name %s%n", secretBase.getName())); scope.close(); Tracing.getExportComponent().shutdown(); }
class ListOperationsAsync { /** * Authenticates with the key vault and shows how to list secrets and list versions of a specific secret in the key vault * with trace spans exported to zipkin. * * Please refer to the <a href=https: * for more documentation on using a zipkin exporter. * * @param args Unused. Arguments to the program. * @throws IllegalArgumentException when invalid key vault endpoint is passed. */ }
class ListOperationsAsync { /** * Authenticates with the key vault and shows how to list secrets and list versions of a specific secret in the key * vault with trace spans exported to Zipkin. * * Please refer to the <a href=https: * using a Zipkin exporter. * * @param args Unused. Arguments to the program. * @throws IllegalArgumentException when invalid key vault endpoint is passed. * @throws InterruptedException when the thread is interrupted in sleep mode. */ }
I'm wary of this change. I've run across Zulu's JVM messing up during compilation and bombing out on these because it fails to interpret this method reference. I would advise to keep the changes limited to the scope of this PR.
public void testWithMultiplePartitions() throws Exception { final CountDownLatch count = new CountDownLatch(1); when(eventHubAsyncClient.getPartitionIds()).thenReturn(Flux.just("1", "2", "3")); when(eventHubAsyncClient.eventHubName()).thenReturn("test-eh"); when(eventHubAsyncClient .createConsumer(anyString(), eq("1"), any(EventPosition.class), any(EventHubConsumerOptions.class))) .thenReturn(consumer1); when(consumer1.receive()).thenReturn( Mono.fromRunnable(count::countDown).thenMany(Flux.just(eventData1, eventData2))); when(eventData1.sequenceNumber()).thenReturn(1L); when(eventData2.sequenceNumber()).thenReturn(2L); when(eventData1.offset()).thenReturn(1L); when(eventData2.offset()).thenReturn(100L); when(eventHubAsyncClient .createConsumer(anyString(), eq("2"), any(EventPosition.class), any(EventHubConsumerOptions.class))) .thenReturn(consumer2); when(consumer2.receive()).thenReturn(Mono.fromRunnable(count::countDown).thenMany(Flux.just(eventData3))); when(eventData3.sequenceNumber()).thenReturn(1L); when(eventData3.offset()).thenReturn(1L); when(eventHubAsyncClient .createConsumer(anyString(), eq("3"), any(EventPosition.class), any(EventHubConsumerOptions.class))) .thenReturn(consumer3); when(consumer3.receive()).thenReturn(Mono.fromRunnable(count::countDown).thenMany(Flux.just(eventData4))); when(eventData4.sequenceNumber()).thenReturn(1L); when(eventData4.offset()).thenReturn(1L); final InMemoryPartitionManager partitionManager = new InMemoryPartitionManager(); final TracerProvider tracerProvider = new TracerProvider(Collections.emptyList()); final EventProcessor eventProcessor = new EventProcessor(eventHubAsyncClient, "test-consumer", TestPartitionProcessor::new, EventPosition.latest(), partitionManager, tracerProvider); eventProcessor.start(); final boolean completed = count.await(10, TimeUnit.SECONDS); eventProcessor.stop(); Assert.assertTrue(completed); StepVerifier.create(partitionManager.listOwnership("test-eh", "test-consumer")) 
.expectNextCount(1).verifyComplete(); verify(eventHubAsyncClient, atLeast(1)).getPartitionIds(); verify(eventHubAsyncClient, times(1)) .createConsumer(anyString(), anyString(), any(EventPosition.class), any(EventHubConsumerOptions.class)); StepVerifier.create(partitionManager.listOwnership("test-eh", "test-consumer")) .assertNext(po -> { try { if (po.partitionId().equals("1")) { verify(consumer1, atLeastOnce()).receive(); verify(consumer1, atLeastOnce()).close(); } else if (po.partitionId().equals("2")) { verify(consumer2, atLeastOnce()).receive(); verify(consumer2, atLeastOnce()).close(); } else { verify(consumer3, atLeastOnce()).receive(); verify(consumer3, atLeastOnce()).close(); } } catch (IOException ex) { fail("Failed to assert consumer close method invocation"); } }).verifyComplete(); }
Mono.fromRunnable(count::countDown).thenMany(Flux.just(eventData1, eventData2)));
public void testWithMultiplePartitions() throws Exception { final CountDownLatch count = new CountDownLatch(1); when(eventHubAsyncClient.getPartitionIds()).thenReturn(Flux.just("1", "2", "3")); when(eventHubAsyncClient.getEventHubName()).thenReturn("test-eh"); when(eventHubAsyncClient .createConsumer(anyString(), eq("1"), any(EventPosition.class), any(EventHubConsumerOptions.class))) .thenReturn(consumer1); when(consumer1.receive()).thenReturn( Mono.fromRunnable(() -> count.countDown()).thenMany(Flux.just(eventData1, eventData2))); when(eventData1.getSequenceNumber()).thenReturn(1L); when(eventData2.getSequenceNumber()).thenReturn(2L); when(eventData1.getOffset()).thenReturn(1L); when(eventData2.getOffset()).thenReturn(100L); when(eventHubAsyncClient .createConsumer(anyString(), eq("2"), any(EventPosition.class), any(EventHubConsumerOptions.class))) .thenReturn(consumer2); when(consumer2.receive()).thenReturn(Mono.fromRunnable(() -> count.countDown()).thenMany(Flux.just(eventData3))); when(eventData3.getSequenceNumber()).thenReturn(1L); when(eventData3.getOffset()).thenReturn(1L); when(eventHubAsyncClient .createConsumer(anyString(), eq("3"), any(EventPosition.class), any(EventHubConsumerOptions.class))) .thenReturn(consumer3); when(consumer3.receive()).thenReturn(Mono.fromRunnable(() -> count.countDown()).thenMany(Flux.just(eventData4))); when(eventData4.getSequenceNumber()).thenReturn(1L); when(eventData4.getOffset()).thenReturn(1L); final InMemoryPartitionManager partitionManager = new InMemoryPartitionManager(); final TracerProvider tracerProvider = new TracerProvider(Collections.emptyList()); final EventProcessor eventProcessor = new EventProcessor(eventHubAsyncClient, "test-consumer", TestPartitionProcessor::new, EventPosition.latest(), partitionManager, tracerProvider); eventProcessor.start(); final boolean completed = count.await(10, TimeUnit.SECONDS); eventProcessor.stop(); Assert.assertTrue(completed); 
StepVerifier.create(partitionManager.listOwnership("test-eh", "test-consumer")) .expectNextCount(1).verifyComplete(); verify(eventHubAsyncClient, atLeast(1)).getPartitionIds(); verify(eventHubAsyncClient, times(1)) .createConsumer(anyString(), anyString(), any(EventPosition.class), any(EventHubConsumerOptions.class)); StepVerifier.create(partitionManager.listOwnership("test-eh", "test-consumer")) .assertNext(po -> { try { if (po.getPartitionId().equals("1")) { verify(consumer1, atLeastOnce()).receive(); verify(consumer1, atLeastOnce()).close(); } else if (po.getPartitionId().equals("2")) { verify(consumer2, atLeastOnce()).receive(); verify(consumer2, atLeastOnce()).close(); } else { verify(consumer3, atLeastOnce()).receive(); verify(consumer3, atLeastOnce()).close(); } } catch (IOException ex) { fail("Failed to assert consumer close method invocation"); } }).verifyComplete(); }
class EventProcessorTest { @Mock private EventHubAsyncClient eventHubAsyncClient; @Mock private EventHubAsyncConsumer consumer1, consumer2, consumer3; @Mock private EventData eventData1, eventData2, eventData3, eventData4; @Before public void setup() { MockitoAnnotations.initMocks(this); } /** * Tests all the happy cases for {@link EventProcessor}. * * @throws Exception if an error occurs while running the test. */ @Test public void testWithSimplePartitionProcessor() throws Exception { when(eventHubAsyncClient.eventHubName()).thenReturn("test-eh"); when(eventHubAsyncClient.getPartitionIds()).thenReturn(Flux.just("1")); when(eventHubAsyncClient .createConsumer(anyString(), anyString(), any(EventPosition.class), any(EventHubConsumerOptions.class))) .thenReturn(consumer1); when(consumer1.receive()).thenReturn(Flux.just(eventData1, eventData2)); when(eventData1.sequenceNumber()).thenReturn(1L); when(eventData2.sequenceNumber()).thenReturn(2L); when(eventData1.offset()).thenReturn(1L); when(eventData2.offset()).thenReturn(100L); final InMemoryPartitionManager partitionManager = new InMemoryPartitionManager(); final TestPartitionProcessor testPartitionProcessor = new TestPartitionProcessor(); final long beforeTest = System.currentTimeMillis(); final EventProcessor eventProcessor = new EventProcessorBuilder() .eventHubClient(eventHubAsyncClient) .consumerGroup("test-consumer") .partitionProcessorFactory(() -> testPartitionProcessor) .partitionManager(partitionManager) .buildEventProcessor(); eventProcessor.start(); TimeUnit.SECONDS.sleep(10); eventProcessor.stop(); assertNotNull(eventProcessor.identifier()); StepVerifier.create(partitionManager.listOwnership("test-eh", "test-consumer")) .expectNextCount(1).verifyComplete(); StepVerifier.create(partitionManager.listOwnership("test-eh", "test-consumer")) .assertNext(partitionOwnership -> { assertEquals("Partition", "1", partitionOwnership.partitionId()); assertEquals("Consumer", "test-consumer", 
partitionOwnership.consumerGroupName()); assertEquals("EventHub name", "test-eh", partitionOwnership.eventHubName()); assertEquals("Sequence number", 2, (long) partitionOwnership.sequenceNumber()); assertEquals("Offset", Long.valueOf(100), partitionOwnership.offset()); assertEquals("OwnerId", eventProcessor.identifier(), partitionOwnership.ownerId()); assertTrue("LastModifiedTime", partitionOwnership.lastModifiedTime() >= beforeTest); assertTrue("LastModifiedTime", partitionOwnership.lastModifiedTime() <= System.currentTimeMillis()); assertNotNull(partitionOwnership.eTag()); }).verifyComplete(); verify(eventHubAsyncClient, atLeastOnce()).getPartitionIds(); verify(eventHubAsyncClient, atLeastOnce()) .createConsumer(anyString(), anyString(), any(EventPosition.class), any(EventHubConsumerOptions.class)); verify(consumer1, atLeastOnce()).receive(); verify(consumer1, atLeastOnce()).close(); } /** * Tests {@link EventProcessor} with a partition processor that throws an exception when processing an event. * * @throws Exception if an error occurs while running the test. 
*/ @Test public void testWithFaultyPartitionProcessor() throws Exception { when(eventHubAsyncClient.eventHubName()).thenReturn("test-eh"); when(eventHubAsyncClient.getPartitionIds()).thenReturn(Flux.just("1")); when(eventHubAsyncClient .createConsumer(anyString(), anyString(), any(EventPosition.class), any(EventHubConsumerOptions.class))) .thenReturn(consumer1); when(consumer1.receive()).thenReturn(Flux.just(eventData1)); final InMemoryPartitionManager partitionManager = new InMemoryPartitionManager(); final FaultyPartitionProcessor faultyPartitionProcessor = new FaultyPartitionProcessor(); final EventProcessor eventProcessor = new EventProcessorBuilder() .eventHubClient(eventHubAsyncClient) .consumerGroup("test-consumer") .partitionProcessorFactory(() -> faultyPartitionProcessor) .partitionManager(partitionManager) .buildEventProcessor(); eventProcessor.start(); TimeUnit.SECONDS.sleep(10); eventProcessor.stop(); assertTrue(faultyPartitionProcessor.error); } /** * Tests process start spans error messages invoked for {@link EventProcessor}. * * @throws Exception if an error occurs while running the test. 
*/ @Test public void testErrorProcessSpans() throws Exception { final Tracer tracer1 = mock(Tracer.class); final List<Tracer> tracers = Collections.singletonList(tracer1); TracerProvider tracerProvider = new TracerProvider(tracers); when(eventHubAsyncClient.eventHubName()).thenReturn("test-eh"); when(eventHubAsyncClient.getPartitionIds()).thenReturn(Flux.just("1")); when(eventHubAsyncClient .createConsumer(anyString(), anyString(), any(EventPosition.class), any(EventHubConsumerOptions.class))) .thenReturn(consumer1); when(eventData1.sequenceNumber()).thenReturn(1L); when(eventData2.sequenceNumber()).thenReturn(2L); when(eventData1.offset()).thenReturn(1L); when(eventData2.offset()).thenReturn(100L); String diagnosticId = "00-08ee063508037b1719dddcbf248e30e2-1365c684eb25daed-01"; Map<String, Object> properties = new HashMap<>(); properties.put(DIAGNOSTIC_ID_KEY, diagnosticId); when(eventData1.properties()).thenReturn(properties); when(consumer1.receive()).thenReturn(Flux.just(eventData1)); when(tracer1.extractContext(eq(diagnosticId), any())).thenAnswer( invocation -> { Context passed = invocation.getArgument(1, Context.class); return passed.addData(SPAN_CONTEXT, "value"); } ); when(tracer1.start(eq("Azure.eventhubs.process"), any(), eq(ProcessKind.PROCESS))).thenAnswer( invocation -> { Context passed = invocation.getArgument(1, Context.class); return passed.addData(SPAN_CONTEXT, "value1") .addData("scope", (Closeable) () -> { }) .addData(OPENCENSUS_SPAN_KEY, "value2"); } ); final InMemoryPartitionManager partitionManager = new InMemoryPartitionManager(); final EventProcessor eventProcessor = new EventProcessor(eventHubAsyncClient, "test-consumer", FaultyPartitionProcessor::new, EventPosition.earliest(), partitionManager, tracerProvider); eventProcessor.start(); TimeUnit.SECONDS.sleep(10); eventProcessor.stop(); verify(tracer1, times(1)).extractContext(eq(diagnosticId), any()); verify(tracer1, times(1)).start(eq("Azure.eventhubs.process"), any(), 
eq(ProcessKind.PROCESS)); verify(tracer1, times(1)).end(eq(""), any(IllegalStateException.class), any()); } /** * Tests process start spans invoked for {@link EventProcessor}. * * @throws Exception if an error occurs while running the test. */ @Test public void testProcessSpans() throws Exception { final Tracer tracer1 = mock(Tracer.class); final List<Tracer> tracers = Collections.singletonList(tracer1); TracerProvider tracerProvider = new TracerProvider(tracers); when(eventHubAsyncClient.eventHubName()).thenReturn("test-eh"); when(eventHubAsyncClient.getPartitionIds()).thenReturn(Flux.just("1")); when(eventHubAsyncClient .createConsumer(anyString(), anyString(), any(EventPosition.class), any(EventHubConsumerOptions.class))) .thenReturn(consumer1); when(eventData1.sequenceNumber()).thenReturn(1L); when(eventData2.sequenceNumber()).thenReturn(2L); when(eventData1.offset()).thenReturn(1L); when(eventData2.offset()).thenReturn(100L); String diagnosticId = "00-08ee063508037b1719dddcbf248e30e2-1365c684eb25daed-01"; Map<String, Object> properties = new HashMap<>(); properties.put(DIAGNOSTIC_ID_KEY, diagnosticId); when(eventData1.properties()).thenReturn(properties); when(consumer1.receive()).thenReturn(Flux.just(eventData1)); when(tracer1.extractContext(eq(diagnosticId), any())).thenAnswer( invocation -> { Context passed = invocation.getArgument(1, Context.class); return passed.addData(SPAN_CONTEXT, "value"); } ); when(tracer1.start(eq("Azure.eventhubs.process"), any(), eq(ProcessKind.PROCESS))).thenAnswer( invocation -> { Context passed = invocation.getArgument(1, Context.class); return passed.addData(SPAN_CONTEXT, "value1").addData("scope", (Closeable) () -> { }).addData(OPENCENSUS_SPAN_KEY, "value2"); } ); final InMemoryPartitionManager partitionManager = new InMemoryPartitionManager(); final EventProcessor eventProcessor = new EventProcessor(eventHubAsyncClient, "test-consumer", TestPartitionProcessor::new, EventPosition.earliest(), partitionManager, tracerProvider); 
eventProcessor.start(); TimeUnit.SECONDS.sleep(10); eventProcessor.stop(); verify(tracer1, times(1)).extractContext(eq(diagnosticId), any()); verify(tracer1, times(1)).start(eq("Azure.eventhubs.process"), any(), eq(ProcessKind.PROCESS)); verify(tracer1, times(1)).end(eq("success"), isNull(), any()); } /** * Tests {@link EventProcessor} that processes events from an Event Hub configured with multiple * partitions. * * @throws Exception if an error occurs while running the test. */ @Test private static final class FaultyPartitionProcessor extends PartitionProcessor { boolean error; @Override public Mono<Void> processEvent(PartitionContext partitionContext, EventData eventData) { return Mono.error(new IllegalStateException()); } @Override public void processError(PartitionContext partitionContext, Throwable throwable) { error = true; } } private static final class TestPartitionProcessor extends PartitionProcessor { @Override public Mono<Void> processEvent(PartitionContext partitionContext, EventData eventData) { return partitionContext.updateCheckpoint(eventData); } } }
class EventProcessorTest { @Mock private EventHubAsyncClient eventHubAsyncClient; @Mock private EventHubAsyncConsumer consumer1, consumer2, consumer3; @Mock private EventData eventData1, eventData2, eventData3, eventData4; @Before public void setup() { MockitoAnnotations.initMocks(this); } @After public void teardown() { consumer1 = null; consumer2 = null; consumer3 = null; eventData1 = null; eventData2 = null; eventData3 = null; eventData4 = null; eventHubAsyncClient = null; Mockito.framework().clearInlineMocks(); } /** * Tests all the happy cases for {@link EventProcessor}. * * @throws Exception if an error occurs while running the test. */ @Test public void testWithSimplePartitionProcessor() throws Exception { when(eventHubAsyncClient.getEventHubName()).thenReturn("test-eh"); when(eventHubAsyncClient.getPartitionIds()).thenReturn(Flux.just("1")); when(eventHubAsyncClient .createConsumer(anyString(), anyString(), any(EventPosition.class), any(EventHubConsumerOptions.class))) .thenReturn(consumer1); when(consumer1.receive()).thenReturn(Flux.just(eventData1, eventData2)); when(eventData1.getSequenceNumber()).thenReturn(1L); when(eventData2.getSequenceNumber()).thenReturn(2L); when(eventData1.getOffset()).thenReturn(1L); when(eventData2.getOffset()).thenReturn(100L); final InMemoryPartitionManager partitionManager = new InMemoryPartitionManager(); final TestPartitionProcessor testPartitionProcessor = new TestPartitionProcessor(); final long beforeTest = System.currentTimeMillis(); final EventProcessor eventProcessor = new EventProcessorBuilder() .eventHubClient(eventHubAsyncClient) .consumerGroup("test-consumer") .partitionProcessorFactory(() -> testPartitionProcessor) .partitionManager(partitionManager) .buildEventProcessor(); eventProcessor.start(); TimeUnit.SECONDS.sleep(10); eventProcessor.stop(); assertNotNull(eventProcessor.getIdentifier()); StepVerifier.create(partitionManager.listOwnership("test-eh", "test-consumer")) .expectNextCount(1).verifyComplete(); 
StepVerifier.create(partitionManager.listOwnership("test-eh", "test-consumer")) .assertNext(partitionOwnership -> { assertEquals("Partition", "1", partitionOwnership.getPartitionId()); assertEquals("Consumer", "test-consumer", partitionOwnership.getConsumerGroupName()); assertEquals("EventHub name", "test-eh", partitionOwnership.getEventHubName()); assertEquals("Sequence number", 2, (long) partitionOwnership.getSequenceNumber()); assertEquals("Offset", Long.valueOf(100), partitionOwnership.getOffset()); assertEquals("OwnerId", eventProcessor.getIdentifier(), partitionOwnership.getOwnerId()); assertTrue("LastModifiedTime", partitionOwnership.getLastModifiedTime() >= beforeTest); assertTrue("LastModifiedTime", partitionOwnership.getLastModifiedTime() <= System.currentTimeMillis()); assertNotNull(partitionOwnership.getETag()); }).verifyComplete(); verify(eventHubAsyncClient, atLeastOnce()).getPartitionIds(); verify(eventHubAsyncClient, atLeastOnce()) .createConsumer(anyString(), anyString(), any(EventPosition.class), any(EventHubConsumerOptions.class)); verify(consumer1, atLeastOnce()).receive(); verify(consumer1, atLeastOnce()).close(); } /** * Tests {@link EventProcessor} with a partition processor that throws an exception when processing an event. * * @throws Exception if an error occurs while running the test. 
*/ @Test public void testWithFaultyPartitionProcessor() throws Exception { when(eventHubAsyncClient.getEventHubName()).thenReturn("test-eh"); when(eventHubAsyncClient.getPartitionIds()).thenReturn(Flux.just("1")); when(eventHubAsyncClient .createConsumer(anyString(), anyString(), any(EventPosition.class), any(EventHubConsumerOptions.class))) .thenReturn(consumer1); when(consumer1.receive()).thenReturn(Flux.just(eventData1)); final InMemoryPartitionManager partitionManager = new InMemoryPartitionManager(); final FaultyPartitionProcessor faultyPartitionProcessor = new FaultyPartitionProcessor(); final EventProcessor eventProcessor = new EventProcessorBuilder() .eventHubClient(eventHubAsyncClient) .consumerGroup("test-consumer") .partitionProcessorFactory(() -> faultyPartitionProcessor) .partitionManager(partitionManager) .buildEventProcessor(); eventProcessor.start(); TimeUnit.SECONDS.sleep(10); eventProcessor.stop(); assertTrue(faultyPartitionProcessor.error); } /** * Tests process start spans error messages invoked for {@link EventProcessor}. * * @throws Exception if an error occurs while running the test. 
*/ @Test public void testErrorProcessSpans() throws Exception { final Tracer tracer1 = mock(Tracer.class); final List<Tracer> tracers = Collections.singletonList(tracer1); TracerProvider tracerProvider = new TracerProvider(tracers); when(eventHubAsyncClient.getEventHubName()).thenReturn("test-eh"); when(eventHubAsyncClient.getPartitionIds()).thenReturn(Flux.just("1")); when(eventHubAsyncClient .createConsumer(anyString(), anyString(), any(EventPosition.class), any(EventHubConsumerOptions.class))) .thenReturn(consumer1); when(eventData1.getSequenceNumber()).thenReturn(1L); when(eventData2.getSequenceNumber()).thenReturn(2L); when(eventData1.getOffset()).thenReturn(1L); when(eventData2.getOffset()).thenReturn(100L); String diagnosticId = "00-08ee063508037b1719dddcbf248e30e2-1365c684eb25daed-01"; Map<String, Object> properties = new HashMap<>(); properties.put(DIAGNOSTIC_ID_KEY, diagnosticId); when(eventData1.getProperties()).thenReturn(properties); when(consumer1.receive()).thenReturn(Flux.just(eventData1)); when(tracer1.extractContext(eq(diagnosticId), any())).thenAnswer( invocation -> { Context passed = invocation.getArgument(1, Context.class); return passed.addData(SPAN_CONTEXT, "value"); } ); when(tracer1.start(eq("Azure.eventhubs.process"), any(), eq(ProcessKind.PROCESS))).thenAnswer( invocation -> { Context passed = invocation.getArgument(1, Context.class); return passed.addData(SPAN_CONTEXT, "value1") .addData("scope", (Closeable) () -> { }) .addData(OPENCENSUS_SPAN_KEY, "value2"); } ); final InMemoryPartitionManager partitionManager = new InMemoryPartitionManager(); final EventProcessor eventProcessor = new EventProcessor(eventHubAsyncClient, "test-consumer", FaultyPartitionProcessor::new, EventPosition.earliest(), partitionManager, tracerProvider); eventProcessor.start(); TimeUnit.SECONDS.sleep(10); eventProcessor.stop(); verify(tracer1, times(1)).extractContext(eq(diagnosticId), any()); verify(tracer1, times(1)).start(eq("Azure.eventhubs.process"), any(), 
eq(ProcessKind.PROCESS)); verify(tracer1, times(1)).end(eq(""), any(IllegalStateException.class), any()); } /** * Tests process start spans invoked for {@link EventProcessor}. * * @throws Exception if an error occurs while running the test. */ @Test public void testProcessSpans() throws Exception { final Tracer tracer1 = mock(Tracer.class); final List<Tracer> tracers = Collections.singletonList(tracer1); TracerProvider tracerProvider = new TracerProvider(tracers); when(eventHubAsyncClient.getEventHubName()).thenReturn("test-eh"); when(eventHubAsyncClient.getPartitionIds()).thenReturn(Flux.just("1")); when(eventHubAsyncClient .createConsumer(anyString(), anyString(), any(EventPosition.class), any(EventHubConsumerOptions.class))) .thenReturn(consumer1); when(eventData1.getSequenceNumber()).thenReturn(1L); when(eventData2.getSequenceNumber()).thenReturn(2L); when(eventData1.getOffset()).thenReturn(1L); when(eventData2.getOffset()).thenReturn(100L); String diagnosticId = "00-08ee063508037b1719dddcbf248e30e2-1365c684eb25daed-01"; Map<String, Object> properties = new HashMap<>(); properties.put(DIAGNOSTIC_ID_KEY, diagnosticId); when(eventData1.getProperties()).thenReturn(properties); when(consumer1.receive()).thenReturn(Flux.just(eventData1)); when(tracer1.extractContext(eq(diagnosticId), any())).thenAnswer( invocation -> { Context passed = invocation.getArgument(1, Context.class); return passed.addData(SPAN_CONTEXT, "value"); } ); when(tracer1.start(eq("Azure.eventhubs.process"), any(), eq(ProcessKind.PROCESS))).thenAnswer( invocation -> { Context passed = invocation.getArgument(1, Context.class); return passed.addData(SPAN_CONTEXT, "value1").addData("scope", (Closeable) () -> { return; }).addData(OPENCENSUS_SPAN_KEY, "value2"); } ); final InMemoryPartitionManager partitionManager = new InMemoryPartitionManager(); final EventProcessor eventProcessor = new EventProcessor(eventHubAsyncClient, "test-consumer", TestPartitionProcessor::new, EventPosition.earliest(), 
partitionManager, tracerProvider); eventProcessor.start(); TimeUnit.SECONDS.sleep(10); eventProcessor.stop(); verify(tracer1, times(1)).extractContext(eq(diagnosticId), any()); verify(tracer1, times(1)).start(eq("Azure.eventhubs.process"), any(), eq(ProcessKind.PROCESS)); verify(tracer1, times(1)).end(eq("success"), isNull(), any()); } /** * Tests {@link EventProcessor} that processes events from an Event Hub configured with multiple * partitions. * * @throws Exception if an error occurs while running the test. */ @Test private static final class FaultyPartitionProcessor extends PartitionProcessor { boolean error; @Override public Mono<Void> processEvent(PartitionContext partitionContext, EventData eventData) { return Mono.error(new IllegalStateException()); } @Override public void processError(PartitionContext partitionContext, Throwable throwable) { error = true; } } private static final class TestPartitionProcessor extends PartitionProcessor { @Override public Mono<Void> processEvent(PartitionContext partitionContext, EventData eventData) { return partitionContext.updateCheckpoint(eventData); } } }
Same with the one below. I'll find the bug … The one on the left was causing the thing to fail compilation. https://github.com/Azure/azure-sdk-for-java/pull/4038/commits/69577ac0aeaa0a6f2d658e57a832b493d9f6c167 https://bugs.openjdk.java.net/browse/JDK-8221420
public void testWithMultiplePartitions() throws Exception { final CountDownLatch count = new CountDownLatch(1); when(eventHubAsyncClient.getPartitionIds()).thenReturn(Flux.just("1", "2", "3")); when(eventHubAsyncClient.eventHubName()).thenReturn("test-eh"); when(eventHubAsyncClient .createConsumer(anyString(), eq("1"), any(EventPosition.class), any(EventHubConsumerOptions.class))) .thenReturn(consumer1); when(consumer1.receive()).thenReturn( Mono.fromRunnable(count::countDown).thenMany(Flux.just(eventData1, eventData2))); when(eventData1.sequenceNumber()).thenReturn(1L); when(eventData2.sequenceNumber()).thenReturn(2L); when(eventData1.offset()).thenReturn(1L); when(eventData2.offset()).thenReturn(100L); when(eventHubAsyncClient .createConsumer(anyString(), eq("2"), any(EventPosition.class), any(EventHubConsumerOptions.class))) .thenReturn(consumer2); when(consumer2.receive()).thenReturn(Mono.fromRunnable(count::countDown).thenMany(Flux.just(eventData3))); when(eventData3.sequenceNumber()).thenReturn(1L); when(eventData3.offset()).thenReturn(1L); when(eventHubAsyncClient .createConsumer(anyString(), eq("3"), any(EventPosition.class), any(EventHubConsumerOptions.class))) .thenReturn(consumer3); when(consumer3.receive()).thenReturn(Mono.fromRunnable(count::countDown).thenMany(Flux.just(eventData4))); when(eventData4.sequenceNumber()).thenReturn(1L); when(eventData4.offset()).thenReturn(1L); final InMemoryPartitionManager partitionManager = new InMemoryPartitionManager(); final TracerProvider tracerProvider = new TracerProvider(Collections.emptyList()); final EventProcessor eventProcessor = new EventProcessor(eventHubAsyncClient, "test-consumer", TestPartitionProcessor::new, EventPosition.latest(), partitionManager, tracerProvider); eventProcessor.start(); final boolean completed = count.await(10, TimeUnit.SECONDS); eventProcessor.stop(); Assert.assertTrue(completed); StepVerifier.create(partitionManager.listOwnership("test-eh", "test-consumer")) 
.expectNextCount(1).verifyComplete(); verify(eventHubAsyncClient, atLeast(1)).getPartitionIds(); verify(eventHubAsyncClient, times(1)) .createConsumer(anyString(), anyString(), any(EventPosition.class), any(EventHubConsumerOptions.class)); StepVerifier.create(partitionManager.listOwnership("test-eh", "test-consumer")) .assertNext(po -> { try { if (po.partitionId().equals("1")) { verify(consumer1, atLeastOnce()).receive(); verify(consumer1, atLeastOnce()).close(); } else if (po.partitionId().equals("2")) { verify(consumer2, atLeastOnce()).receive(); verify(consumer2, atLeastOnce()).close(); } else { verify(consumer3, atLeastOnce()).receive(); verify(consumer3, atLeastOnce()).close(); } } catch (IOException ex) { fail("Failed to assert consumer close method invocation"); } }).verifyComplete(); }
Mono.fromRunnable(count::countDown).thenMany(Flux.just(eventData1, eventData2)));
public void testWithMultiplePartitions() throws Exception { final CountDownLatch count = new CountDownLatch(1); when(eventHubAsyncClient.getPartitionIds()).thenReturn(Flux.just("1", "2", "3")); when(eventHubAsyncClient.getEventHubName()).thenReturn("test-eh"); when(eventHubAsyncClient .createConsumer(anyString(), eq("1"), any(EventPosition.class), any(EventHubConsumerOptions.class))) .thenReturn(consumer1); when(consumer1.receive()).thenReturn( Mono.fromRunnable(() -> count.countDown()).thenMany(Flux.just(eventData1, eventData2))); when(eventData1.getSequenceNumber()).thenReturn(1L); when(eventData2.getSequenceNumber()).thenReturn(2L); when(eventData1.getOffset()).thenReturn(1L); when(eventData2.getOffset()).thenReturn(100L); when(eventHubAsyncClient .createConsumer(anyString(), eq("2"), any(EventPosition.class), any(EventHubConsumerOptions.class))) .thenReturn(consumer2); when(consumer2.receive()).thenReturn(Mono.fromRunnable(() -> count.countDown()).thenMany(Flux.just(eventData3))); when(eventData3.getSequenceNumber()).thenReturn(1L); when(eventData3.getOffset()).thenReturn(1L); when(eventHubAsyncClient .createConsumer(anyString(), eq("3"), any(EventPosition.class), any(EventHubConsumerOptions.class))) .thenReturn(consumer3); when(consumer3.receive()).thenReturn(Mono.fromRunnable(() -> count.countDown()).thenMany(Flux.just(eventData4))); when(eventData4.getSequenceNumber()).thenReturn(1L); when(eventData4.getOffset()).thenReturn(1L); final InMemoryPartitionManager partitionManager = new InMemoryPartitionManager(); final TracerProvider tracerProvider = new TracerProvider(Collections.emptyList()); final EventProcessor eventProcessor = new EventProcessor(eventHubAsyncClient, "test-consumer", TestPartitionProcessor::new, EventPosition.latest(), partitionManager, tracerProvider); eventProcessor.start(); final boolean completed = count.await(10, TimeUnit.SECONDS); eventProcessor.stop(); Assert.assertTrue(completed); 
StepVerifier.create(partitionManager.listOwnership("test-eh", "test-consumer")) .expectNextCount(1).verifyComplete(); verify(eventHubAsyncClient, atLeast(1)).getPartitionIds(); verify(eventHubAsyncClient, times(1)) .createConsumer(anyString(), anyString(), any(EventPosition.class), any(EventHubConsumerOptions.class)); StepVerifier.create(partitionManager.listOwnership("test-eh", "test-consumer")) .assertNext(po -> { try { if (po.getPartitionId().equals("1")) { verify(consumer1, atLeastOnce()).receive(); verify(consumer1, atLeastOnce()).close(); } else if (po.getPartitionId().equals("2")) { verify(consumer2, atLeastOnce()).receive(); verify(consumer2, atLeastOnce()).close(); } else { verify(consumer3, atLeastOnce()).receive(); verify(consumer3, atLeastOnce()).close(); } } catch (IOException ex) { fail("Failed to assert consumer close method invocation"); } }).verifyComplete(); }
class EventProcessorTest { @Mock private EventHubAsyncClient eventHubAsyncClient; @Mock private EventHubAsyncConsumer consumer1, consumer2, consumer3; @Mock private EventData eventData1, eventData2, eventData3, eventData4; @Before public void setup() { MockitoAnnotations.initMocks(this); } /** * Tests all the happy cases for {@link EventProcessor}. * * @throws Exception if an error occurs while running the test. */ @Test public void testWithSimplePartitionProcessor() throws Exception { when(eventHubAsyncClient.eventHubName()).thenReturn("test-eh"); when(eventHubAsyncClient.getPartitionIds()).thenReturn(Flux.just("1")); when(eventHubAsyncClient .createConsumer(anyString(), anyString(), any(EventPosition.class), any(EventHubConsumerOptions.class))) .thenReturn(consumer1); when(consumer1.receive()).thenReturn(Flux.just(eventData1, eventData2)); when(eventData1.sequenceNumber()).thenReturn(1L); when(eventData2.sequenceNumber()).thenReturn(2L); when(eventData1.offset()).thenReturn(1L); when(eventData2.offset()).thenReturn(100L); final InMemoryPartitionManager partitionManager = new InMemoryPartitionManager(); final TestPartitionProcessor testPartitionProcessor = new TestPartitionProcessor(); final long beforeTest = System.currentTimeMillis(); final EventProcessor eventProcessor = new EventProcessorBuilder() .eventHubClient(eventHubAsyncClient) .consumerGroup("test-consumer") .partitionProcessorFactory(() -> testPartitionProcessor) .partitionManager(partitionManager) .buildEventProcessor(); eventProcessor.start(); TimeUnit.SECONDS.sleep(10); eventProcessor.stop(); assertNotNull(eventProcessor.identifier()); StepVerifier.create(partitionManager.listOwnership("test-eh", "test-consumer")) .expectNextCount(1).verifyComplete(); StepVerifier.create(partitionManager.listOwnership("test-eh", "test-consumer")) .assertNext(partitionOwnership -> { assertEquals("Partition", "1", partitionOwnership.partitionId()); assertEquals("Consumer", "test-consumer", 
partitionOwnership.consumerGroupName()); assertEquals("EventHub name", "test-eh", partitionOwnership.eventHubName()); assertEquals("Sequence number", 2, (long) partitionOwnership.sequenceNumber()); assertEquals("Offset", Long.valueOf(100), partitionOwnership.offset()); assertEquals("OwnerId", eventProcessor.identifier(), partitionOwnership.ownerId()); assertTrue("LastModifiedTime", partitionOwnership.lastModifiedTime() >= beforeTest); assertTrue("LastModifiedTime", partitionOwnership.lastModifiedTime() <= System.currentTimeMillis()); assertNotNull(partitionOwnership.eTag()); }).verifyComplete(); verify(eventHubAsyncClient, atLeastOnce()).getPartitionIds(); verify(eventHubAsyncClient, atLeastOnce()) .createConsumer(anyString(), anyString(), any(EventPosition.class), any(EventHubConsumerOptions.class)); verify(consumer1, atLeastOnce()).receive(); verify(consumer1, atLeastOnce()).close(); } /** * Tests {@link EventProcessor} with a partition processor that throws an exception when processing an event. * * @throws Exception if an error occurs while running the test. 
*/ @Test public void testWithFaultyPartitionProcessor() throws Exception { when(eventHubAsyncClient.eventHubName()).thenReturn("test-eh"); when(eventHubAsyncClient.getPartitionIds()).thenReturn(Flux.just("1")); when(eventHubAsyncClient .createConsumer(anyString(), anyString(), any(EventPosition.class), any(EventHubConsumerOptions.class))) .thenReturn(consumer1); when(consumer1.receive()).thenReturn(Flux.just(eventData1)); final InMemoryPartitionManager partitionManager = new InMemoryPartitionManager(); final FaultyPartitionProcessor faultyPartitionProcessor = new FaultyPartitionProcessor(); final EventProcessor eventProcessor = new EventProcessorBuilder() .eventHubClient(eventHubAsyncClient) .consumerGroup("test-consumer") .partitionProcessorFactory(() -> faultyPartitionProcessor) .partitionManager(partitionManager) .buildEventProcessor(); eventProcessor.start(); TimeUnit.SECONDS.sleep(10); eventProcessor.stop(); assertTrue(faultyPartitionProcessor.error); } /** * Tests process start spans error messages invoked for {@link EventProcessor}. * * @throws Exception if an error occurs while running the test. 
*/ @Test public void testErrorProcessSpans() throws Exception { final Tracer tracer1 = mock(Tracer.class); final List<Tracer> tracers = Collections.singletonList(tracer1); TracerProvider tracerProvider = new TracerProvider(tracers); when(eventHubAsyncClient.eventHubName()).thenReturn("test-eh"); when(eventHubAsyncClient.getPartitionIds()).thenReturn(Flux.just("1")); when(eventHubAsyncClient .createConsumer(anyString(), anyString(), any(EventPosition.class), any(EventHubConsumerOptions.class))) .thenReturn(consumer1); when(eventData1.sequenceNumber()).thenReturn(1L); when(eventData2.sequenceNumber()).thenReturn(2L); when(eventData1.offset()).thenReturn(1L); when(eventData2.offset()).thenReturn(100L); String diagnosticId = "00-08ee063508037b1719dddcbf248e30e2-1365c684eb25daed-01"; Map<String, Object> properties = new HashMap<>(); properties.put(DIAGNOSTIC_ID_KEY, diagnosticId); when(eventData1.properties()).thenReturn(properties); when(consumer1.receive()).thenReturn(Flux.just(eventData1)); when(tracer1.extractContext(eq(diagnosticId), any())).thenAnswer( invocation -> { Context passed = invocation.getArgument(1, Context.class); return passed.addData(SPAN_CONTEXT, "value"); } ); when(tracer1.start(eq("Azure.eventhubs.process"), any(), eq(ProcessKind.PROCESS))).thenAnswer( invocation -> { Context passed = invocation.getArgument(1, Context.class); return passed.addData(SPAN_CONTEXT, "value1") .addData("scope", (Closeable) () -> { }) .addData(OPENCENSUS_SPAN_KEY, "value2"); } ); final InMemoryPartitionManager partitionManager = new InMemoryPartitionManager(); final EventProcessor eventProcessor = new EventProcessor(eventHubAsyncClient, "test-consumer", FaultyPartitionProcessor::new, EventPosition.earliest(), partitionManager, tracerProvider); eventProcessor.start(); TimeUnit.SECONDS.sleep(10); eventProcessor.stop(); verify(tracer1, times(1)).extractContext(eq(diagnosticId), any()); verify(tracer1, times(1)).start(eq("Azure.eventhubs.process"), any(), 
eq(ProcessKind.PROCESS)); verify(tracer1, times(1)).end(eq(""), any(IllegalStateException.class), any()); } /** * Tests process start spans invoked for {@link EventProcessor}. * * @throws Exception if an error occurs while running the test. */ @Test public void testProcessSpans() throws Exception { final Tracer tracer1 = mock(Tracer.class); final List<Tracer> tracers = Collections.singletonList(tracer1); TracerProvider tracerProvider = new TracerProvider(tracers); when(eventHubAsyncClient.eventHubName()).thenReturn("test-eh"); when(eventHubAsyncClient.getPartitionIds()).thenReturn(Flux.just("1")); when(eventHubAsyncClient .createConsumer(anyString(), anyString(), any(EventPosition.class), any(EventHubConsumerOptions.class))) .thenReturn(consumer1); when(eventData1.sequenceNumber()).thenReturn(1L); when(eventData2.sequenceNumber()).thenReturn(2L); when(eventData1.offset()).thenReturn(1L); when(eventData2.offset()).thenReturn(100L); String diagnosticId = "00-08ee063508037b1719dddcbf248e30e2-1365c684eb25daed-01"; Map<String, Object> properties = new HashMap<>(); properties.put(DIAGNOSTIC_ID_KEY, diagnosticId); when(eventData1.properties()).thenReturn(properties); when(consumer1.receive()).thenReturn(Flux.just(eventData1)); when(tracer1.extractContext(eq(diagnosticId), any())).thenAnswer( invocation -> { Context passed = invocation.getArgument(1, Context.class); return passed.addData(SPAN_CONTEXT, "value"); } ); when(tracer1.start(eq("Azure.eventhubs.process"), any(), eq(ProcessKind.PROCESS))).thenAnswer( invocation -> { Context passed = invocation.getArgument(1, Context.class); return passed.addData(SPAN_CONTEXT, "value1").addData("scope", (Closeable) () -> { }).addData(OPENCENSUS_SPAN_KEY, "value2"); } ); final InMemoryPartitionManager partitionManager = new InMemoryPartitionManager(); final EventProcessor eventProcessor = new EventProcessor(eventHubAsyncClient, "test-consumer", TestPartitionProcessor::new, EventPosition.earliest(), partitionManager, tracerProvider); 
eventProcessor.start(); TimeUnit.SECONDS.sleep(10); eventProcessor.stop(); verify(tracer1, times(1)).extractContext(eq(diagnosticId), any()); verify(tracer1, times(1)).start(eq("Azure.eventhubs.process"), any(), eq(ProcessKind.PROCESS)); verify(tracer1, times(1)).end(eq("success"), isNull(), any()); } /** * Tests {@link EventProcessor} that processes events from an Event Hub configured with multiple * partitions. * * @throws Exception if an error occurs while running the test. */ @Test private static final class FaultyPartitionProcessor extends PartitionProcessor { boolean error; @Override public Mono<Void> processEvent(PartitionContext partitionContext, EventData eventData) { return Mono.error(new IllegalStateException()); } @Override public void processError(PartitionContext partitionContext, Throwable throwable) { error = true; } } private static final class TestPartitionProcessor extends PartitionProcessor { @Override public Mono<Void> processEvent(PartitionContext partitionContext, EventData eventData) { return partitionContext.updateCheckpoint(eventData); } } }
class EventProcessorTest { @Mock private EventHubAsyncClient eventHubAsyncClient; @Mock private EventHubAsyncConsumer consumer1, consumer2, consumer3; @Mock private EventData eventData1, eventData2, eventData3, eventData4; @Before public void setup() { MockitoAnnotations.initMocks(this); } @After public void teardown() { consumer1 = null; consumer2 = null; consumer3 = null; eventData1 = null; eventData2 = null; eventData3 = null; eventData4 = null; eventHubAsyncClient = null; Mockito.framework().clearInlineMocks(); } /** * Tests all the happy cases for {@link EventProcessor}. * * @throws Exception if an error occurs while running the test. */ @Test public void testWithSimplePartitionProcessor() throws Exception { when(eventHubAsyncClient.getEventHubName()).thenReturn("test-eh"); when(eventHubAsyncClient.getPartitionIds()).thenReturn(Flux.just("1")); when(eventHubAsyncClient .createConsumer(anyString(), anyString(), any(EventPosition.class), any(EventHubConsumerOptions.class))) .thenReturn(consumer1); when(consumer1.receive()).thenReturn(Flux.just(eventData1, eventData2)); when(eventData1.getSequenceNumber()).thenReturn(1L); when(eventData2.getSequenceNumber()).thenReturn(2L); when(eventData1.getOffset()).thenReturn(1L); when(eventData2.getOffset()).thenReturn(100L); final InMemoryPartitionManager partitionManager = new InMemoryPartitionManager(); final TestPartitionProcessor testPartitionProcessor = new TestPartitionProcessor(); final long beforeTest = System.currentTimeMillis(); final EventProcessor eventProcessor = new EventProcessorBuilder() .eventHubClient(eventHubAsyncClient) .consumerGroup("test-consumer") .partitionProcessorFactory(() -> testPartitionProcessor) .partitionManager(partitionManager) .buildEventProcessor(); eventProcessor.start(); TimeUnit.SECONDS.sleep(10); eventProcessor.stop(); assertNotNull(eventProcessor.getIdentifier()); StepVerifier.create(partitionManager.listOwnership("test-eh", "test-consumer")) .expectNextCount(1).verifyComplete(); 
StepVerifier.create(partitionManager.listOwnership("test-eh", "test-consumer")) .assertNext(partitionOwnership -> { assertEquals("Partition", "1", partitionOwnership.getPartitionId()); assertEquals("Consumer", "test-consumer", partitionOwnership.getConsumerGroupName()); assertEquals("EventHub name", "test-eh", partitionOwnership.getEventHubName()); assertEquals("Sequence number", 2, (long) partitionOwnership.getSequenceNumber()); assertEquals("Offset", Long.valueOf(100), partitionOwnership.getOffset()); assertEquals("OwnerId", eventProcessor.getIdentifier(), partitionOwnership.getOwnerId()); assertTrue("LastModifiedTime", partitionOwnership.getLastModifiedTime() >= beforeTest); assertTrue("LastModifiedTime", partitionOwnership.getLastModifiedTime() <= System.currentTimeMillis()); assertNotNull(partitionOwnership.getETag()); }).verifyComplete(); verify(eventHubAsyncClient, atLeastOnce()).getPartitionIds(); verify(eventHubAsyncClient, atLeastOnce()) .createConsumer(anyString(), anyString(), any(EventPosition.class), any(EventHubConsumerOptions.class)); verify(consumer1, atLeastOnce()).receive(); verify(consumer1, atLeastOnce()).close(); } /** * Tests {@link EventProcessor} with a partition processor that throws an exception when processing an event. * * @throws Exception if an error occurs while running the test. 
*/ @Test public void testWithFaultyPartitionProcessor() throws Exception { when(eventHubAsyncClient.getEventHubName()).thenReturn("test-eh"); when(eventHubAsyncClient.getPartitionIds()).thenReturn(Flux.just("1")); when(eventHubAsyncClient .createConsumer(anyString(), anyString(), any(EventPosition.class), any(EventHubConsumerOptions.class))) .thenReturn(consumer1); when(consumer1.receive()).thenReturn(Flux.just(eventData1)); final InMemoryPartitionManager partitionManager = new InMemoryPartitionManager(); final FaultyPartitionProcessor faultyPartitionProcessor = new FaultyPartitionProcessor(); final EventProcessor eventProcessor = new EventProcessorBuilder() .eventHubClient(eventHubAsyncClient) .consumerGroup("test-consumer") .partitionProcessorFactory(() -> faultyPartitionProcessor) .partitionManager(partitionManager) .buildEventProcessor(); eventProcessor.start(); TimeUnit.SECONDS.sleep(10); eventProcessor.stop(); assertTrue(faultyPartitionProcessor.error); } /** * Tests process start spans error messages invoked for {@link EventProcessor}. * * @throws Exception if an error occurs while running the test. 
*/ @Test public void testErrorProcessSpans() throws Exception { final Tracer tracer1 = mock(Tracer.class); final List<Tracer> tracers = Collections.singletonList(tracer1); TracerProvider tracerProvider = new TracerProvider(tracers); when(eventHubAsyncClient.getEventHubName()).thenReturn("test-eh"); when(eventHubAsyncClient.getPartitionIds()).thenReturn(Flux.just("1")); when(eventHubAsyncClient .createConsumer(anyString(), anyString(), any(EventPosition.class), any(EventHubConsumerOptions.class))) .thenReturn(consumer1); when(eventData1.getSequenceNumber()).thenReturn(1L); when(eventData2.getSequenceNumber()).thenReturn(2L); when(eventData1.getOffset()).thenReturn(1L); when(eventData2.getOffset()).thenReturn(100L); String diagnosticId = "00-08ee063508037b1719dddcbf248e30e2-1365c684eb25daed-01"; Map<String, Object> properties = new HashMap<>(); properties.put(DIAGNOSTIC_ID_KEY, diagnosticId); when(eventData1.getProperties()).thenReturn(properties); when(consumer1.receive()).thenReturn(Flux.just(eventData1)); when(tracer1.extractContext(eq(diagnosticId), any())).thenAnswer( invocation -> { Context passed = invocation.getArgument(1, Context.class); return passed.addData(SPAN_CONTEXT, "value"); } ); when(tracer1.start(eq("Azure.eventhubs.process"), any(), eq(ProcessKind.PROCESS))).thenAnswer( invocation -> { Context passed = invocation.getArgument(1, Context.class); return passed.addData(SPAN_CONTEXT, "value1") .addData("scope", (Closeable) () -> { }) .addData(OPENCENSUS_SPAN_KEY, "value2"); } ); final InMemoryPartitionManager partitionManager = new InMemoryPartitionManager(); final EventProcessor eventProcessor = new EventProcessor(eventHubAsyncClient, "test-consumer", FaultyPartitionProcessor::new, EventPosition.earliest(), partitionManager, tracerProvider); eventProcessor.start(); TimeUnit.SECONDS.sleep(10); eventProcessor.stop(); verify(tracer1, times(1)).extractContext(eq(diagnosticId), any()); verify(tracer1, times(1)).start(eq("Azure.eventhubs.process"), any(), 
eq(ProcessKind.PROCESS)); verify(tracer1, times(1)).end(eq(""), any(IllegalStateException.class), any()); } /** * Tests process start spans invoked for {@link EventProcessor}. * * @throws Exception if an error occurs while running the test. */ @Test public void testProcessSpans() throws Exception { final Tracer tracer1 = mock(Tracer.class); final List<Tracer> tracers = Collections.singletonList(tracer1); TracerProvider tracerProvider = new TracerProvider(tracers); when(eventHubAsyncClient.getEventHubName()).thenReturn("test-eh"); when(eventHubAsyncClient.getPartitionIds()).thenReturn(Flux.just("1")); when(eventHubAsyncClient .createConsumer(anyString(), anyString(), any(EventPosition.class), any(EventHubConsumerOptions.class))) .thenReturn(consumer1); when(eventData1.getSequenceNumber()).thenReturn(1L); when(eventData2.getSequenceNumber()).thenReturn(2L); when(eventData1.getOffset()).thenReturn(1L); when(eventData2.getOffset()).thenReturn(100L); String diagnosticId = "00-08ee063508037b1719dddcbf248e30e2-1365c684eb25daed-01"; Map<String, Object> properties = new HashMap<>(); properties.put(DIAGNOSTIC_ID_KEY, diagnosticId); when(eventData1.getProperties()).thenReturn(properties); when(consumer1.receive()).thenReturn(Flux.just(eventData1)); when(tracer1.extractContext(eq(diagnosticId), any())).thenAnswer( invocation -> { Context passed = invocation.getArgument(1, Context.class); return passed.addData(SPAN_CONTEXT, "value"); } ); when(tracer1.start(eq("Azure.eventhubs.process"), any(), eq(ProcessKind.PROCESS))).thenAnswer( invocation -> { Context passed = invocation.getArgument(1, Context.class); return passed.addData(SPAN_CONTEXT, "value1").addData("scope", (Closeable) () -> { return; }).addData(OPENCENSUS_SPAN_KEY, "value2"); } ); final InMemoryPartitionManager partitionManager = new InMemoryPartitionManager(); final EventProcessor eventProcessor = new EventProcessor(eventHubAsyncClient, "test-consumer", TestPartitionProcessor::new, EventPosition.earliest(), 
partitionManager, tracerProvider); eventProcessor.start(); TimeUnit.SECONDS.sleep(10); eventProcessor.stop(); verify(tracer1, times(1)).extractContext(eq(diagnosticId), any()); verify(tracer1, times(1)).start(eq("Azure.eventhubs.process"), any(), eq(ProcessKind.PROCESS)); verify(tracer1, times(1)).end(eq("success"), isNull(), any()); } /** * Tests {@link EventProcessor} that processes events from an Event Hub configured with multiple * partitions. * * @throws Exception if an error occurs while running the test. */ @Test private static final class FaultyPartitionProcessor extends PartitionProcessor { boolean error; @Override public Mono<Void> processEvent(PartitionContext partitionContext, EventData eventData) { return Mono.error(new IllegalStateException()); } @Override public void processError(PartitionContext partitionContext, Throwable throwable) { error = true; } } private static final class TestPartitionProcessor extends PartitionProcessor { @Override public Mono<Void> processEvent(PartitionContext partitionContext, EventData eventData) { return partitionContext.updateCheckpoint(eventData); } } }
Using .block() makes this sample a synchronous one and does not showcase how to use it asynchronously.
public static void main(String[] args) throws IllegalArgumentException { ZipkinTraceExporter.createAndRegister("http: TraceConfig traceConfig = Tracing.getTraceConfig(); TraceParams activeTraceParams = traceConfig.getActiveTraceParams(); traceConfig.updateActiveTraceParams(activeTraceParams.toBuilder().setSampler(Samplers.alwaysSample()).build()); Tracer tracer = Tracing.getTracer(); SecretAsyncClient client = new SecretClientBuilder() .endpoint("https: .credential(new DefaultAzureCredentialBuilder().build()) .buildAsyncClient(); Scope scope = tracer.spanBuilder("user-parent-span").startScopedSpan(); try { Context traceContext = Context.of(OPENCENSUS_SPAN_KEY, tracer.getCurrentSpan()); client.setSecret(new Secret("StorageAccountPassword", "password")) .then(client.setSecret(new Secret("BankAccountPassword", "password"))) .subscriberContext(traceContext) .block(); client.listSecrets() .subscriberContext(traceContext) .subscribe(secretBase -> client.getSecret(secretBase) .subscriberContext(traceContext) .subscribe(secret -> System.out.printf("Received secret with name %s and value %s%n", secret.getName(), secret.getValue()))); client.setSecret("BankAccountPassword", "new password") .subscriberContext(traceContext) .block(); client.listSecretVersions("BankAccountPassword") .subscriberContext(traceContext) .subscribe(secretBase -> System.out.printf("Received secret's version with name %s%n", secretBase.getName())); } finally { scope.close(); } Tracing.getExportComponent().shutdown(); }
client.setSecret("BankAccountPassword", "new password")
public static void main(String[] args) throws IllegalArgumentException, InterruptedException { ZipkinTraceExporter.createAndRegister("http: TraceConfig traceConfig = Tracing.getTraceConfig(); TraceParams activeTraceParams = traceConfig.getActiveTraceParams(); traceConfig.updateActiveTraceParams(activeTraceParams.toBuilder().setSampler(Samplers.alwaysSample()).build()); Tracer tracer = Tracing.getTracer(); SecretAsyncClient client = new SecretClientBuilder() .endpoint("https: .credential(new DefaultAzureCredentialBuilder().build()) .buildAsyncClient(); Semaphore semaphore = new Semaphore(1); Scope scope = tracer.spanBuilder("user-parent-span").startScopedSpan(); semaphore.acquire(); Context traceContext = Context.of(OPENCENSUS_SPAN_KEY, tracer.getCurrentSpan()); client.setSecret(new Secret("StorageAccountPassword", "password")) .then(client.setSecret(new Secret("BankAccountPassword", "password"))) .subscriberContext(traceContext) .subscribe(secretResponse -> System.out.printf("Secret is created with name %s and value %s %n", secretResponse.getName(), secretResponse.getValue()), err -> { System.out.printf("Error thrown when enqueue the message. Error message: %s%n", err.getMessage()); scope.close(); semaphore.release(); }, () -> { semaphore.release(); }); semaphore.acquire(); client.listSecrets() .subscriberContext(traceContext) .subscribe(secretBase -> client.getSecret(secretBase) .subscriberContext(traceContext) .subscribe(secret -> System.out.printf("Received secret with name %s and value %s%n", secret.getName(), secret.getValue()))); client.setSecret("BankAccountPassword", "new password") .subscriberContext(traceContext) .subscribe(secretResponse -> System.out.printf("Secret is created with name %s and value %s %n", secretResponse.getName(), secretResponse.getValue()), err -> { System.out.printf("Error thrown when enqueue the message. 
Error message: %s%n", err.getMessage()); scope.close(); semaphore.release(); }, () -> { semaphore.release(); }); semaphore.acquire(); client.listSecretVersions("BankAccountPassword") .subscriberContext(traceContext) .subscribe(secretBase -> System.out.printf("Received secret's version with name %s%n", secretBase.getName())); scope.close(); Tracing.getExportComponent().shutdown(); }
class ListOperationsAsync { /** * Authenticates with the key vault and shows how to list secrets and list versions of a specific secret in the key * vault with trace spans exported to zipkin. * * Please refer to the <a href=https: * using a zipkin exporter. * * @param args Unused. Arguments to the program. * @throws IllegalArgumentException when invalid key vault endpoint is passed. */ }
class ListOperationsAsync { /** * Authenticates with the key vault and shows how to list secrets and list versions of a specific secret in the key * vault with trace spans exported to Zipkin. * * Please refer to the <a href=https: * using a Zipkin exporter. * * @param args Unused. Arguments to the program. * @throws IllegalArgumentException when invalid key vault endpoint is passed. * @throws InterruptedException when the thread is interrupted in sleep mode. */ }
removed.
public static void main(String[] args) throws IllegalArgumentException { ZipkinTraceExporter.createAndRegister("http: TraceConfig traceConfig = Tracing.getTraceConfig(); TraceParams activeTraceParams = traceConfig.getActiveTraceParams(); traceConfig.updateActiveTraceParams(activeTraceParams.toBuilder().setSampler(Samplers.alwaysSample()).build()); Tracer tracer = Tracing.getTracer(); SecretAsyncClient client = new SecretClientBuilder() .endpoint("https: .credential(new DefaultAzureCredentialBuilder().build()) .buildAsyncClient(); Scope scope = tracer.spanBuilder("user-parent-span").startScopedSpan(); try { Context traceContext = Context.of(OPENCENSUS_SPAN_KEY, tracer.getCurrentSpan()); client.setSecret(new Secret("StorageAccountPassword", "password")) .then(client.setSecret(new Secret("BankAccountPassword", "password"))) .subscriberContext(traceContext) .block(); client.listSecrets() .subscriberContext(traceContext) .subscribe(secretBase -> client.getSecret(secretBase) .subscriberContext(traceContext) .subscribe(secret -> System.out.printf("Received secret with name %s and value %s%n", secret.getName(), secret.getValue()))); client.setSecret("BankAccountPassword", "new password") .subscriberContext(traceContext) .block(); client.listSecretVersions("BankAccountPassword") .subscriberContext(traceContext) .subscribe(secretBase -> System.out.printf("Received secret's version with name %s%n", secretBase.getName())); } finally { scope.close(); } Tracing.getExportComponent().shutdown(); }
client.setSecret("BankAccountPassword", "new password")
public static void main(String[] args) throws IllegalArgumentException, InterruptedException { ZipkinTraceExporter.createAndRegister("http: TraceConfig traceConfig = Tracing.getTraceConfig(); TraceParams activeTraceParams = traceConfig.getActiveTraceParams(); traceConfig.updateActiveTraceParams(activeTraceParams.toBuilder().setSampler(Samplers.alwaysSample()).build()); Tracer tracer = Tracing.getTracer(); SecretAsyncClient client = new SecretClientBuilder() .endpoint("https: .credential(new DefaultAzureCredentialBuilder().build()) .buildAsyncClient(); Semaphore semaphore = new Semaphore(1); Scope scope = tracer.spanBuilder("user-parent-span").startScopedSpan(); semaphore.acquire(); Context traceContext = Context.of(OPENCENSUS_SPAN_KEY, tracer.getCurrentSpan()); client.setSecret(new Secret("StorageAccountPassword", "password")) .then(client.setSecret(new Secret("BankAccountPassword", "password"))) .subscriberContext(traceContext) .subscribe(secretResponse -> System.out.printf("Secret is created with name %s and value %s %n", secretResponse.getName(), secretResponse.getValue()), err -> { System.out.printf("Error thrown when enqueue the message. Error message: %s%n", err.getMessage()); scope.close(); semaphore.release(); }, () -> { semaphore.release(); }); semaphore.acquire(); client.listSecrets() .subscriberContext(traceContext) .subscribe(secretBase -> client.getSecret(secretBase) .subscriberContext(traceContext) .subscribe(secret -> System.out.printf("Received secret with name %s and value %s%n", secret.getName(), secret.getValue()))); client.setSecret("BankAccountPassword", "new password") .subscriberContext(traceContext) .subscribe(secretResponse -> System.out.printf("Secret is created with name %s and value %s %n", secretResponse.getName(), secretResponse.getValue()), err -> { System.out.printf("Error thrown when enqueue the message. 
Error message: %s%n", err.getMessage()); scope.close(); semaphore.release(); }, () -> { semaphore.release(); }); semaphore.acquire(); client.listSecretVersions("BankAccountPassword") .subscriberContext(traceContext) .subscribe(secretBase -> System.out.printf("Received secret's version with name %s%n", secretBase.getName())); scope.close(); Tracing.getExportComponent().shutdown(); }
class ListOperationsAsync { /** * Authenticates with the key vault and shows how to list secrets and list versions of a specific secret in the key * vault with trace spans exported to zipkin. * * Please refer to the <a href=https: * using a zipkin exporter. * * @param args Unused. Arguments to the program. * @throws IllegalArgumentException when invalid key vault endpoint is passed. */ }
class ListOperationsAsync { /** * Authenticates with the key vault and shows how to list secrets and list versions of a specific secret in the key * vault with trace spans exported to Zipkin. * * Please refer to the <a href=https: * using a Zipkin exporter. * * @param args Unused. Arguments to the program. * @throws IllegalArgumentException when invalid key vault endpoint is passed. * @throws InterruptedException when the thread is interrupted in sleep mode. */ }
Is there an issue created for this so we dont lose track of it?
public void setup() throws Exception { org.junit.Assume.assumeTrue("Skipping these tests until we mock or record it", false); cachePersister = new CachePersister.Builder().build(); accessAspect = new PersistentTokenCacheAccessAspect(); confApp = ConfidentialClientApplication.builder(TestConfiguration.CONFIDENTIAL_CLIENT_ID, ClientCredentialFactory.create(TestConfiguration.CONFIDENTIAL_CLIENT_SECRET)) .authority(TestConfiguration.TENANT_SPECIFIC_AUTHORITY) .setTokenCacheAccessAspect(accessAspect) .build(); confParameters = ClientCredentialParameters.builder( Collections.singleton(TestConfiguration.GRAPH_DEFAULT_SCOPE)) .build(); }
org.junit.Assume.assumeTrue("Skipping these tests until we mock or record it", false);
public void setup() throws Exception { org.junit.Assume.assumeTrue("Skipping these tests until we mock or record it", false); cachePersister = new CachePersister.Builder().build(); accessAspect = new PersistentTokenCacheAccessAspect(); confApp = ConfidentialClientApplication.builder(TestConfiguration.CONFIDENTIAL_CLIENT_ID, ClientCredentialFactory.create(TestConfiguration.CONFIDENTIAL_CLIENT_SECRET)) .authority(TestConfiguration.TENANT_SPECIFIC_AUTHORITY) .setTokenCacheAccessAspect(accessAspect) .build(); confParameters = ClientCredentialParameters.builder( Collections.singleton(TestConfiguration.GRAPH_DEFAULT_SCOPE)) .build(); }
class CrossProgramVSTest { CachePersister cachePersister; PersistentTokenCacheAccessAspect accessAspect; private ConfidentialClientApplication confApp; private ClientCredentialParameters confParameters; private int count = 0; @Before @Test public void readCacheAfterVSAzureLogin() { byte[] currJsonBytes = cachePersister.readCache(); String currJson = new String(currJsonBytes); JsonObject jsonObj = new JsonParser().parse(currJson).getAsJsonObject(); Assert.assertTrue(jsonObj.has("AccessToken")); Assert.assertTrue(jsonObj.has("RefreshToken")); Assert.assertTrue(jsonObj.has("IdToken")); Assert.assertTrue(jsonObj.has("Account")); Assert.assertTrue(jsonObj.has("AppMetadata")); System.out.println(currJson); count = jsonObj.get("AccessToken").getAsJsonObject().keySet().size(); } @Test public void writeToSameCacheFileAfterVSAzureLogin() { String currJson = new String(cachePersister.readCache()); JsonObject jsonObj = new JsonParser().parse(currJson).getAsJsonObject(); int set = jsonObj.get("AccessToken").getAsJsonObject().keySet().size(); CompletableFuture<IAuthenticationResult> result = confApp.acquireToken(confParameters); result.handle((res, ex) -> { if (ex != null) { System.out.println("Oops! 
We have an exception - " + ex.getMessage()); return "Unknown!"; } return res; }).join(); currJson = new String(cachePersister.readCache()); jsonObj = new JsonParser().parse(currJson).getAsJsonObject(); int newSet = jsonObj.get("AccessToken").getAsJsonObject().keySet().size(); Assert.assertEquals(newSet, set + 1); count++; System.out.println(currJson); } @Test public void countCache() { byte[] currJsonBytes = cachePersister.readCache(); String currJson = new String(currJsonBytes); JsonObject jsonObj = new JsonParser().parse(currJson).getAsJsonObject(); int newSet = jsonObj.get("AccessToken").getAsJsonObject().keySet().size(); System.out.println(newSet); } @Test public void readCacheAfterPowershellAzureLogin() { byte[] currJsonBytes = cachePersister.readCache(); String currJson = new String(currJsonBytes); JsonObject jsonObj = new JsonParser().parse(currJson).getAsJsonObject(); System.out.println(currJson); int newSet = jsonObj.get("AccessToken").getAsJsonObject().keySet().size(); Assert.assertEquals(newSet, 6); count++; } }
class CrossProgramVSTest { CachePersister cachePersister; PersistentTokenCacheAccessAspect accessAspect; private ConfidentialClientApplication confApp; private ClientCredentialParameters confParameters; private int count = 0; @Before @Test public void readCacheAfterVSAzureLogin() { byte[] currJsonBytes = cachePersister.readCache(); String currJson = new String(currJsonBytes); JsonObject jsonObj = new JsonParser().parse(currJson).getAsJsonObject(); Assert.assertTrue(jsonObj.has("AccessToken")); Assert.assertTrue(jsonObj.has("RefreshToken")); Assert.assertTrue(jsonObj.has("IdToken")); Assert.assertTrue(jsonObj.has("Account")); Assert.assertTrue(jsonObj.has("AppMetadata")); System.out.println(currJson); count = jsonObj.get("AccessToken").getAsJsonObject().keySet().size(); } @Test public void writeToSameCacheFileAfterVSAzureLogin() { String currJson = new String(cachePersister.readCache()); JsonObject jsonObj = new JsonParser().parse(currJson).getAsJsonObject(); int set = jsonObj.get("AccessToken").getAsJsonObject().keySet().size(); CompletableFuture<IAuthenticationResult> result = confApp.acquireToken(confParameters); result.handle((res, ex) -> { if (ex != null) { System.out.println("Oops! 
We have an exception - " + ex.getMessage()); return "Unknown!"; } return res; }).join(); currJson = new String(cachePersister.readCache()); jsonObj = new JsonParser().parse(currJson).getAsJsonObject(); int newSet = jsonObj.get("AccessToken").getAsJsonObject().keySet().size(); Assert.assertEquals(newSet, set + 1); count++; System.out.println(currJson); } @Test public void countCache() { byte[] currJsonBytes = cachePersister.readCache(); String currJson = new String(currJsonBytes); JsonObject jsonObj = new JsonParser().parse(currJson).getAsJsonObject(); int newSet = jsonObj.get("AccessToken").getAsJsonObject().keySet().size(); System.out.println(newSet); } @Test public void readCacheAfterPowershellAzureLogin() { byte[] currJsonBytes = cachePersister.readCache(); String currJson = new String(currJsonBytes); JsonObject jsonObj = new JsonParser().parse(currJson).getAsJsonObject(); System.out.println(currJson); int newSet = jsonObj.get("AccessToken").getAsJsonObject().keySet().size(); Assert.assertEquals(newSet, 6); count++; } }
Created at https://github.com/Azure/azure-sdk-for-java/issues/5328 and added to next preview release
public void setup() throws Exception { org.junit.Assume.assumeTrue("Skipping these tests until we mock or record it", false); cachePersister = new CachePersister.Builder().build(); accessAspect = new PersistentTokenCacheAccessAspect(); confApp = ConfidentialClientApplication.builder(TestConfiguration.CONFIDENTIAL_CLIENT_ID, ClientCredentialFactory.create(TestConfiguration.CONFIDENTIAL_CLIENT_SECRET)) .authority(TestConfiguration.TENANT_SPECIFIC_AUTHORITY) .setTokenCacheAccessAspect(accessAspect) .build(); confParameters = ClientCredentialParameters.builder( Collections.singleton(TestConfiguration.GRAPH_DEFAULT_SCOPE)) .build(); }
org.junit.Assume.assumeTrue("Skipping these tests until we mock or record it", false);
public void setup() throws Exception { org.junit.Assume.assumeTrue("Skipping these tests until we mock or record it", false); cachePersister = new CachePersister.Builder().build(); accessAspect = new PersistentTokenCacheAccessAspect(); confApp = ConfidentialClientApplication.builder(TestConfiguration.CONFIDENTIAL_CLIENT_ID, ClientCredentialFactory.create(TestConfiguration.CONFIDENTIAL_CLIENT_SECRET)) .authority(TestConfiguration.TENANT_SPECIFIC_AUTHORITY) .setTokenCacheAccessAspect(accessAspect) .build(); confParameters = ClientCredentialParameters.builder( Collections.singleton(TestConfiguration.GRAPH_DEFAULT_SCOPE)) .build(); }
class CrossProgramVSTest { CachePersister cachePersister; PersistentTokenCacheAccessAspect accessAspect; private ConfidentialClientApplication confApp; private ClientCredentialParameters confParameters; private int count = 0; @Before @Test public void readCacheAfterVSAzureLogin() { byte[] currJsonBytes = cachePersister.readCache(); String currJson = new String(currJsonBytes); JsonObject jsonObj = new JsonParser().parse(currJson).getAsJsonObject(); Assert.assertTrue(jsonObj.has("AccessToken")); Assert.assertTrue(jsonObj.has("RefreshToken")); Assert.assertTrue(jsonObj.has("IdToken")); Assert.assertTrue(jsonObj.has("Account")); Assert.assertTrue(jsonObj.has("AppMetadata")); System.out.println(currJson); count = jsonObj.get("AccessToken").getAsJsonObject().keySet().size(); } @Test public void writeToSameCacheFileAfterVSAzureLogin() { String currJson = new String(cachePersister.readCache()); JsonObject jsonObj = new JsonParser().parse(currJson).getAsJsonObject(); int set = jsonObj.get("AccessToken").getAsJsonObject().keySet().size(); CompletableFuture<IAuthenticationResult> result = confApp.acquireToken(confParameters); result.handle((res, ex) -> { if (ex != null) { System.out.println("Oops! 
We have an exception - " + ex.getMessage()); return "Unknown!"; } return res; }).join(); currJson = new String(cachePersister.readCache()); jsonObj = new JsonParser().parse(currJson).getAsJsonObject(); int newSet = jsonObj.get("AccessToken").getAsJsonObject().keySet().size(); Assert.assertEquals(newSet, set + 1); count++; System.out.println(currJson); } @Test public void countCache() { byte[] currJsonBytes = cachePersister.readCache(); String currJson = new String(currJsonBytes); JsonObject jsonObj = new JsonParser().parse(currJson).getAsJsonObject(); int newSet = jsonObj.get("AccessToken").getAsJsonObject().keySet().size(); System.out.println(newSet); } @Test public void readCacheAfterPowershellAzureLogin() { byte[] currJsonBytes = cachePersister.readCache(); String currJson = new String(currJsonBytes); JsonObject jsonObj = new JsonParser().parse(currJson).getAsJsonObject(); System.out.println(currJson); int newSet = jsonObj.get("AccessToken").getAsJsonObject().keySet().size(); Assert.assertEquals(newSet, 6); count++; } }
class CrossProgramVSTest { CachePersister cachePersister; PersistentTokenCacheAccessAspect accessAspect; private ConfidentialClientApplication confApp; private ClientCredentialParameters confParameters; private int count = 0; @Before @Test public void readCacheAfterVSAzureLogin() { byte[] currJsonBytes = cachePersister.readCache(); String currJson = new String(currJsonBytes); JsonObject jsonObj = new JsonParser().parse(currJson).getAsJsonObject(); Assert.assertTrue(jsonObj.has("AccessToken")); Assert.assertTrue(jsonObj.has("RefreshToken")); Assert.assertTrue(jsonObj.has("IdToken")); Assert.assertTrue(jsonObj.has("Account")); Assert.assertTrue(jsonObj.has("AppMetadata")); System.out.println(currJson); count = jsonObj.get("AccessToken").getAsJsonObject().keySet().size(); } @Test public void writeToSameCacheFileAfterVSAzureLogin() { String currJson = new String(cachePersister.readCache()); JsonObject jsonObj = new JsonParser().parse(currJson).getAsJsonObject(); int set = jsonObj.get("AccessToken").getAsJsonObject().keySet().size(); CompletableFuture<IAuthenticationResult> result = confApp.acquireToken(confParameters); result.handle((res, ex) -> { if (ex != null) { System.out.println("Oops! 
We have an exception - " + ex.getMessage()); return "Unknown!"; } return res; }).join(); currJson = new String(cachePersister.readCache()); jsonObj = new JsonParser().parse(currJson).getAsJsonObject(); int newSet = jsonObj.get("AccessToken").getAsJsonObject().keySet().size(); Assert.assertEquals(newSet, set + 1); count++; System.out.println(currJson); } @Test public void countCache() { byte[] currJsonBytes = cachePersister.readCache(); String currJson = new String(currJsonBytes); JsonObject jsonObj = new JsonParser().parse(currJson).getAsJsonObject(); int newSet = jsonObj.get("AccessToken").getAsJsonObject().keySet().size(); System.out.println(newSet); } @Test public void readCacheAfterPowershellAzureLogin() { byte[] currJsonBytes = cachePersister.readCache(); String currJson = new String(currJsonBytes); JsonObject jsonObj = new JsonParser().parse(currJson).getAsJsonObject(); System.out.println(currJson); int newSet = jsonObj.get("AccessToken").getAsJsonObject().keySet().size(); Assert.assertEquals(newSet, 6); count++; } }
Shouldn't the `key()` and `value()` method also be renamed? Same in async client too. ``` return setSetting(new ConfigurationSetting().setKey(key).setValue(value), Context.NONE).getValue(); ```
public ConfigurationSetting setSetting(String key, String value) { return setSetting(new ConfigurationSetting().key(key).value(value), Context.NONE).getValue(); }
return setSetting(new ConfigurationSetting().key(key).value(value), Context.NONE).getValue();
public ConfigurationSetting setSetting(String key, String value) { return setSettingWithResponse(new ConfigurationSetting().setKey(key).setValue(value), Context.NONE).getValue(); }
class ConfigurationClient { private final ConfigurationAsyncClient client; /** * Creates a ConfigurationClient that sends requests to the configuration service at {@code serviceEndpoint}. * Each service call goes through the {@code pipeline}. * * @param client The {@link ConfigurationAsyncClient} that the client routes its request through. */ ConfigurationClient(ConfigurationAsyncClient client) { this.client = client; } /** * Adds a configuration value in the service if that key does not exist. * * <p><strong>Code Samples</strong></p> * * <p>Add a setting with the key "prodDBConnection" and value "db_connection".</p> * * {@codesnippet com.azure.data.appconfiguration.ConfigurationClient.addSetting * * @param key The key of the configuration setting to add. * @param value The value associated with this configuration setting key. * @return The {@link ConfigurationSetting} that was created, or {@code null}, if a key collision occurs or the key * is an invalid value (which will also throw ServiceRequestException described below). * @throws IllegalArgumentException If {@code key} is {@code null}. * @throws ResourceModifiedException If a ConfigurationSetting with the same key exists. * @throws HttpResponseException If {@code key} is an empty string. */ @ServiceMethod(returns = ReturnType.SINGLE) public ConfigurationSetting addSetting(String key, String value) { return addSetting(new ConfigurationSetting().key(key).value(value), Context.NONE).getValue(); } /** * Adds a configuration value in the service if that key and label does not exist. The label value of the * ConfigurationSetting is optional. * * <p><strong>Code Samples</strong></p> * * <p>Add a setting with the key "prodDBConnection", label "westUS", and value "db_connection".</p> * * {@codesnippet com.azure.data.appconfiguration.ConfigurationClient.addSetting * * @param setting The setting to add to the configuration service. 
* @return The {@link ConfigurationSetting} that was created, or {@code null}, if a key collision occurs or the key * is an invalid value (which will also throw ServiceRequestException described below). * @throws NullPointerException If {@code setting} is {@code null}. * @throws IllegalArgumentException If {@link ConfigurationSetting * @throws ResourceModifiedException If a ConfigurationSetting with the same key and label exists. * @throws HttpResponseException If {@code key} is an empty string. */ @ServiceMethod(returns = ReturnType.SINGLE) public ConfigurationSetting addSetting(ConfigurationSetting setting) { return addSetting(setting, Context.NONE).getValue(); } /** * Adds a configuration value in the service if that key and label does not exist. The label value of the * ConfigurationSetting is optional. * * <p><strong>Code Samples</strong></p> * * <p>Add a setting with the key "prodDBConnection", label "westUS", and value "db_connection".</p> * * {@codesnippet com.azure.data.appconfiguration.ConfigurationClient.addSettingWithResponse * * @param setting The setting to add to the configuration service. * @param context Additional context that is passed through the Http pipeline during the service call. * @return A REST response containing the the {@link ConfigurationSetting} that was created, or {@code null}, if a * key collision occurs or the key is an invalid value (which will also throw ServiceRequestException described * below). * @throws NullPointerException If {@code setting} is {@code null}. * @throws IllegalArgumentException If {@link ConfigurationSetting * @throws ResourceModifiedException If a ConfigurationSetting with the same key and label exists. * @throws HttpResponseException If {@code key} is an empty string. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Response<ConfigurationSetting> addSettingWithResponse(ConfigurationSetting setting, Context context) { return addSetting(setting, context); } private Response<ConfigurationSetting> addSetting(ConfigurationSetting setting, Context context) { return client.addSetting(setting, context).block(); } /** * Creates or updates a configuration value in the service with the given key. * * <p><strong>Code Samples</strong></p> * * <p>Add a setting with the key "prodDBConnection" and value "db_connection".</p> * * {@codesnippet com.azure.data.appconfiguration.ConfigurationClient.setSetting * * @param key The key of the configuration setting to create or update. * @param value The value of this configuration setting. * @return The {@link ConfigurationSetting} that was created or updated, or {@code null}, if the key is an invalid * value (which will also throw ServiceRequestException described below). * @throws IllegalArgumentException If {@code key} is {@code null}. * @throws ResourceModifiedException If the setting exists and is locked. * @throws HttpResponseException If {@code key} is an empty string. */ @ServiceMethod(returns = ReturnType.SINGLE) /** * Creates or updates a configuration value in the service. Partial updates are not supported and the entire * configuration setting is updated. * * If {@link ConfigurationSetting * setting's etag matches. If the etag's value is equal to the wildcard character ({@code "*"}), the setting * will always be updated. * * <p><strong>Code Samples</strong></p> * * <p>Add a setting with the key "prodDBConnection", label "westUS", and value "db_connection". <p> * * {@codesnippet com.azure.data.appconfiguration.ConfigurationClient.setSetting * * @param setting The configuration setting to create or update. 
* @return The {@link ConfigurationSetting} that was created or updated, or {@code null}, if the key is an invalid * value, the setting is locked, or an etag was provided but does not match the service's current etag value * (which will also throw ServiceRequestException described below). * @throws NullPointerException If {@code setting} is {@code null}. * @throws IllegalArgumentException If {@link ConfigurationSetting * @throws ResourceModifiedException If the {@link ConfigurationSetting * wildcard character, and the current configuration value's etag does not match, or the setting exists and is * locked. * @throws HttpResponseException If {@code key} is an empty string. */ @ServiceMethod(returns = ReturnType.SINGLE) public ConfigurationSetting setSetting(ConfigurationSetting setting) { return setSetting(setting, Context.NONE).getValue(); } /** * Creates or updates a configuration value in the service. Partial updates are not supported and the entire * configuration setting is updated. * * If {@link ConfigurationSetting * setting's etag matches. If the etag's value is equal to the wildcard character ({@code "*"}), the setting * will always be updated. * * <p><strong>Code Samples</strong></p> * * <p>Add a setting with the key "prodDBConnection" and value "db_connection".</p> * * {@codesnippet com.azure.data.appconfiguration.ConfigurationClient.setSettingWithResponse * * @param setting The configuration setting to create or update. * @param context Additional context that is passed through the Http pipeline during the service call. * @return The {@link ConfigurationSetting} that was created or updated, or {@code null}, if the key is an invalid * value, the setting is locked, or an etag was provided but does not match the service's current etag value * (which will also throw ServiceRequestException described below). * @throws NullPointerException If {@code setting} is {@code null}. 
* @throws IllegalArgumentException If {@link ConfigurationSetting * @throws ResourceModifiedException If the {@link ConfigurationSetting * wildcard character, and the current configuration value's etag does not match, or the setting exists and is * locked. * @throws HttpResponseException If {@code key} is an empty string. */ public Response<ConfigurationSetting> setSettingWithResponse(ConfigurationSetting setting, Context context) { return client.setSetting(setting, context).block(); } private Response<ConfigurationSetting> setSetting(ConfigurationSetting setting, Context context) { return client.setSetting(setting, context).block(); } /** * Updates an existing configuration value in the service with the given key. The setting must already exist. * * <p><strong>Code Samples</strong></p> * * <p>Update a setting with the key "prodDBConnection" to have the value "updated_db_connection".</p> * * {@codesnippet com.azure.data.appconfiguration.ConfigurationClient.updateSetting * * @param key The key of the configuration setting to update. * @param value The updated value of this configuration setting. * @return The {@link ConfigurationSetting} that was updated, or {@code null}, if the configuration value does not * exist, is locked, or the key is an invalid value (which will also throw ServiceRequestException described * below). * @throws IllegalArgumentException If {@code key} is {@code null}. * @throws HttpResponseException If a ConfigurationSetting with the key does not exist or the configuration value is * locked. * @throws HttpResponseException If {@code key} is an empty string. */ @ServiceMethod(returns = ReturnType.SINGLE) public ConfigurationSetting updateSetting(String key, String value) { return updateSetting(new ConfigurationSetting().key(key).value(value), Context.NONE).getValue(); } /** * Updates an existing configuration value in the service. The setting must already exist. Partial updates are not * supported, the entire configuration value is replaced. 
* * If {@link ConfigurationSetting * * <p><strong>Code Samples</strong></p> * * <p>Update the setting with the key-label pair "prodDBConnection"-"westUS" to have the value * "updated_db_connection".</p> * * {@codesnippet com.azure.data.applicationconfig.configurationclient.updateSetting * * @param setting The setting to add or update in the service. * @return The {@link ConfigurationSetting} that was updated, or {@code null}, if the configuration value does not * exist, is locked, or the key is an invalid value (which will also throw ServiceRequestException described * below). * @throws NullPointerException If {@code setting} is {@code null}. * @throws IllegalArgumentException If {@link ConfigurationSetting * @throws ResourceModifiedException If a ConfigurationSetting with the same key and label does not exist, the * setting is locked, or {@link ConfigurationSetting * value. * @throws HttpResponseException If {@code key} is an empty string. */ @ServiceMethod(returns = ReturnType.SINGLE) public ConfigurationSetting updateSetting(ConfigurationSetting setting) { return updateSetting(setting, Context.NONE).getValue(); } /** * Updates an existing configuration value in the service. The setting must already exist. Partial updates are not * supported, the entire configuration value is replaced. * * If {@link ConfigurationSetting * * <p><strong>Code Samples</strong></p> * * <p>Update the setting with the key-label pair "prodDBConnection"-"westUS" to have the value * "updated_db_connection".</p> * * {@codesnippet com.azure.data.applicationconfig.configurationclient.updateSettingWithResponse * * @param setting The setting to add or update in the service. * @param context Additional context that is passed through the Http pipeline during the service call. 
* @return A REST response containing the {@link ConfigurationSetting} that was updated, or {@code null}, if the * configuration value does not exist, is locked, or the key is an invalid value (which will also throw * ServiceRequestException described below). * @throws NullPointerException If {@code setting} is {@code null}. * @throws IllegalArgumentException If {@link ConfigurationSetting * @throws ResourceModifiedException If a ConfigurationSetting with the same key and label does not exist, the * setting is locked, or {@link ConfigurationSetting * value. * @throws HttpResponseException If {@code key} is an empty string. */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<ConfigurationSetting> updateSettingWithResponse(ConfigurationSetting setting, Context context) { return updateSetting(setting, context); } private Response<ConfigurationSetting> updateSetting(ConfigurationSetting setting, Context context) { return client.updateSetting(setting, context).block(); } /** * Attempts to get a ConfigurationSetting that matches the {@code key}. * * <p><strong>Code Samples</strong></p> * * <p>Retrieve the setting with the key "prodDBConnection".</p> * * {@codesnippet com.azure.data.applicationconfig.configurationclient.getSetting * * @param key The key of the setting to retrieve. * @return The {@link ConfigurationSetting} stored in the service, or {@code null}, if the configuration value does * not exist or the key is an invalid value (which will also throw ServiceRequestException described below). * @throws IllegalArgumentException If {@code key} is {@code null}. * @throws ResourceNotFoundException If a ConfigurationSetting with {@code key} does not exist. * @throws HttpResponseException If {@code key} is an empty string. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public ConfigurationSetting getSetting(String key) { return getSetting(new ConfigurationSetting().key(key), Context.NONE).getValue(); } /** * Attempts to get the ConfigurationSetting given the {@code key}, optional {@code label}. * * <p><strong>Code Samples</strong></p> * * <p>Retrieve the setting with the key "prodDBConnection".</p> * * {@codesnippet com.azure.data.applicationconfig.configurationclient.getSetting * * @param setting The setting to retrieve based on its key and optional label combination. * @return The {@link ConfigurationSetting} stored in the service, or {@code null}, if the configuration value does * not exist or the key is an invalid value (which will also throw ServiceRequestException described below). * @throws NullPointerException If {@code setting} is {@code null}. * @throws IllegalArgumentException If {@link ConfigurationSetting * @throws ResourceNotFoundException If a ConfigurationSetting with the same key and label does not exist. * @throws HttpResponseException If the {@code} key is an empty string. */ @ServiceMethod(returns = ReturnType.SINGLE) public ConfigurationSetting getSetting(ConfigurationSetting setting) { return getSetting(setting, Context.NONE).getValue(); } /** * Attempts to get the ConfigurationSetting given the {@code key}, optional {@code label}. * * <p><strong>Code Samples</strong></p> * * <p>Retrieve the setting with the key "prodDBConnection".</p> * * {@codesnippet com.azure.data.applicationconfig.configurationclient.getSettingWithResponse * * @param setting The setting to retrieve based on its key and optional label combination. * @param context Additional context that is passed through the Http pipeline during the service call. 
* @return A REST response containg the {@link ConfigurationSetting} stored in the service, or {@code null}, if the * configuration value does not exist or the key is an invalid value (which will also throw * ServiceRequestException described below). * @throws NullPointerException If {@code setting} is {@code null}. * @throws IllegalArgumentException If {@link ConfigurationSetting * @throws ResourceNotFoundException If a ConfigurationSetting with the same key and label does not exist. * @throws HttpResponseException If the {@code} key is an empty string. */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<ConfigurationSetting> getSettingWithResponse(ConfigurationSetting setting, Context context) { return getSetting(setting, context); } private Response<ConfigurationSetting> getSetting(ConfigurationSetting setting, Context context) { return client.getSetting(setting, context).block(); } /** * Deletes the ConfigurationSetting with a matching {@code key}. * * <p><strong>Code Samples</strong></p> * * <p>Delete the setting with the key "prodDBConnection".</p> * * {@codesnippet com.azure.data.applicationconfig.configurationclient.deleteSetting * * @param key The key of the setting to delete. * @return The deleted ConfigurationSetting or {@code null} if it didn't exist. {@code null} is also returned if * the {@code key} is an invalid value (which will also throw ServiceRequestException described below). * @throws IllegalArgumentException If {@code key} is {@code null}. * @throws ResourceModifiedException If the ConfigurationSetting is locked. * @throws HttpResponseException If {@code key} is an empty string. */ @ServiceMethod(returns = ReturnType.SINGLE) public ConfigurationSetting deleteSetting(String key) { return deleteSetting(new ConfigurationSetting().key(key), Context.NONE).getValue(); } /** * Deletes the {@link ConfigurationSetting} with a matching key, along with the given label and etag. 
* * If {@link ConfigurationSetting * then the setting is <b>only</b> deleted if the etag matches the current etag; this means that no one has updated * the ConfigurationSetting yet. * * <p><strong>Code Samples</strong></p> * * <p>Delete the setting with the key "prodDBConnection".</p> * * {@codesnippet com.azure.data.applicationconfig.configurationclient.deleteSetting * * @param setting The ConfigurationSetting to delete. * @return The deleted ConfigurationSetting or {@code null} if didn't exist. {@code null} is also returned if * the {@code key} is an invalid value or {@link ConfigurationSetting * current etag (which will also throw ServiceRequestException described below). * @throws IllegalArgumentException If {@link ConfigurationSetting * @throws NullPointerException When {@code setting} is {@code null}. * @throws ResourceModifiedException If the ConfigurationSetting is locked. * @throws ResourceNotFoundException If {@link ConfigurationSetting * character, and does not match the current etag value. * @throws HttpResponseException If {@code key} is an empty string. */ @ServiceMethod(returns = ReturnType.SINGLE) public ConfigurationSetting deleteSetting(ConfigurationSetting setting) { return deleteSetting(setting, Context.NONE).getValue(); } /** * Deletes the {@link ConfigurationSetting} with a matching key, along with the given label and etag. * * If {@link ConfigurationSetting * then the setting is <b>only</b> deleted if the etag matches the current etag; this means that no one has updated * the ConfigurationSetting yet. * * <p><strong>Code Samples</strong></p> * * <p>Delete the setting with the key "prodDBConnection".</p> * * {@codesnippet com.azure.data.applicationconfig.configurationclient.deleteSettingWithResponse * * @param setting The ConfigurationSetting to delete. * @param context Additional context that is passed through the Http pipeline during the service call. 
* @return A REST response containing the deleted ConfigurationSetting or {@code null} if didn't exist. {@code null} * is also returned if the {@code key} is an invalid value or {@link ConfigurationSetting * but does not match the current etag (which will also throw ServiceRequestException described below). * @throws IllegalArgumentException If {@link ConfigurationSetting * @throws NullPointerException When {@code setting} is {@code null}. * @throws ResourceModifiedException If the ConfigurationSetting is locked. * @throws ResourceNotFoundException If {@link ConfigurationSetting * character, and does not match the current etag value. * @throws HttpResponseException If {@code key} is an empty string. */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<ConfigurationSetting> deleteSettingWithResponse(ConfigurationSetting setting, Context context) { return deleteSetting(setting, context); } private Response<ConfigurationSetting> deleteSetting(ConfigurationSetting setting, Context context) { return client.deleteSetting(setting, context).block(); } /** * Fetches the configuration settings that match the {@code options}. If {@code options} is {@code null}, then all * the {@link ConfigurationSetting configuration settings} are fetched with their current values. * * <p><strong>Code Samples</strong></p> * * <p>Retrieve all settings that use the key "prodDBConnection".</p> * * {@codesnippet com.azure.data.applicationconfig.configurationclient.listSettings * * @param options Optional. Options to filter configuration setting results from the service. * @return A {@link PagedIterable} of ConfigurationSettings that matches the {@code options}. If no options were * provided, the List contains all of the current settings in the service. 
*/ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedIterable<ConfigurationSetting> listSettings(SettingSelector options) { return listSettings(options, Context.NONE); } /** * Fetches the configuration settings that match the {@code options}. If {@code options} is {@code null}, then all * the {@link ConfigurationSetting configuration settings} are fetched with their current values. * * <p><strong>Code Samples</strong></p> * * <p>Retrieve all settings that use the key "prodDBConnection".</p> * * {@codesnippet com.azure.data.applicationconfig.configurationclient.listSettings * * @param options Optional. Options to filter configuration setting results from the service. * @param context Additional context that is passed through the Http pipeline during the service call. * @return A {@link PagedIterable} of ConfigurationSettings that matches the {@code options}. If no options were * provided, the {@link PagedIterable} contains all of the current settings in the service. */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedIterable<ConfigurationSetting> listSettings(SettingSelector options, Context context) { return new PagedIterable<>(client.listSettings(options, context)); } /** * Lists chronological/historical representation of {@link ConfigurationSetting} resource(s). Revisions are provided * in descending order from their {@link ConfigurationSetting * after a period of time. The service maintains change history for up to 7 days. * * If {@code options} is {@code null}, then all the {@link ConfigurationSetting ConfigurationSettings} are fetched * in their current state. Otherwise, the results returned match the parameters given in {@code options}. * * <p><strong>Code Samples</strong></p> * * <p>Retrieve all revisions of the setting that has the key "prodDBConnection".</p> * * {@codesnippet com.azure.data.applicationconfig.configurationclient.listSettingRevisions * * @param selector Optional. 
Used to filter configuration setting revisions from the service. * @return {@link PagedIterable} of {@link ConfigurationSetting} revisions. */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedIterable<ConfigurationSetting> listSettingRevisions(SettingSelector selector) { return listSettingRevisions(selector, Context.NONE); } /** * Lists chronological/historical representation of {@link ConfigurationSetting} resource(s). Revisions are provided * in descending order from their {@link ConfigurationSetting * after a period of time. The service maintains change history for up to 7 days. * * If {@code options} is {@code null}, then all the {@link ConfigurationSetting ConfigurationSettings} are fetched * in their current state. Otherwise, the results returned match the parameters given in {@code options}. * * <p><strong>Code Samples</strong></p> * * <p>Retrieve all revisions of the setting that has the key "prodDBConnection".</p> * * {@codesnippet com.azure.data.applicationconfig.configurationclient.listSettingRevisions * * @param selector Optional. Used to filter configuration setting revisions from the service. * @param context Additional context that is passed through the Http pipeline during the service call. * @return {@link PagedIterable} of {@link ConfigurationSetting} revisions. */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedIterable<ConfigurationSetting> listSettingRevisions(SettingSelector selector, Context context) { return new PagedIterable<>(client.listSettingRevisions(selector, context)); } }
/**
 * Synchronous client for App Configuration settings. Every operation delegates to the wrapped
 * {@link ConfigurationAsyncClient} and blocks until the service call completes.
 */
class ConfigurationClient {
    private final ConfigurationAsyncClient client;

    /**
     * Creates a ConfigurationClient that sends requests to the configuration service at {@code serviceEndpoint}. Each
     * service call goes through the {@code pipeline}.
     *
     * @param client The {@link ConfigurationAsyncClient} that the client routes its request through.
     */
    ConfigurationClient(ConfigurationAsyncClient client) {
        this.client = client;
    }

    /**
     * Adds a configuration value in the service if that key does not exist.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * <p>Add a setting with the key "prodDBConnection" and value "db_connection".</p>
     *
     * {@codesnippet com.azure.data.appconfiguration.ConfigurationClient.addSetting}
     *
     * @param key The key of the configuration setting to add.
     * @param value The value associated with this configuration setting key.
     * @return The {@link ConfigurationSetting} that was created, or {@code null}, if a key collision occurs or the key
     * is an invalid value (which will also throw ServiceRequestException described below).
     * @throws IllegalArgumentException If {@code key} is {@code null}.
     * @throws ResourceModifiedException If a ConfigurationSetting with the same key exists.
     * @throws HttpResponseException If {@code key} is an empty string.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public ConfigurationSetting addSetting(String key, String value) {
        return addSetting(new ConfigurationSetting().setKey(key).setValue(value), Context.NONE).getValue();
    }

    /**
     * Adds a configuration value in the service if that key and label does not exist. The label value of the
     * ConfigurationSetting is optional.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * <p>Add a setting with the key "prodDBConnection", label "westUS", and value "db_connection".</p>
     *
     * {@codesnippet com.azure.data.appconfiguration.ConfigurationClient.addSetting}
     *
     * @param setting The setting to add to the configuration service.
     * @return The {@link ConfigurationSetting} that was created, or {@code null}, if a key collision occurs or the key
     * is an invalid value (which will also throw ServiceRequestException described below).
     * @throws NullPointerException If {@code setting} is {@code null}.
     * @throws IllegalArgumentException If {@link ConfigurationSetting#getKey() key} is {@code null}.
     * @throws ResourceModifiedException If a ConfigurationSetting with the same key and label exists.
     * @throws HttpResponseException If {@code key} is an empty string.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public ConfigurationSetting addSetting(ConfigurationSetting setting) {
        return addSetting(setting, Context.NONE).getValue();
    }

    /**
     * Adds a configuration value in the service if that key and label does not exist. The label value of the
     * ConfigurationSetting is optional.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * <p>Add a setting with the key "prodDBConnection", label "westUS", and value "db_connection".</p>
     *
     * {@codesnippet com.azure.data.appconfiguration.ConfigurationClient.addSettingWithResponse}
     *
     * @param setting The setting to add to the configuration service.
     * @param context Additional context that is passed through the Http pipeline during the service call.
     * @return A REST response containing the {@link ConfigurationSetting} that was created, or {@code null}, if a
     * key collision occurs or the key is an invalid value (which will also throw ServiceRequestException described
     * below).
     * @throws NullPointerException If {@code setting} is {@code null}.
     * @throws IllegalArgumentException If {@link ConfigurationSetting#getKey() key} is {@code null}.
     * @throws ResourceModifiedException If a ConfigurationSetting with the same key and label exists.
     * @throws HttpResponseException If {@code key} is an empty string.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<ConfigurationSetting> addSettingWithResponse(ConfigurationSetting setting, Context context) {
        return addSetting(setting, context);
    }

    // Blocking helper: synchronously executes the async client's addSetting call.
    private Response<ConfigurationSetting> addSetting(ConfigurationSetting setting, Context context) {
        return client.addSetting(setting, context).block();
    }

    /**
     * Creates or updates a configuration value in the service with the given key.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * <p>Add a setting with the key "prodDBConnection" and value "db_connection".</p>
     *
     * {@codesnippet com.azure.data.appconfiguration.ConfigurationClient.setSetting}
     *
     * @param key The key of the configuration setting to create or update.
     * @param value The value of this configuration setting.
     * @return The {@link ConfigurationSetting} that was created or updated, or {@code null}, if the key is an invalid
     * value (which will also throw ServiceRequestException described below).
     * @throws IllegalArgumentException If {@code key} is {@code null}.
     * @throws ResourceModifiedException If the setting exists and is locked.
     * @throws HttpResponseException If {@code key} is an empty string.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public ConfigurationSetting setSetting(String key, String value) {
        // Restored overload: the javadoc and @ServiceMethod annotation existed, but the method itself was
        // missing, leaving an orphaned annotation (and a duplicated one on the overload below).
        return setSetting(new ConfigurationSetting().setKey(key).setValue(value));
    }

    /**
     * Creates or updates a configuration value in the service. Partial updates are not supported and the entire
     * configuration setting is updated.
     *
     * If {@link ConfigurationSetting#getETag() etag} is specified, the configuration value is updated if the current
     * setting's etag matches. If the etag's value is equal to the wildcard character ({@code "*"}), the setting will
     * always be updated.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * <p>Add a setting with the key "prodDBConnection", label "westUS", and value "db_connection".</p>
     *
     * {@codesnippet com.azure.data.appconfiguration.ConfigurationClient.setSetting}
     *
     * @param setting The configuration setting to create or update.
     * @return The {@link ConfigurationSetting} that was created or updated, or {@code null}, if the key is an invalid
     * value, the setting is locked, or an etag was provided but does not match the service's current etag value (which
     * will also throw ServiceRequestException described below).
     * @throws NullPointerException If {@code setting} is {@code null}.
     * @throws IllegalArgumentException If {@link ConfigurationSetting#getKey() key} is {@code null}.
     * @throws ResourceModifiedException If the {@link ConfigurationSetting#getETag() etag} was specified, is not the
     * wildcard character, and the current configuration value's etag does not match, or the setting exists and is
     * locked.
     * @throws HttpResponseException If {@code key} is an empty string.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public ConfigurationSetting setSetting(ConfigurationSetting setting) {
        return setSettingWithResponse(setting, Context.NONE).getValue();
    }

    /**
     * Creates or updates a configuration value in the service. Partial updates are not supported and the entire
     * configuration setting is updated.
     *
     * If {@link ConfigurationSetting#getETag() etag} is specified, the configuration value is updated if the current
     * setting's etag matches. If the etag's value is equal to the wildcard character ({@code "*"}), the setting will
     * always be updated.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * <p>Add a setting with the key "prodDBConnection" and value "db_connection".</p>
     *
     * {@codesnippet com.azure.data.appconfiguration.ConfigurationClient.setSettingWithResponse}
     *
     * @param setting The configuration setting to create or update.
     * @param context Additional context that is passed through the Http pipeline during the service call.
     * @return The {@link ConfigurationSetting} that was created or updated, or {@code null}, if the key is an invalid
     * value, the setting is locked, or an etag was provided but does not match the service's current etag value (which
     * will also throw ServiceRequestException described below).
     * @throws NullPointerException If {@code setting} is {@code null}.
     * @throws IllegalArgumentException If {@link ConfigurationSetting#getKey() key} is {@code null}.
     * @throws ResourceModifiedException If the {@link ConfigurationSetting#getETag() etag} was specified, is not the
     * wildcard character, and the current configuration value's etag does not match, or the setting exists and is
     * locked.
     * @throws HttpResponseException If {@code key} is an empty string.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<ConfigurationSetting> setSettingWithResponse(ConfigurationSetting setting, Context context) {
        return client.setSetting(setting, context).block();
    }

    /**
     * Updates an existing configuration value in the service with the given key. The setting must already exist.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * <p>Update a setting with the key "prodDBConnection" to have the value "updated_db_connection".</p>
     *
     * {@codesnippet com.azure.data.appconfiguration.ConfigurationClient.updateSetting}
     *
     * @param key The key of the configuration setting to update.
     * @param value The updated value of this configuration setting.
     * @return The {@link ConfigurationSetting} that was updated, or {@code null}, if the configuration value does not
     * exist, is locked, or the key is an invalid value (which will also throw ServiceRequestException described
     * below).
     * @throws IllegalArgumentException If {@code key} is {@code null}.
     * @throws HttpResponseException If a ConfigurationSetting with the key does not exist or the configuration value is
     * locked.
     * @throws HttpResponseException If {@code key} is an empty string.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public ConfigurationSetting updateSetting(String key, String value) {
        return updateSetting(new ConfigurationSetting().setKey(key).setValue(value), Context.NONE).getValue();
    }

    /**
     * Updates an existing configuration value in the service. The setting must already exist. Partial updates are not
     * supported, the entire configuration value is replaced.
     *
     * If {@link ConfigurationSetting#getETag() etag} is specified, the configuration value is only updated if it
     * matches.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * <p>Update the setting with the key-label pair "prodDBConnection"-"westUS" to have the value
     * "updated_db_connection".</p>
     *
     * {@codesnippet com.azure.data.applicationconfig.configurationclient.updateSetting}
     *
     * @param setting The setting to add or update in the service.
     * @return The {@link ConfigurationSetting} that was updated, or {@code null}, if the configuration value does not
     * exist, is locked, or the key is an invalid value (which will also throw ServiceRequestException described
     * below).
     * @throws NullPointerException If {@code setting} is {@code null}.
     * @throws IllegalArgumentException If {@link ConfigurationSetting#getKey() key} is {@code null}.
     * @throws ResourceModifiedException If a ConfigurationSetting with the same key and label does not exist, the
     * setting is locked, or {@link ConfigurationSetting#getETag() etag} was specified but does not match the current
     * etag value.
     * @throws HttpResponseException If {@code key} is an empty string.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public ConfigurationSetting updateSetting(ConfigurationSetting setting) {
        return updateSetting(setting, Context.NONE).getValue();
    }

    /**
     * Updates an existing configuration value in the service. The setting must already exist. Partial updates are not
     * supported, the entire configuration value is replaced.
     *
     * If {@link ConfigurationSetting#getETag() etag} is specified, the configuration value is only updated if it
     * matches.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * <p>Update the setting with the key-label pair "prodDBConnection"-"westUS" to have the value
     * "updated_db_connection".</p>
     *
     * {@codesnippet com.azure.data.applicationconfig.configurationclient.updateSettingWithResponse}
     *
     * @param setting The setting to add or update in the service.
     * @param context Additional context that is passed through the Http pipeline during the service call.
     * @return A REST response containing the {@link ConfigurationSetting} that was updated, or {@code null}, if the
     * configuration value does not exist, is locked, or the key is an invalid value (which will also throw
     * ServiceRequestException described below).
     * @throws NullPointerException If {@code setting} is {@code null}.
     * @throws IllegalArgumentException If {@link ConfigurationSetting#getKey() key} is {@code null}.
     * @throws ResourceModifiedException If a ConfigurationSetting with the same key and label does not exist, the
     * setting is locked, or {@link ConfigurationSetting#getETag() etag} was specified but does not match the current
     * etag value.
     * @throws HttpResponseException If {@code key} is an empty string.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<ConfigurationSetting> updateSettingWithResponse(ConfigurationSetting setting, Context context) {
        return updateSetting(setting, context);
    }

    // Blocking helper: synchronously executes the async client's updateSetting call.
    private Response<ConfigurationSetting> updateSetting(ConfigurationSetting setting, Context context) {
        return client.updateSetting(setting, context).block();
    }

    /**
     * Attempts to get a ConfigurationSetting that matches the {@code key}.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * <p>Retrieve the setting with the key "prodDBConnection".</p>
     *
     * {@codesnippet com.azure.data.applicationconfig.configurationclient.getSetting}
     *
     * @param key The key of the setting to retrieve.
     * @return The {@link ConfigurationSetting} stored in the service, or {@code null}, if the configuration value does
     * not exist or the key is an invalid value (which will also throw ServiceRequestException described below).
     * @throws IllegalArgumentException If {@code key} is {@code null}.
     * @throws ResourceNotFoundException If a ConfigurationSetting with {@code key} does not exist.
     * @throws HttpResponseException If {@code key} is an empty string.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public ConfigurationSetting getSetting(String key) {
        return getSetting(new ConfigurationSetting().setKey(key), Context.NONE).getValue();
    }

    /**
     * Attempts to get the ConfigurationSetting given the {@code key}, optional {@code label}.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * <p>Retrieve the setting with the key "prodDBConnection".</p>
     *
     * {@codesnippet com.azure.data.applicationconfig.configurationclient.getSetting}
     *
     * @param setting The setting to retrieve based on its key and optional label combination.
     * @return The {@link ConfigurationSetting} stored in the service, or {@code null}, if the configuration value does
     * not exist or the key is an invalid value (which will also throw ServiceRequestException described below).
     * @throws NullPointerException If {@code setting} is {@code null}.
     * @throws IllegalArgumentException If {@link ConfigurationSetting#getKey() key} is {@code null}.
     * @throws ResourceNotFoundException If a ConfigurationSetting with the same key and label does not exist.
     * @throws HttpResponseException If the {@code key} is an empty string.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public ConfigurationSetting getSetting(ConfigurationSetting setting) {
        return getSetting(setting, Context.NONE).getValue();
    }

    /**
     * Attempts to get the ConfigurationSetting given the {@code key}, optional {@code label}.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * <p>Retrieve the setting with the key "prodDBConnection".</p>
     *
     * {@codesnippet com.azure.data.applicationconfig.configurationclient.getSettingWithResponse}
     *
     * @param setting The setting to retrieve based on its key and optional label combination.
     * @param context Additional context that is passed through the Http pipeline during the service call.
     * @return A REST response containing the {@link ConfigurationSetting} stored in the service, or {@code null}, if
     * the configuration value does not exist or the key is an invalid value (which will also throw
     * ServiceRequestException described below).
     * @throws NullPointerException If {@code setting} is {@code null}.
     * @throws IllegalArgumentException If {@link ConfigurationSetting#getKey() key} is {@code null}.
     * @throws ResourceNotFoundException If a ConfigurationSetting with the same key and label does not exist.
     * @throws HttpResponseException If the {@code key} is an empty string.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<ConfigurationSetting> getSettingWithResponse(ConfigurationSetting setting, Context context) {
        return getSetting(setting, context);
    }

    // Blocking helper: synchronously executes the async client's getSetting call.
    private Response<ConfigurationSetting> getSetting(ConfigurationSetting setting, Context context) {
        return client.getSetting(setting, context).block();
    }

    /**
     * Deletes the ConfigurationSetting with a matching {@code key}.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * <p>Delete the setting with the key "prodDBConnection".</p>
     *
     * {@codesnippet com.azure.data.applicationconfig.configurationclient.deleteSetting}
     *
     * @param key The key of the setting to delete.
     * @return The deleted ConfigurationSetting or {@code null} if it didn't exist. {@code null} is also returned if the
     * {@code key} is an invalid value (which will also throw ServiceRequestException described below).
     * @throws IllegalArgumentException If {@code key} is {@code null}.
     * @throws ResourceModifiedException If the ConfigurationSetting is locked.
     * @throws HttpResponseException If {@code key} is an empty string.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public ConfigurationSetting deleteSetting(String key) {
        return deleteSetting(new ConfigurationSetting().setKey(key), Context.NONE).getValue();
    }

    /**
     * Deletes the {@link ConfigurationSetting} with a matching key, along with the given label and etag.
     *
     * If {@link ConfigurationSetting#getETag() etag} is specified and is not the wildcard character ({@code "*"}),
     * the setting is <b>only</b> deleted if the etag matches the current etag; this means that no one has updated the
     * ConfigurationSetting yet.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * <p>Delete the setting with the key "prodDBConnection".</p>
     *
     * {@codesnippet com.azure.data.applicationconfig.configurationclient.deleteSetting}
     *
     * @param setting The ConfigurationSetting to delete.
     * @return The deleted ConfigurationSetting or {@code null} if didn't exist. {@code null} is also returned if the
     * {@code key} is an invalid value or {@link ConfigurationSetting#getETag() etag} is set but does not match the
     * current etag (which will also throw ServiceRequestException described below).
     * @throws IllegalArgumentException If {@link ConfigurationSetting#getKey() key} is {@code null}.
     * @throws NullPointerException When {@code setting} is {@code null}.
     * @throws ResourceModifiedException If the ConfigurationSetting is locked.
     * @throws ResourceNotFoundException If {@link ConfigurationSetting#getETag() etag} is specified, is not the
     * wildcard character, and does not match the current etag value.
     * @throws HttpResponseException If {@code key} is an empty string.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public ConfigurationSetting deleteSetting(ConfigurationSetting setting) {
        return deleteSetting(setting, Context.NONE).getValue();
    }

    /**
     * Deletes the {@link ConfigurationSetting} with a matching key, along with the given label and etag.
     *
     * If {@link ConfigurationSetting#getETag() etag} is specified and is not the wildcard character ({@code "*"}),
     * the setting is <b>only</b> deleted if the etag matches the current etag; this means that no one has updated the
     * ConfigurationSetting yet.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * <p>Delete the setting with the key "prodDBConnection".</p>
     *
     * {@codesnippet com.azure.data.applicationconfig.configurationclient.deleteSettingWithResponse}
     *
     * @param setting The ConfigurationSetting to delete.
     * @param context Additional context that is passed through the Http pipeline during the service call.
     * @return A REST response containing the deleted ConfigurationSetting or {@code null} if didn't exist. {@code null}
     * is also returned if the {@code key} is an invalid value or {@link ConfigurationSetting#getETag() etag} is set but
     * does not match the current etag (which will also throw ServiceRequestException described below).
     * @throws IllegalArgumentException If {@link ConfigurationSetting#getKey() key} is {@code null}.
     * @throws NullPointerException When {@code setting} is {@code null}.
     * @throws ResourceModifiedException If the ConfigurationSetting is locked.
     * @throws ResourceNotFoundException If {@link ConfigurationSetting#getETag() etag} is specified, is not the
     * wildcard character, and does not match the current etag value.
     * @throws HttpResponseException If {@code key} is an empty string.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<ConfigurationSetting> deleteSettingWithResponse(ConfigurationSetting setting, Context context) {
        return deleteSetting(setting, context);
    }

    // Blocking helper: synchronously executes the async client's deleteSetting call.
    private Response<ConfigurationSetting> deleteSetting(ConfigurationSetting setting, Context context) {
        return client.deleteSetting(setting, context).block();
    }

    /**
     * Fetches the configuration settings that match the {@code options}. If {@code options} is {@code null}, then all
     * the {@link ConfigurationSetting configuration settings} are fetched with their current values.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * <p>Retrieve all settings that use the key "prodDBConnection".</p>
     *
     * {@codesnippet com.azure.data.applicationconfig.configurationclient.listSettings}
     *
     * @param options Optional. Options to filter configuration setting results from the service.
     * @return A {@link PagedIterable} of ConfigurationSettings that matches the {@code options}. If no options were
     * provided, the List contains all of the current settings in the service.
     */
    @ServiceMethod(returns = ReturnType.COLLECTION)
    public PagedIterable<ConfigurationSetting> listSettings(SettingSelector options) {
        return listSettings(options, Context.NONE);
    }

    /**
     * Fetches the configuration settings that match the {@code options}. If {@code options} is {@code null}, then all
     * the {@link ConfigurationSetting configuration settings} are fetched with their current values.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * <p>Retrieve all settings that use the key "prodDBConnection".</p>
     *
     * {@codesnippet com.azure.data.applicationconfig.configurationclient.listSettings}
     *
     * @param options Optional. Options to filter configuration setting results from the service.
     * @param context Additional context that is passed through the Http pipeline during the service call.
     * @return A {@link PagedIterable} of ConfigurationSettings that matches the {@code options}. If no options were
     * provided, the {@link PagedIterable} contains all of the current settings in the service.
     */
    @ServiceMethod(returns = ReturnType.COLLECTION)
    public PagedIterable<ConfigurationSetting> listSettings(SettingSelector options, Context context) {
        return new PagedIterable<>(client.listSettings(options, context));
    }

    /**
     * Lists chronological/historical representation of {@link ConfigurationSetting} resource(s). Revisions are provided
     * in descending order from their {@link ConfigurationSetting#getLastModified() lastModified} date. Revisions expire
     * after a period of time. The service maintains change history for up to 7 days.
     *
     * If {@code options} is {@code null}, then all the {@link ConfigurationSetting ConfigurationSettings} are fetched
     * in their current state. Otherwise, the results returned match the parameters given in {@code options}.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * <p>Retrieve all revisions of the setting that has the key "prodDBConnection".</p>
     *
     * {@codesnippet com.azure.data.applicationconfig.configurationclient.listSettingRevisions}
     *
     * @param selector Optional. Used to filter configuration setting revisions from the service.
     * @return {@link PagedIterable} of {@link ConfigurationSetting} revisions.
     */
    @ServiceMethod(returns = ReturnType.COLLECTION)
    public PagedIterable<ConfigurationSetting> listSettingRevisions(SettingSelector selector) {
        return listSettingRevisions(selector, Context.NONE);
    }

    /**
     * Lists chronological/historical representation of {@link ConfigurationSetting} resource(s). Revisions are provided
     * in descending order from their {@link ConfigurationSetting#getLastModified() lastModified} date. Revisions expire
     * after a period of time. The service maintains change history for up to 7 days.
     *
     * If {@code options} is {@code null}, then all the {@link ConfigurationSetting ConfigurationSettings} are fetched
     * in their current state. Otherwise, the results returned match the parameters given in {@code options}.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * <p>Retrieve all revisions of the setting that has the key "prodDBConnection".</p>
     *
     * {@codesnippet com.azure.data.applicationconfig.configurationclient.listSettingRevisions}
     *
     * @param selector Optional. Used to filter configuration setting revisions from the service.
     * @param context Additional context that is passed through the Http pipeline during the service call.
     * @return {@link PagedIterable} of {@link ConfigurationSetting} revisions.
     */
    @ServiceMethod(returns = ReturnType.COLLECTION)
    public PagedIterable<ConfigurationSetting> listSettingRevisions(SettingSelector selector, Context context) {
        return new PagedIterable<>(client.listSettingRevisions(selector, context));
    }
}
same here
public ConfigurationSetting updateSetting(String key, String value) { return updateSetting(new ConfigurationSetting().key(key).value(value), Context.NONE).getValue(); }
return updateSetting(new ConfigurationSetting().key(key).value(value), Context.NONE).getValue();
public ConfigurationSetting updateSetting(String key, String value) { return updateSetting(new ConfigurationSetting().setKey(key).setValue(value), Context.NONE).getValue(); }
class ConfigurationClient { private final ConfigurationAsyncClient client; /** * Creates a ConfigurationClient that sends requests to the configuration service at {@code serviceEndpoint}. * Each service call goes through the {@code pipeline}. * * @param client The {@link ConfigurationAsyncClient} that the client routes its request through. */ ConfigurationClient(ConfigurationAsyncClient client) { this.client = client; } /** * Adds a configuration value in the service if that key does not exist. * * <p><strong>Code Samples</strong></p> * * <p>Add a setting with the key "prodDBConnection" and value "db_connection".</p> * * {@codesnippet com.azure.data.appconfiguration.ConfigurationClient.addSetting * * @param key The key of the configuration setting to add. * @param value The value associated with this configuration setting key. * @return The {@link ConfigurationSetting} that was created, or {@code null}, if a key collision occurs or the key * is an invalid value (which will also throw ServiceRequestException described below). * @throws IllegalArgumentException If {@code key} is {@code null}. * @throws ResourceModifiedException If a ConfigurationSetting with the same key exists. * @throws HttpResponseException If {@code key} is an empty string. */ @ServiceMethod(returns = ReturnType.SINGLE) public ConfigurationSetting addSetting(String key, String value) { return addSetting(new ConfigurationSetting().key(key).value(value), Context.NONE).getValue(); } /** * Adds a configuration value in the service if that key and label does not exist. The label value of the * ConfigurationSetting is optional. * * <p><strong>Code Samples</strong></p> * * <p>Add a setting with the key "prodDBConnection", label "westUS", and value "db_connection".</p> * * {@codesnippet com.azure.data.appconfiguration.ConfigurationClient.addSetting * * @param setting The setting to add to the configuration service. 
* @return The {@link ConfigurationSetting} that was created, or {@code null}, if a key collision occurs or the key * is an invalid value (which will also throw ServiceRequestException described below). * @throws NullPointerException If {@code setting} is {@code null}. * @throws IllegalArgumentException If {@link ConfigurationSetting * @throws ResourceModifiedException If a ConfigurationSetting with the same key and label exists. * @throws HttpResponseException If {@code key} is an empty string. */ @ServiceMethod(returns = ReturnType.SINGLE) public ConfigurationSetting addSetting(ConfigurationSetting setting) { return addSetting(setting, Context.NONE).getValue(); } /** * Adds a configuration value in the service if that key and label does not exist. The label value of the * ConfigurationSetting is optional. * * <p><strong>Code Samples</strong></p> * * <p>Add a setting with the key "prodDBConnection", label "westUS", and value "db_connection".</p> * * {@codesnippet com.azure.data.appconfiguration.ConfigurationClient.addSettingWithResponse * * @param setting The setting to add to the configuration service. * @param context Additional context that is passed through the Http pipeline during the service call. * @return A REST response containing the the {@link ConfigurationSetting} that was created, or {@code null}, if a * key collision occurs or the key is an invalid value (which will also throw ServiceRequestException described * below). * @throws NullPointerException If {@code setting} is {@code null}. * @throws IllegalArgumentException If {@link ConfigurationSetting * @throws ResourceModifiedException If a ConfigurationSetting with the same key and label exists. * @throws HttpResponseException If {@code key} is an empty string. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Response<ConfigurationSetting> addSettingWithResponse(ConfigurationSetting setting, Context context) { return addSetting(setting, context); } private Response<ConfigurationSetting> addSetting(ConfigurationSetting setting, Context context) { return client.addSetting(setting, context).block(); } /** * Creates or updates a configuration value in the service with the given key. * * <p><strong>Code Samples</strong></p> * * <p>Add a setting with the key "prodDBConnection" and value "db_connection".</p> * * {@codesnippet com.azure.data.appconfiguration.ConfigurationClient.setSetting * * @param key The key of the configuration setting to create or update. * @param value The value of this configuration setting. * @return The {@link ConfigurationSetting} that was created or updated, or {@code null}, if the key is an invalid * value (which will also throw ServiceRequestException described below). * @throws IllegalArgumentException If {@code key} is {@code null}. * @throws ResourceModifiedException If the setting exists and is locked. * @throws HttpResponseException If {@code key} is an empty string. */ @ServiceMethod(returns = ReturnType.SINGLE) public ConfigurationSetting setSetting(String key, String value) { return setSetting(new ConfigurationSetting().key(key).value(value), Context.NONE).getValue(); } /** * Creates or updates a configuration value in the service. Partial updates are not supported and the entire * configuration setting is updated. * * If {@link ConfigurationSetting * setting's etag matches. If the etag's value is equal to the wildcard character ({@code "*"}), the setting * will always be updated. * * <p><strong>Code Samples</strong></p> * * <p>Add a setting with the key "prodDBConnection", label "westUS", and value "db_connection". <p> * * {@codesnippet com.azure.data.appconfiguration.ConfigurationClient.setSetting * * @param setting The configuration setting to create or update. 
* @return The {@link ConfigurationSetting} that was created or updated, or {@code null}, if the key is an invalid * value, the setting is locked, or an etag was provided but does not match the service's current etag value * (which will also throw ServiceRequestException described below). * @throws NullPointerException If {@code setting} is {@code null}. * @throws IllegalArgumentException If {@link ConfigurationSetting * @throws ResourceModifiedException If the {@link ConfigurationSetting * wildcard character, and the current configuration value's etag does not match, or the setting exists and is * locked. * @throws HttpResponseException If {@code key} is an empty string. */ @ServiceMethod(returns = ReturnType.SINGLE) public ConfigurationSetting setSetting(ConfigurationSetting setting) { return setSetting(setting, Context.NONE).getValue(); } /** * Creates or updates a configuration value in the service. Partial updates are not supported and the entire * configuration setting is updated. * * If {@link ConfigurationSetting * setting's etag matches. If the etag's value is equal to the wildcard character ({@code "*"}), the setting * will always be updated. * * <p><strong>Code Samples</strong></p> * * <p>Add a setting with the key "prodDBConnection" and value "db_connection".</p> * * {@codesnippet com.azure.data.appconfiguration.ConfigurationClient.setSettingWithResponse * * @param setting The configuration setting to create or update. * @param context Additional context that is passed through the Http pipeline during the service call. * @return The {@link ConfigurationSetting} that was created or updated, or {@code null}, if the key is an invalid * value, the setting is locked, or an etag was provided but does not match the service's current etag value * (which will also throw ServiceRequestException described below). * @throws NullPointerException If {@code setting} is {@code null}. 
* @throws IllegalArgumentException If {@link ConfigurationSetting * @throws ResourceModifiedException If the {@link ConfigurationSetting * wildcard character, and the current configuration value's etag does not match, or the setting exists and is * locked. * @throws HttpResponseException If {@code key} is an empty string. */ public Response<ConfigurationSetting> setSettingWithResponse(ConfigurationSetting setting, Context context) { return client.setSetting(setting, context).block(); } private Response<ConfigurationSetting> setSetting(ConfigurationSetting setting, Context context) { return client.setSetting(setting, context).block(); } /** * Updates an existing configuration value in the service with the given key. The setting must already exist. * * <p><strong>Code Samples</strong></p> * * <p>Update a setting with the key "prodDBConnection" to have the value "updated_db_connection".</p> * * {@codesnippet com.azure.data.appconfiguration.ConfigurationClient.updateSetting * * @param key The key of the configuration setting to update. * @param value The updated value of this configuration setting. * @return The {@link ConfigurationSetting} that was updated, or {@code null}, if the configuration value does not * exist, is locked, or the key is an invalid value (which will also throw ServiceRequestException described * below). * @throws IllegalArgumentException If {@code key} is {@code null}. * @throws HttpResponseException If a ConfigurationSetting with the key does not exist or the configuration value is * locked. * @throws HttpResponseException If {@code key} is an empty string. */ @ServiceMethod(returns = ReturnType.SINGLE) /** * Updates an existing configuration value in the service. The setting must already exist. Partial updates are not * supported, the entire configuration value is replaced. 
* * If {@link ConfigurationSetting * * <p><strong>Code Samples</strong></p> * * <p>Update the setting with the key-label pair "prodDBConnection"-"westUS" to have the value * "updated_db_connection".</p> * * {@codesnippet com.azure.data.applicationconfig.configurationclient.updateSetting * * @param setting The setting to add or update in the service. * @return The {@link ConfigurationSetting} that was updated, or {@code null}, if the configuration value does not * exist, is locked, or the key is an invalid value (which will also throw ServiceRequestException described * below). * @throws NullPointerException If {@code setting} is {@code null}. * @throws IllegalArgumentException If {@link ConfigurationSetting * @throws ResourceModifiedException If a ConfigurationSetting with the same key and label does not exist, the * setting is locked, or {@link ConfigurationSetting * value. * @throws HttpResponseException If {@code key} is an empty string. */ @ServiceMethod(returns = ReturnType.SINGLE) public ConfigurationSetting updateSetting(ConfigurationSetting setting) { return updateSetting(setting, Context.NONE).getValue(); } /** * Updates an existing configuration value in the service. The setting must already exist. Partial updates are not * supported, the entire configuration value is replaced. * * If {@link ConfigurationSetting * * <p><strong>Code Samples</strong></p> * * <p>Update the setting with the key-label pair "prodDBConnection"-"westUS" to have the value * "updated_db_connection".</p> * * {@codesnippet com.azure.data.applicationconfig.configurationclient.updateSettingWithResponse * * @param setting The setting to add or update in the service. * @param context Additional context that is passed through the Http pipeline during the service call. 
* @return A REST response containing the {@link ConfigurationSetting} that was updated, or {@code null}, if the * configuration value does not exist, is locked, or the key is an invalid value (which will also throw * ServiceRequestException described below). * @throws NullPointerException If {@code setting} is {@code null}. * @throws IllegalArgumentException If {@link ConfigurationSetting * @throws ResourceModifiedException If a ConfigurationSetting with the same key and label does not exist, the * setting is locked, or {@link ConfigurationSetting * value. * @throws HttpResponseException If {@code key} is an empty string. */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<ConfigurationSetting> updateSettingWithResponse(ConfigurationSetting setting, Context context) { return updateSetting(setting, context); } private Response<ConfigurationSetting> updateSetting(ConfigurationSetting setting, Context context) { return client.updateSetting(setting, context).block(); } /** * Attempts to get a ConfigurationSetting that matches the {@code key}. * * <p><strong>Code Samples</strong></p> * * <p>Retrieve the setting with the key "prodDBConnection".</p> * * {@codesnippet com.azure.data.applicationconfig.configurationclient.getSetting * * @param key The key of the setting to retrieve. * @return The {@link ConfigurationSetting} stored in the service, or {@code null}, if the configuration value does * not exist or the key is an invalid value (which will also throw ServiceRequestException described below). * @throws IllegalArgumentException If {@code key} is {@code null}. * @throws ResourceNotFoundException If a ConfigurationSetting with {@code key} does not exist. * @throws HttpResponseException If {@code key} is an empty string. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public ConfigurationSetting getSetting(String key) { return getSetting(new ConfigurationSetting().key(key), Context.NONE).getValue(); } /** * Attempts to get the ConfigurationSetting given the {@code key}, optional {@code label}. * * <p><strong>Code Samples</strong></p> * * <p>Retrieve the setting with the key "prodDBConnection".</p> * * {@codesnippet com.azure.data.applicationconfig.configurationclient.getSetting * * @param setting The setting to retrieve based on its key and optional label combination. * @return The {@link ConfigurationSetting} stored in the service, or {@code null}, if the configuration value does * not exist or the key is an invalid value (which will also throw ServiceRequestException described below). * @throws NullPointerException If {@code setting} is {@code null}. * @throws IllegalArgumentException If {@link ConfigurationSetting * @throws ResourceNotFoundException If a ConfigurationSetting with the same key and label does not exist. * @throws HttpResponseException If the {@code} key is an empty string. */ @ServiceMethod(returns = ReturnType.SINGLE) public ConfigurationSetting getSetting(ConfigurationSetting setting) { return getSetting(setting, Context.NONE).getValue(); } /** * Attempts to get the ConfigurationSetting given the {@code key}, optional {@code label}. * * <p><strong>Code Samples</strong></p> * * <p>Retrieve the setting with the key "prodDBConnection".</p> * * {@codesnippet com.azure.data.applicationconfig.configurationclient.getSettingWithResponse * * @param setting The setting to retrieve based on its key and optional label combination. * @param context Additional context that is passed through the Http pipeline during the service call. 
* @return A REST response containg the {@link ConfigurationSetting} stored in the service, or {@code null}, if the * configuration value does not exist or the key is an invalid value (which will also throw * ServiceRequestException described below). * @throws NullPointerException If {@code setting} is {@code null}. * @throws IllegalArgumentException If {@link ConfigurationSetting * @throws ResourceNotFoundException If a ConfigurationSetting with the same key and label does not exist. * @throws HttpResponseException If the {@code} key is an empty string. */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<ConfigurationSetting> getSettingWithResponse(ConfigurationSetting setting, Context context) { return getSetting(setting, context); } private Response<ConfigurationSetting> getSetting(ConfigurationSetting setting, Context context) { return client.getSetting(setting, context).block(); } /** * Deletes the ConfigurationSetting with a matching {@code key}. * * <p><strong>Code Samples</strong></p> * * <p>Delete the setting with the key "prodDBConnection".</p> * * {@codesnippet com.azure.data.applicationconfig.configurationclient.deleteSetting * * @param key The key of the setting to delete. * @return The deleted ConfigurationSetting or {@code null} if it didn't exist. {@code null} is also returned if * the {@code key} is an invalid value (which will also throw ServiceRequestException described below). * @throws IllegalArgumentException If {@code key} is {@code null}. * @throws ResourceModifiedException If the ConfigurationSetting is locked. * @throws HttpResponseException If {@code key} is an empty string. */ @ServiceMethod(returns = ReturnType.SINGLE) public ConfigurationSetting deleteSetting(String key) { return deleteSetting(new ConfigurationSetting().key(key), Context.NONE).getValue(); } /** * Deletes the {@link ConfigurationSetting} with a matching key, along with the given label and etag. 
* * If {@link ConfigurationSetting * then the setting is <b>only</b> deleted if the etag matches the current etag; this means that no one has updated * the ConfigurationSetting yet. * * <p><strong>Code Samples</strong></p> * * <p>Delete the setting with the key "prodDBConnection".</p> * * {@codesnippet com.azure.data.applicationconfig.configurationclient.deleteSetting * * @param setting The ConfigurationSetting to delete. * @return The deleted ConfigurationSetting or {@code null} if didn't exist. {@code null} is also returned if * the {@code key} is an invalid value or {@link ConfigurationSetting * current etag (which will also throw ServiceRequestException described below). * @throws IllegalArgumentException If {@link ConfigurationSetting * @throws NullPointerException When {@code setting} is {@code null}. * @throws ResourceModifiedException If the ConfigurationSetting is locked. * @throws ResourceNotFoundException If {@link ConfigurationSetting * character, and does not match the current etag value. * @throws HttpResponseException If {@code key} is an empty string. */ @ServiceMethod(returns = ReturnType.SINGLE) public ConfigurationSetting deleteSetting(ConfigurationSetting setting) { return deleteSetting(setting, Context.NONE).getValue(); } /** * Deletes the {@link ConfigurationSetting} with a matching key, along with the given label and etag. * * If {@link ConfigurationSetting * then the setting is <b>only</b> deleted if the etag matches the current etag; this means that no one has updated * the ConfigurationSetting yet. * * <p><strong>Code Samples</strong></p> * * <p>Delete the setting with the key "prodDBConnection".</p> * * {@codesnippet com.azure.data.applicationconfig.configurationclient.deleteSettingWithResponse * * @param setting The ConfigurationSetting to delete. * @param context Additional context that is passed through the Http pipeline during the service call. 
* @return A REST response containing the deleted ConfigurationSetting or {@code null} if didn't exist. {@code null} * is also returned if the {@code key} is an invalid value or {@link ConfigurationSetting * but does not match the current etag (which will also throw ServiceRequestException described below). * @throws IllegalArgumentException If {@link ConfigurationSetting * @throws NullPointerException When {@code setting} is {@code null}. * @throws ResourceModifiedException If the ConfigurationSetting is locked. * @throws ResourceNotFoundException If {@link ConfigurationSetting * character, and does not match the current etag value. * @throws HttpResponseException If {@code key} is an empty string. */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<ConfigurationSetting> deleteSettingWithResponse(ConfigurationSetting setting, Context context) { return deleteSetting(setting, context); } private Response<ConfigurationSetting> deleteSetting(ConfigurationSetting setting, Context context) { return client.deleteSetting(setting, context).block(); } /** * Fetches the configuration settings that match the {@code options}. If {@code options} is {@code null}, then all * the {@link ConfigurationSetting configuration settings} are fetched with their current values. * * <p><strong>Code Samples</strong></p> * * <p>Retrieve all settings that use the key "prodDBConnection".</p> * * {@codesnippet com.azure.data.applicationconfig.configurationclient.listSettings * * @param options Optional. Options to filter configuration setting results from the service. * @return A {@link PagedIterable} of ConfigurationSettings that matches the {@code options}. If no options were * provided, the List contains all of the current settings in the service. 
*/ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedIterable<ConfigurationSetting> listSettings(SettingSelector options) { return listSettings(options, Context.NONE); } /** * Fetches the configuration settings that match the {@code options}. If {@code options} is {@code null}, then all * the {@link ConfigurationSetting configuration settings} are fetched with their current values. * * <p><strong>Code Samples</strong></p> * * <p>Retrieve all settings that use the key "prodDBConnection".</p> * * {@codesnippet com.azure.data.applicationconfig.configurationclient.listSettings * * @param options Optional. Options to filter configuration setting results from the service. * @param context Additional context that is passed through the Http pipeline during the service call. * @return A {@link PagedIterable} of ConfigurationSettings that matches the {@code options}. If no options were * provided, the {@link PagedIterable} contains all of the current settings in the service. */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedIterable<ConfigurationSetting> listSettings(SettingSelector options, Context context) { return new PagedIterable<>(client.listSettings(options, context)); } /** * Lists chronological/historical representation of {@link ConfigurationSetting} resource(s). Revisions are provided * in descending order from their {@link ConfigurationSetting * after a period of time. The service maintains change history for up to 7 days. * * If {@code options} is {@code null}, then all the {@link ConfigurationSetting ConfigurationSettings} are fetched * in their current state. Otherwise, the results returned match the parameters given in {@code options}. * * <p><strong>Code Samples</strong></p> * * <p>Retrieve all revisions of the setting that has the key "prodDBConnection".</p> * * {@codesnippet com.azure.data.applicationconfig.configurationclient.listSettingRevisions * * @param selector Optional. 
Used to filter configuration setting revisions from the service. * @return {@link PagedIterable} of {@link ConfigurationSetting} revisions. */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedIterable<ConfigurationSetting> listSettingRevisions(SettingSelector selector) { return listSettingRevisions(selector, Context.NONE); } /** * Lists chronological/historical representation of {@link ConfigurationSetting} resource(s). Revisions are provided * in descending order from their {@link ConfigurationSetting * after a period of time. The service maintains change history for up to 7 days. * * If {@code options} is {@code null}, then all the {@link ConfigurationSetting ConfigurationSettings} are fetched * in their current state. Otherwise, the results returned match the parameters given in {@code options}. * * <p><strong>Code Samples</strong></p> * * <p>Retrieve all revisions of the setting that has the key "prodDBConnection".</p> * * {@codesnippet com.azure.data.applicationconfig.configurationclient.listSettingRevisions * * @param selector Optional. Used to filter configuration setting revisions from the service. * @param context Additional context that is passed through the Http pipeline during the service call. * @return {@link PagedIterable} of {@link ConfigurationSetting} revisions. */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedIterable<ConfigurationSetting> listSettingRevisions(SettingSelector selector, Context context) { return new PagedIterable<>(client.listSettingRevisions(selector, context)); } }
class ConfigurationClient { private final ConfigurationAsyncClient client; /** * Creates a ConfigurationClient that sends requests to the configuration service at {@code serviceEndpoint}. Each * service call goes through the {@code pipeline}. * * @param client The {@link ConfigurationAsyncClient} that the client routes its request through. */ ConfigurationClient(ConfigurationAsyncClient client) { this.client = client; } /** * Adds a configuration value in the service if that key does not exist. * * <p><strong>Code Samples</strong></p> * * <p>Add a setting with the key "prodDBConnection" and value "db_connection".</p> * * {@codesnippet com.azure.data.appconfiguration.ConfigurationClient.addSetting * * @param key The key of the configuration setting to add. * @param value The value associated with this configuration setting key. * @return The {@link ConfigurationSetting} that was created, or {@code null}, if a key collision occurs or the key * is an invalid value (which will also throw ServiceRequestException described below). * @throws IllegalArgumentException If {@code key} is {@code null}. * @throws ResourceModifiedException If a ConfigurationSetting with the same key exists. * @throws HttpResponseException If {@code key} is an empty string. */ @ServiceMethod(returns = ReturnType.SINGLE) public ConfigurationSetting addSetting(String key, String value) { return addSetting(new ConfigurationSetting().setKey(key).setValue(value), Context.NONE).getValue(); } /** * Adds a configuration value in the service if that key and label does not exist. The label value of the * ConfigurationSetting is optional. * * <p><strong>Code Samples</strong></p> * * <p>Add a setting with the key "prodDBConnection", label "westUS", and value "db_connection".</p> * * {@codesnippet com.azure.data.appconfiguration.ConfigurationClient.addSetting * * @param setting The setting to add to the configuration service. 
* @return The {@link ConfigurationSetting} that was created, or {@code null}, if a key collision occurs or the key * is an invalid value (which will also throw ServiceRequestException described below). * @throws NullPointerException If {@code setting} is {@code null}. * @throws IllegalArgumentException If {@link ConfigurationSetting * @throws ResourceModifiedException If a ConfigurationSetting with the same key and label exists. * @throws HttpResponseException If {@code key} is an empty string. */ @ServiceMethod(returns = ReturnType.SINGLE) public ConfigurationSetting addSetting(ConfigurationSetting setting) { return addSetting(setting, Context.NONE).getValue(); } /** * Adds a configuration value in the service if that key and label does not exist. The label value of the * ConfigurationSetting is optional. * * <p><strong>Code Samples</strong></p> * * <p>Add a setting with the key "prodDBConnection", label "westUS", and value "db_connection".</p> * * {@codesnippet com.azure.data.appconfiguration.ConfigurationClient.addSettingWithResponse * * @param setting The setting to add to the configuration service. * @param context Additional context that is passed through the Http pipeline during the service call. * @return A REST response containing the the {@link ConfigurationSetting} that was created, or {@code null}, if a * key collision occurs or the key is an invalid value (which will also throw ServiceRequestException described * below). * @throws NullPointerException If {@code setting} is {@code null}. * @throws IllegalArgumentException If {@link ConfigurationSetting * @throws ResourceModifiedException If a ConfigurationSetting with the same key and label exists. * @throws HttpResponseException If {@code key} is an empty string. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Response<ConfigurationSetting> addSettingWithResponse(ConfigurationSetting setting, Context context) { return addSetting(setting, context); } private Response<ConfigurationSetting> addSetting(ConfigurationSetting setting, Context context) { return client.addSetting(setting, context).block(); } /** * Creates or updates a configuration value in the service with the given key. * * <p><strong>Code Samples</strong></p> * * <p>Add a setting with the key "prodDBConnection" and value "db_connection".</p> * * {@codesnippet com.azure.data.appconfiguration.ConfigurationClient.setSetting * * @param key The key of the configuration setting to create or update. * @param value The value of this configuration setting. * @return The {@link ConfigurationSetting} that was created or updated, or {@code null}, if the key is an invalid * value (which will also throw ServiceRequestException described below). * @throws IllegalArgumentException If {@code key} is {@code null}. * @throws ResourceModifiedException If the setting exists and is locked. * @throws HttpResponseException If {@code key} is an empty string. */ @ServiceMethod(returns = ReturnType.SINGLE) public ConfigurationSetting setSetting(String key, String value) { return setSettingWithResponse(new ConfigurationSetting().setKey(key).setValue(value), Context.NONE).getValue(); } /** * Creates or updates a configuration value in the service. Partial updates are not supported and the entire * configuration setting is updated. * * If {@link ConfigurationSetting * setting's etag matches. If the etag's value is equal to the wildcard character ({@code "*"}), the setting will * always be updated. * * <p><strong>Code Samples</strong></p> * * <p>Add a setting with the key "prodDBConnection", label "westUS", and value "db_connection". <p> * * {@codesnippet com.azure.data.appconfiguration.ConfigurationClient.setSetting * * @param setting The configuration setting to create or update. 
* @return The {@link ConfigurationSetting} that was created or updated, or {@code null}, if the key is an invalid * value, the setting is locked, or an etag was provided but does not match the service's current etag value (which * will also throw ServiceRequestException described below). * @throws NullPointerException If {@code setting} is {@code null}. * @throws IllegalArgumentException If {@link ConfigurationSetting * @throws ResourceModifiedException If the {@link ConfigurationSetting * wildcard character, and the current configuration value's etag does not match, or the setting exists and is * locked. * @throws HttpResponseException If {@code key} is an empty string. */ @ServiceMethod(returns = ReturnType.SINGLE) public ConfigurationSetting setSetting(ConfigurationSetting setting) { return setSettingWithResponse(setting, Context.NONE).getValue(); } /** * Creates or updates a configuration value in the service. Partial updates are not supported and the entire * configuration setting is updated. * * If {@link ConfigurationSetting * setting's etag matches. If the etag's value is equal to the wildcard character ({@code "*"}), the setting will * always be updated. * * <p><strong>Code Samples</strong></p> * * <p>Add a setting with the key "prodDBConnection" and value "db_connection".</p> * * {@codesnippet com.azure.data.appconfiguration.ConfigurationClient.setSettingWithResponse * * @param setting The configuration setting to create or update. * @param context Additional context that is passed through the Http pipeline during the service call. * @return The {@link ConfigurationSetting} that was created or updated, or {@code null}, if the key is an invalid * value, the setting is locked, or an etag was provided but does not match the service's current etag value (which * will also throw ServiceRequestException described below). * @throws NullPointerException If {@code setting} is {@code null}. 
* @throws IllegalArgumentException If {@link ConfigurationSetting * @throws ResourceModifiedException If the {@link ConfigurationSetting * wildcard character, and the current configuration value's etag does not match, or the setting exists and is * locked. * @throws HttpResponseException If {@code key} is an empty string. */ public Response<ConfigurationSetting> setSettingWithResponse(ConfigurationSetting setting, Context context) { return client.setSetting(setting, context).block(); } /** * Updates an existing configuration value in the service with the given key. The setting must already exist. * * <p><strong>Code Samples</strong></p> * * <p>Update a setting with the key "prodDBConnection" to have the value "updated_db_connection".</p> * * {@codesnippet com.azure.data.appconfiguration.ConfigurationClient.updateSetting * * @param key The key of the configuration setting to update. * @param value The updated value of this configuration setting. * @return The {@link ConfigurationSetting} that was updated, or {@code null}, if the configuration value does not * exist, is locked, or the key is an invalid value (which will also throw ServiceRequestException described * below). * @throws IllegalArgumentException If {@code key} is {@code null}. * @throws HttpResponseException If a ConfigurationSetting with the key does not exist or the configuration value is * locked. * @throws HttpResponseException If {@code key} is an empty string. */ @ServiceMethod(returns = ReturnType.SINGLE) /** * Updates an existing configuration value in the service. The setting must already exist. Partial updates are not * supported, the entire configuration value is replaced. * * If {@link ConfigurationSetting * matches. 
* * <p><strong>Code Samples</strong></p> * * <p>Update the setting with the key-label pair "prodDBConnection"-"westUS" to have the value * "updated_db_connection".</p> * * {@codesnippet com.azure.data.applicationconfig.configurationclient.updateSetting * * @param setting The setting to add or update in the service. * @return The {@link ConfigurationSetting} that was updated, or {@code null}, if the configuration value does not * exist, is locked, or the key is an invalid value (which will also throw ServiceRequestException described * below). * @throws NullPointerException If {@code setting} is {@code null}. * @throws IllegalArgumentException If {@link ConfigurationSetting * @throws ResourceModifiedException If a ConfigurationSetting with the same key and label does not exist, the * setting is locked, or {@link ConfigurationSetting * value. * @throws HttpResponseException If {@code key} is an empty string. */ @ServiceMethod(returns = ReturnType.SINGLE) public ConfigurationSetting updateSetting(ConfigurationSetting setting) { return updateSetting(setting, Context.NONE).getValue(); } /** * Updates an existing configuration value in the service. The setting must already exist. Partial updates are not * supported, the entire configuration value is replaced. * * If {@link ConfigurationSetting * matches. * * <p><strong>Code Samples</strong></p> * * <p>Update the setting with the key-label pair "prodDBConnection"-"westUS" to have the value * "updated_db_connection".</p> * * {@codesnippet com.azure.data.applicationconfig.configurationclient.updateSettingWithResponse * * @param setting The setting to add or update in the service. * @param context Additional context that is passed through the Http pipeline during the service call. 
* @return A REST response containing the {@link ConfigurationSetting} that was updated, or {@code null}, if the * configuration value does not exist, is locked, or the key is an invalid value (which will also throw * ServiceRequestException described below). * @throws NullPointerException If {@code setting} is {@code null}. * @throws IllegalArgumentException If {@link ConfigurationSetting * @throws ResourceModifiedException If a ConfigurationSetting with the same key and label does not exist, the * setting is locked, or {@link ConfigurationSetting * value. * @throws HttpResponseException If {@code key} is an empty string. */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<ConfigurationSetting> updateSettingWithResponse(ConfigurationSetting setting, Context context) { return updateSetting(setting, context); } private Response<ConfigurationSetting> updateSetting(ConfigurationSetting setting, Context context) { return client.updateSetting(setting, context).block(); } /** * Attempts to get a ConfigurationSetting that matches the {@code key}. * * <p><strong>Code Samples</strong></p> * * <p>Retrieve the setting with the key "prodDBConnection".</p> * * {@codesnippet com.azure.data.applicationconfig.configurationclient.getSetting * * @param key The key of the setting to retrieve. * @return The {@link ConfigurationSetting} stored in the service, or {@code null}, if the configuration value does * not exist or the key is an invalid value (which will also throw ServiceRequestException described below). * @throws IllegalArgumentException If {@code key} is {@code null}. * @throws ResourceNotFoundException If a ConfigurationSetting with {@code key} does not exist. * @throws HttpResponseException If {@code key} is an empty string. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public ConfigurationSetting getSetting(String key) { return getSetting(new ConfigurationSetting().setKey(key), Context.NONE).getValue(); } /** * Attempts to get the ConfigurationSetting given the {@code key}, optional {@code label}. * * <p><strong>Code Samples</strong></p> * * <p>Retrieve the setting with the key "prodDBConnection".</p> * * {@codesnippet com.azure.data.applicationconfig.configurationclient.getSetting * * @param setting The setting to retrieve based on its key and optional label combination. * @return The {@link ConfigurationSetting} stored in the service, or {@code null}, if the configuration value does * not exist or the key is an invalid value (which will also throw ServiceRequestException described below). * @throws NullPointerException If {@code setting} is {@code null}. * @throws IllegalArgumentException If {@link ConfigurationSetting * @throws ResourceNotFoundException If a ConfigurationSetting with the same key and label does not exist. * @throws HttpResponseException If the {@code} key is an empty string. */ @ServiceMethod(returns = ReturnType.SINGLE) public ConfigurationSetting getSetting(ConfigurationSetting setting) { return getSetting(setting, Context.NONE).getValue(); } /** * Attempts to get the ConfigurationSetting given the {@code key}, optional {@code label}. * * <p><strong>Code Samples</strong></p> * * <p>Retrieve the setting with the key "prodDBConnection".</p> * * {@codesnippet com.azure.data.applicationconfig.configurationclient.getSettingWithResponse * * @param setting The setting to retrieve based on its key and optional label combination. * @param context Additional context that is passed through the Http pipeline during the service call. 
* @return A REST response containg the {@link ConfigurationSetting} stored in the service, or {@code null}, if the * configuration value does not exist or the key is an invalid value (which will also throw ServiceRequestException * described below). * @throws NullPointerException If {@code setting} is {@code null}. * @throws IllegalArgumentException If {@link ConfigurationSetting * @throws ResourceNotFoundException If a ConfigurationSetting with the same key and label does not exist. * @throws HttpResponseException If the {@code} key is an empty string. */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<ConfigurationSetting> getSettingWithResponse(ConfigurationSetting setting, Context context) { return getSetting(setting, context); } private Response<ConfigurationSetting> getSetting(ConfigurationSetting setting, Context context) { return client.getSetting(setting, context).block(); } /** * Deletes the ConfigurationSetting with a matching {@code key}. * * <p><strong>Code Samples</strong></p> * * <p>Delete the setting with the key "prodDBConnection".</p> * * {@codesnippet com.azure.data.applicationconfig.configurationclient.deleteSetting * * @param key The key of the setting to delete. * @return The deleted ConfigurationSetting or {@code null} if it didn't exist. {@code null} is also returned if the * {@code key} is an invalid value (which will also throw ServiceRequestException described below). * @throws IllegalArgumentException If {@code key} is {@code null}. * @throws ResourceModifiedException If the ConfigurationSetting is locked. * @throws HttpResponseException If {@code key} is an empty string. */ @ServiceMethod(returns = ReturnType.SINGLE) public ConfigurationSetting deleteSetting(String key) { return deleteSetting(new ConfigurationSetting().setKey(key), Context.NONE).getValue(); } /** * Deletes the {@link ConfigurationSetting} with a matching key, along with the given label and etag. 
* * If {@link ConfigurationSetting * the setting is <b>only</b> deleted if the etag matches the current etag; this means that no one has updated the * ConfigurationSetting yet. * * <p><strong>Code Samples</strong></p> * * <p>Delete the setting with the key "prodDBConnection".</p> * * {@codesnippet com.azure.data.applicationconfig.configurationclient.deleteSetting * * @param setting The ConfigurationSetting to delete. * @return The deleted ConfigurationSetting or {@code null} if didn't exist. {@code null} is also returned if the * {@code key} is an invalid value or {@link ConfigurationSetting * current etag (which will also throw ServiceRequestException described below). * @throws IllegalArgumentException If {@link ConfigurationSetting * @throws NullPointerException When {@code setting} is {@code null}. * @throws ResourceModifiedException If the ConfigurationSetting is locked. * @throws ResourceNotFoundException If {@link ConfigurationSetting * character, and does not match the current etag value. * @throws HttpResponseException If {@code key} is an empty string. */ @ServiceMethod(returns = ReturnType.SINGLE) public ConfigurationSetting deleteSetting(ConfigurationSetting setting) { return deleteSetting(setting, Context.NONE).getValue(); } /** * Deletes the {@link ConfigurationSetting} with a matching key, along with the given label and etag. * * If {@link ConfigurationSetting * the setting is <b>only</b> deleted if the etag matches the current etag; this means that no one has updated the * ConfigurationSetting yet. * * <p><strong>Code Samples</strong></p> * * <p>Delete the setting with the key "prodDBConnection".</p> * * {@codesnippet com.azure.data.applicationconfig.configurationclient.deleteSettingWithResponse * * @param setting The ConfigurationSetting to delete. * @param context Additional context that is passed through the Http pipeline during the service call. 
* @return A REST response containing the deleted ConfigurationSetting or {@code null} if didn't exist. {@code null} * is also returned if the {@code key} is an invalid value or {@link ConfigurationSetting * does not match the current etag (which will also throw ServiceRequestException described below). * @throws IllegalArgumentException If {@link ConfigurationSetting * @throws NullPointerException When {@code setting} is {@code null}. * @throws ResourceModifiedException If the ConfigurationSetting is locked. * @throws ResourceNotFoundException If {@link ConfigurationSetting * character, and does not match the current etag value. * @throws HttpResponseException If {@code key} is an empty string. */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<ConfigurationSetting> deleteSettingWithResponse(ConfigurationSetting setting, Context context) { return deleteSetting(setting, context); } private Response<ConfigurationSetting> deleteSetting(ConfigurationSetting setting, Context context) { return client.deleteSetting(setting, context).block(); } /** * Fetches the configuration settings that match the {@code options}. If {@code options} is {@code null}, then all * the {@link ConfigurationSetting configuration settings} are fetched with their current values. * * <p><strong>Code Samples</strong></p> * * <p>Retrieve all settings that use the key "prodDBConnection".</p> * * {@codesnippet com.azure.data.applicationconfig.configurationclient.listSettings * * @param options Optional. Options to filter configuration setting results from the service. * @return A {@link PagedIterable} of ConfigurationSettings that matches the {@code options}. If no options were * provided, the List contains all of the current settings in the service. 
*/ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedIterable<ConfigurationSetting> listSettings(SettingSelector options) { return listSettings(options, Context.NONE); } /** * Fetches the configuration settings that match the {@code options}. If {@code options} is {@code null}, then all * the {@link ConfigurationSetting configuration settings} are fetched with their current values. * * <p><strong>Code Samples</strong></p> * * <p>Retrieve all settings that use the key "prodDBConnection".</p> * * {@codesnippet com.azure.data.applicationconfig.configurationclient.listSettings * * @param options Optional. Options to filter configuration setting results from the service. * @param context Additional context that is passed through the Http pipeline during the service call. * @return A {@link PagedIterable} of ConfigurationSettings that matches the {@code options}. If no options were * provided, the {@link PagedIterable} contains all of the current settings in the service. */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedIterable<ConfigurationSetting> listSettings(SettingSelector options, Context context) { return new PagedIterable<>(client.listSettings(options, context)); } /** * Lists chronological/historical representation of {@link ConfigurationSetting} resource(s). Revisions are provided * in descending order from their {@link ConfigurationSetting * after a period of time. The service maintains change history for up to 7 days. * * If {@code options} is {@code null}, then all the {@link ConfigurationSetting ConfigurationSettings} are fetched * in their current state. Otherwise, the results returned match the parameters given in {@code options}. * * <p><strong>Code Samples</strong></p> * * <p>Retrieve all revisions of the setting that has the key "prodDBConnection".</p> * * {@codesnippet com.azure.data.applicationconfig.configurationclient.listSettingRevisions * * @param selector Optional. 
Used to filter configuration setting revisions from the service. * @return {@link PagedIterable} of {@link ConfigurationSetting} revisions. */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedIterable<ConfigurationSetting> listSettingRevisions(SettingSelector selector) { return listSettingRevisions(selector, Context.NONE); } /** * Lists chronological/historical representation of {@link ConfigurationSetting} resource(s). Revisions are provided * in descending order from their {@link ConfigurationSetting * after a period of time. The service maintains change history for up to 7 days. * * If {@code options} is {@code null}, then all the {@link ConfigurationSetting ConfigurationSettings} are fetched * in their current state. Otherwise, the results returned match the parameters given in {@code options}. * * <p><strong>Code Samples</strong></p> * * <p>Retrieve all revisions of the setting that has the key "prodDBConnection".</p> * * {@codesnippet com.azure.data.applicationconfig.configurationclient.listSettingRevisions * * @param selector Optional. Used to filter configuration setting revisions from the service. * @param context Additional context that is passed through the Http pipeline during the service call. * @return {@link PagedIterable} of {@link ConfigurationSetting} revisions. */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedIterable<ConfigurationSetting> listSettingRevisions(SettingSelector selector, Context context) { return new PagedIterable<>(client.listSettingRevisions(selector, context)); } }
`getBody()`?
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { final Flux<ByteBuffer> contents = context.getHttpRequest().body() == null ? Flux.just(getEmptyBuffer()) : context.getHttpRequest().body(); return credentials .getAuthorizationHeadersAsync( context.getHttpRequest().getUrl(), context.getHttpRequest().getHttpMethod().toString(), contents.defaultIfEmpty(getEmptyBuffer())) .flatMapMany(headers -> Flux.fromIterable(headers.entrySet())) .map(header -> context.getHttpRequest().setHeader(header.getKey(), header.getValue())) .last() .flatMap(request -> next.process()); }
: context.getHttpRequest().body();
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { final Flux<ByteBuffer> contents = context.getHttpRequest().getBody() == null ? Flux.just(getEmptyBuffer()) : context.getHttpRequest().getBody(); return credentials .getAuthorizationHeadersAsync( context.getHttpRequest().getUrl(), context.getHttpRequest().getHttpMethod().toString(), contents.defaultIfEmpty(getEmptyBuffer())) .flatMapMany(headers -> Flux.fromIterable(headers.entrySet())) .map(header -> context.getHttpRequest().setHeader(header.getKey(), header.getValue())) .last() .flatMap(request -> next.process()); }
class ConfigurationCredentialsPolicy implements HttpPipelinePolicy { private final ConfigurationClientCredentials credentials; /** * Creates an instance that is able to apply a {@link ConfigurationClientCredentials} credential to a request in the * pipeline. * * @param credentials the credential information to authenticate to Azure App Configuration service */ public ConfigurationCredentialsPolicy(ConfigurationClientCredentials credentials) { this.credentials = credentials; } /** * Adds the required headers to authenticate a request to Azure App Configuration service. * * @param context The request context * @param next The next HTTP pipeline policy to process the {@code context's} request after this policy * completes. * @return A {@link Mono} representing the HTTP response that will arrive asynchronously. */ @Override private ByteBuffer getEmptyBuffer() { return ByteBuffer.allocate(0); } }
class ConfigurationCredentialsPolicy implements HttpPipelinePolicy { private final ConfigurationClientCredentials credentials; /** * Creates an instance that is able to apply a {@link ConfigurationClientCredentials} credential to a request in the * pipeline. * * @param credentials the credential information to authenticate to Azure App Configuration service */ public ConfigurationCredentialsPolicy(ConfigurationClientCredentials credentials) { this.credentials = credentials; } /** * Adds the required headers to authenticate a request to Azure App Configuration service. * * @param context The request context * @param next The next HTTP pipeline policy to process the {@code context's} request after this policy * completes. * @return A {@link Mono} representing the HTTP response that will arrive asynchronously. */ @Override private ByteBuffer getEmptyBuffer() { return ByteBuffer.allocate(0); } }
`getKey()` and `getValue()`
public void setSetting() { ConfigurationClient configurationClient = createSyncConfigurationClient(); ConfigurationSetting result = configurationClient .setSetting("prodDBConnection", "db_connection"); System.out.printf("Key: %s, Value: %s", result.key(), result.value()); result = configurationClient.setSetting("prodDBConnection", "updated_db_connection"); System.out.printf("Key: %s, Value: %s", result.key(), result.value()); /* Generates code sample for using {@link ConfigurationClient */ ConfigurationSetting resultSetting = configurationClient .setSetting(new ConfigurationSetting().key("prodDBConnection").label("westUS").value("db_connection")); System.out.printf("Key: %s, Value: %s", resultSetting.key(), resultSetting.value()); resultSetting = configurationClient .setSetting(new ConfigurationSetting() .key("prodDBConnection").label("westUS").value("updated_db_connection")); System.out.printf("Key: %s, Value: %s", resultSetting.key(), resultSetting.value()); /* Generates code sample for using {@link ConfigurationClient */ Response<ConfigurationSetting> responseSetting = configurationClient .setSettingWithResponse(new ConfigurationSetting().key("prodDBConnection").label("westUS") .value("db_connection"), new Context(key2, value2)); System.out.printf("Key: %s, Value: %s", responseSetting.getValue().key(), responseSetting.getValue().value()); responseSetting = configurationClient .setSettingWithResponse(new ConfigurationSetting().key("prodDBConnection").label("westUS") .value("updated_db_connection"), new Context(key2, value2)); System.out.printf("Key: %s, Value: %s", responseSetting.getValue().key(), responseSetting.getValue().value()); }
System.out.printf("Key: %s, Value: %s", responseSetting.getValue().key(), responseSetting.getValue().value());
public void setSetting() { ConfigurationClient configurationClient = createSyncConfigurationClient(); ConfigurationSetting result = configurationClient .setSetting("prodDBConnection", "db_connection"); System.out.printf("Key: %s, Value: %s", result.getKey(), result.getValue()); result = configurationClient.setSetting("prodDBConnection", "updated_db_connection"); System.out.printf("Key: %s, Value: %s", result.getKey(), result.getValue()); /* Generates code sample for using {@link ConfigurationClient */ ConfigurationSetting resultSetting = configurationClient .setSetting( new ConfigurationSetting().setKey("prodDBConnection").setLabel("westUS").setValue("db_connection")); System.out.printf("Key: %s, Value: %s", resultSetting.getKey(), resultSetting.getValue()); resultSetting = configurationClient .setSetting(new ConfigurationSetting() .setKey("prodDBConnection").setLabel("westUS").setValue("updated_db_connection")); System.out.printf("Key: %s, Value: %s", resultSetting.getKey(), resultSetting.getValue()); /* Generates code sample for using {@link ConfigurationClient */ Response<ConfigurationSetting> responseSetting = configurationClient .setSettingWithResponse(new ConfigurationSetting().setKey("prodDBConnection").setLabel("westUS") .setValue("db_connection"), new Context(key2, value2)); System.out .printf("Key: %s, Value: %s", responseSetting.getValue().getKey(), responseSetting.getValue().getValue()); responseSetting = configurationClient .setSettingWithResponse(new ConfigurationSetting().setKey("prodDBConnection").setLabel("westUS") .setValue("updated_db_connection"), new Context(key2, value2)); System.out .printf("Key: %s, Value: %s", responseSetting.getValue().getKey(), responseSetting.getValue().getValue()); }
class ConfigurationClientJavaDocCodeSnippets { private String key1 = "key1"; private String key2 = "key2"; private String value1 = "val1"; private String value2 = "val2"; /** * Generates code sample for creating a {@link ConfigurationClient} * @return An instance of {@link ConfigurationClient} * @throws IllegalStateException If configuration credentials cannot be created. */ public ConfigurationClient createAsyncConfigurationClientWithPipeline() { try { String connectionString = getConnectionString(); RecordedData networkData = new RecordedData(); HttpPipeline pipeline = new HttpPipelineBuilder().policies(new RecordNetworkCallPolicy(networkData)).build(); ConfigurationClient configurationClient = new ConfigurationClientBuilder() .pipeline(pipeline) .endpoint("https: .credential(new ConfigurationClientCredentials(connectionString)) .buildClient(); return configurationClient; } catch (GeneralSecurityException ex) { throw new IllegalStateException("Failed to create configuration client credentials", ex); } } /** * Generates code sample for creating a {@link ConfigurationClient} * @return An instance of {@link ConfigurationClient} * @throws IllegalStateException If configuration credentials cannot be created */ public ConfigurationAsyncClient createAsyncConfigurationClient() { try { String connectionString = getConnectionString(); ConfigurationAsyncClient configurationAsyncClient = new ConfigurationClientBuilder() .credential(new ConfigurationClientCredentials(connectionString)) .buildAsyncClient(); return configurationAsyncClient; } catch (GeneralSecurityException ex) { throw new IllegalStateException("Failed to create configuration client credentials", ex); } } /** * Generates code sample for creating a {@link ConfigurationClient} * @return An instance of {@link ConfigurationClient} * @throws IllegalStateException If configuration credentials cannot be created */ public ConfigurationClient createSyncConfigurationClient() { try { String connectionString = 
getConnectionString(); ConfigurationClient configurationClient = new ConfigurationClientBuilder() .credential(new ConfigurationClientCredentials(connectionString)) .buildClient(); return configurationClient; } catch (GeneralSecurityException ex) { throw new IllegalStateException("Failed to create configuration client credentials", ex); } } /** * Generates code sample for using {@link ConfigurationClient */ public void addSetting() { ConfigurationClient configurationClient = createSyncConfigurationClient(); ConfigurationSetting result = configurationClient .addSetting("prodDBConnection", "db_connection"); System.out.printf("Key: %s, Value: %s", result.key(), result.value()); /* Generates code sample for using {@link ConfigurationClient */ ConfigurationSetting resultSetting = configurationClient .addSetting(new ConfigurationSetting().key("prodDBConnection").label("westUS").value("db_connection")); System.out.printf("Key: %s, Value: %s", resultSetting.key(), resultSetting.value()); /* Generates code sample for using {@link ConfigurationClient */ Response<ConfigurationSetting> responseResultSetting = configurationClient .addSettingWithResponse( new ConfigurationSetting() .key("prodDBConnection").label("westUS").value("db_connection"), new Context(key1, value1)); System.out.printf("Key: %s, Value: %s", responseResultSetting.getValue().key(), responseResultSetting.getValue().value()); } /** * Generates code sample for using {@link ConfigurationClient */ /** * Generates code sample for using {@link ConfigurationClient */ public void getSetting() { ConfigurationClient configurationClient = createSyncConfigurationClient(); ConfigurationSetting result = configurationClient.getSetting("prodDBConnection"); System.out.printf("Key: %s, Value: %s", result.key(), result.value()); /* Generates code sample for using {@link ConfigurationClient */ ConfigurationSetting resultSetting = configurationClient .getSetting(new ConfigurationSetting().key("prodDBConnection").label("westUS")); 
System.out.printf("Key: %s, Value: %s", resultSetting.key(), resultSetting.value()); /* Generates code sample for using {@link ConfigurationClient */ Response<ConfigurationSetting> responseResultSetting = configurationClient .getSettingWithResponse(new ConfigurationSetting().key("prodDBConnection").label("westUS"), new Context(key1, value1)); System.out.printf("Key: %s, Value: %s", responseResultSetting.getValue().key(), responseResultSetting.getValue().value()); } /** * Generates code sample for using {@link ConfigurationClient */ public void updateSetting() { ConfigurationClient configurationClient = createSyncConfigurationClient(); ConfigurationSetting result = configurationClient.updateSetting("prodDBConnection", "updated_db_connection"); System.out.printf("Key: %s, Value: %s", result.key(), result.value()); /* Generates code sample for using {@link ConfigurationClient */ ConfigurationSetting resultSetting = configurationClient .updateSetting( new ConfigurationSetting().key("prodDBConnection").label("westUS").value("updated_db_connection")); System.out.printf("Key: %s, Value: %s", resultSetting.key(), resultSetting.value()); /* Generates code sample for using {@link ConfigurationClient */ Response<ConfigurationSetting> responseResultSetting = configurationClient .updateSettingWithResponse(new ConfigurationSetting().key("prodDBConnection").label("westUS") .value("updated_db_connection"), new Context(key1, value1)); System.out.printf("Key: %s, Value: %s", responseResultSetting.getValue().key(), responseResultSetting.getValue().value()); } /** * Generates code sample for using {@link ConfigurationClient */ public void deleteSetting() { ConfigurationClient configurationClient = createSyncConfigurationClient(); ConfigurationSetting result = configurationClient .deleteSetting("prodDBConnection"); System.out.printf("Key: %s, Value: %s", result.key(), result.value()); /** * Generates code sample for using {@link ConfigurationClient */ ConfigurationSetting resultSetting 
= configurationClient .deleteSetting(new ConfigurationSetting().key("prodDBConnection").label("westUS")); System.out.printf("Key: %s, Value: %s", resultSetting.key(), resultSetting.value()); /** * Generates code sample for using {@link ConfigurationClient */ Response<ConfigurationSetting> responseSetting = configurationClient .deleteSettingWithResponse(new ConfigurationSetting().key("prodDBConnection").label("westUS"), new Context(key2, value2)); System.out.printf("Key: %s, Value: %s", responseSetting.getValue().key(), responseSetting.getValue().value()); } /** * Generates code sample for using {@link ConfigurationClient */ public void listSettings() { ConfigurationClient configurationClient = createSyncConfigurationClient(); SettingSelector settingSelector = new SettingSelector().keys("prodDBConnection"); configurationClient.listSettings(settingSelector).forEach(setting -> { System.out.printf("Key: %s, Value: %s", setting.key(), setting.value()); }); } /** * Generates code sample for using {@link ConfigurationClient */ public void listSettingsContext() { ConfigurationClient configurationClient = createSyncConfigurationClient(); SettingSelector settingSelector = new SettingSelector().keys("prodDBConnection"); Context ctx = new Context(key2, value2); configurationClient.listSettings(settingSelector, ctx).forEach(setting -> { System.out.printf("Key: %s, Value: %s", setting.key(), setting.value()); }); } /** * Generates code sample for using {@link ConfigurationClient */ public void listSettingRevisions() { ConfigurationClient client = createSyncConfigurationClient(); SettingSelector settingSelector = new SettingSelector().keys("prodDBConnection"); client.listSettingRevisions(settingSelector).streamByPage().forEach(resp -> { System.out.printf("Response headers are %s. 
Url %s and status code %d %n", resp.getHeaders(), resp.getRequest().getUrl(), resp.statusCode()); resp.getItems().forEach(value -> { System.out.printf("Response value is %d %n", value); }); }); } /** * Generates code sample for using {@link ConfigurationClient */ public void listSettingRevisionsContext() { ConfigurationClient configurationClient = createSyncConfigurationClient(); SettingSelector settingSelector = new SettingSelector().keys("prodDBConnection"); Context ctx = new Context(key2, value2); configurationClient.listSettingRevisions(settingSelector, ctx).forEach(setting -> { System.out.printf("Key: %s, Value: %s", setting.key(), setting.value()); }); } /** * Implementation not provided for this method * @return {@code null} */ private String getConnectionString() { return null; } }
class ConfigurationClientJavaDocCodeSnippets { private String key1 = "key1"; private String key2 = "key2"; private String value1 = "val1"; private String value2 = "val2"; /** * Generates code sample for creating a {@link ConfigurationClient} * * @return An instance of {@link ConfigurationClient} * @throws IllegalStateException If configuration credentials cannot be created. */ public ConfigurationClient createAsyncConfigurationClientWithPipeline() { try { String connectionString = getConnectionString(); HttpPipeline pipeline = new HttpPipelineBuilder() .policies(/* add policies */) .build(); ConfigurationClient configurationClient = new ConfigurationClientBuilder() .pipeline(pipeline) .endpoint("https: .credential(new ConfigurationClientCredentials(connectionString)) .buildClient(); return configurationClient; } catch (GeneralSecurityException ex) { throw new IllegalStateException("Failed to create configuration client credentials", ex); } } /** * Generates code sample for creating a {@link ConfigurationClient} * * @return An instance of {@link ConfigurationClient} * @throws IllegalStateException If configuration credentials cannot be created */ public ConfigurationAsyncClient createAsyncConfigurationClient() { try { String connectionString = getConnectionString(); ConfigurationAsyncClient configurationAsyncClient = new ConfigurationClientBuilder() .credential(new ConfigurationClientCredentials(connectionString)) .buildAsyncClient(); return configurationAsyncClient; } catch (GeneralSecurityException ex) { throw new IllegalStateException("Failed to create configuration client credentials", ex); } } /** * Generates code sample for creating a {@link ConfigurationClient} * * @return An instance of {@link ConfigurationClient} * @throws IllegalStateException If configuration credentials cannot be created */ public ConfigurationClient createSyncConfigurationClient() { try { String connectionString = getConnectionString(); ConfigurationClient configurationClient = new 
ConfigurationClientBuilder() .credential(new ConfigurationClientCredentials(connectionString)) .buildClient(); return configurationClient; } catch (GeneralSecurityException ex) { throw new IllegalStateException("Failed to create configuration client credentials", ex); } } /** * Generates code sample for using {@link ConfigurationClient */ public void addSetting() { ConfigurationClient configurationClient = createSyncConfigurationClient(); ConfigurationSetting result = configurationClient .addSetting("prodDBConnection", "db_connection"); System.out.printf("Key: %s, Value: %s", result.getKey(), result.getValue()); /* Generates code sample for using {@link ConfigurationClient */ ConfigurationSetting resultSetting = configurationClient .addSetting( new ConfigurationSetting().setKey("prodDBConnection").setLabel("westUS").setValue("db_connection")); System.out.printf("Key: %s, Value: %s", resultSetting.getKey(), resultSetting.getValue()); /* Generates code sample for using {@link ConfigurationClient */ Response<ConfigurationSetting> responseResultSetting = configurationClient .addSettingWithResponse( new ConfigurationSetting() .setKey("prodDBConnection").setLabel("westUS").setValue("db_connection"), new Context(key1, value1)); System.out.printf("Key: %s, Value: %s", responseResultSetting.getValue().getKey(), responseResultSetting.getValue().getValue()); } /** * Generates code sample for using {@link ConfigurationClient */ /** * Generates code sample for using {@link ConfigurationClient */ public void getSetting() { ConfigurationClient configurationClient = createSyncConfigurationClient(); ConfigurationSetting result = configurationClient.getSetting("prodDBConnection"); System.out.printf("Key: %s, Value: %s", result.getKey(), result.getValue()); /* Generates code sample for using {@link ConfigurationClient */ ConfigurationSetting resultSetting = configurationClient .getSetting(new ConfigurationSetting().setKey("prodDBConnection").setLabel("westUS")); 
System.out.printf("Key: %s, Value: %s", resultSetting.getKey(), resultSetting.getValue()); /* Generates code sample for using {@link ConfigurationClient */ Response<ConfigurationSetting> responseResultSetting = configurationClient .getSettingWithResponse(new ConfigurationSetting().setKey("prodDBConnection").setLabel("westUS"), new Context(key1, value1)); System.out.printf("Key: %s, Value: %s", responseResultSetting.getValue().getKey(), responseResultSetting.getValue().getValue()); } /** * Generates code sample for using {@link ConfigurationClient */ public void updateSetting() { ConfigurationClient configurationClient = createSyncConfigurationClient(); ConfigurationSetting result = configurationClient.updateSetting("prodDBConnection", "updated_db_connection"); System.out.printf("Key: %s, Value: %s", result.getKey(), result.getValue()); /* Generates code sample for using {@link ConfigurationClient */ ConfigurationSetting resultSetting = configurationClient .updateSetting( new ConfigurationSetting().setKey("prodDBConnection").setLabel("westUS") .setValue("updated_db_connection")); System.out.printf("Key: %s, Value: %s", resultSetting.getKey(), resultSetting.getValue()); /* Generates code sample for using {@link ConfigurationClient */ Response<ConfigurationSetting> responseResultSetting = configurationClient .updateSettingWithResponse(new ConfigurationSetting().setKey("prodDBConnection").setLabel("westUS") .setValue("updated_db_connection"), new Context(key1, value1)); System.out.printf("Key: %s, Value: %s", responseResultSetting.getValue().getKey(), responseResultSetting.getValue().getValue()); } /** * Generates code sample for using {@link ConfigurationClient */ public void deleteSetting() { ConfigurationClient configurationClient = createSyncConfigurationClient(); ConfigurationSetting result = configurationClient .deleteSetting("prodDBConnection"); System.out.printf("Key: %s, Value: %s", result.getKey(), result.getValue()); /** * Generates code sample for using 
{@link ConfigurationClient */ ConfigurationSetting resultSetting = configurationClient .deleteSetting(new ConfigurationSetting().setKey("prodDBConnection").setLabel("westUS")); System.out.printf("Key: %s, Value: %s", resultSetting.getKey(), resultSetting.getValue()); /** * Generates code sample for using {@link ConfigurationClient */ Response<ConfigurationSetting> responseSetting = configurationClient .deleteSettingWithResponse(new ConfigurationSetting().setKey("prodDBConnection").setLabel("westUS"), new Context(key2, value2)); System.out .printf("Key: %s, Value: %s", responseSetting.getValue().getKey(), responseSetting.getValue().getValue()); } /** * Generates code sample for using {@link ConfigurationClient */ public void listSettings() { ConfigurationClient configurationClient = createSyncConfigurationClient(); SettingSelector settingSelector = new SettingSelector().setKeys("prodDBConnection"); configurationClient.listSettings(settingSelector).forEach(setting -> { System.out.printf("Key: %s, Value: %s", setting.getKey(), setting.getValue()); }); } /** * Generates code sample for using {@link ConfigurationClient */ public void listSettingsContext() { ConfigurationClient configurationClient = createSyncConfigurationClient(); SettingSelector settingSelector = new SettingSelector().setKeys("prodDBConnection"); Context ctx = new Context(key2, value2); configurationClient.listSettings(settingSelector, ctx).forEach(setting -> { System.out.printf("Key: %s, Value: %s", setting.getKey(), setting.getValue()); }); } /** * Generates code sample for using {@link ConfigurationClient */ public void listSettingRevisions() { ConfigurationClient client = createSyncConfigurationClient(); SettingSelector settingSelector = new SettingSelector().setKeys("prodDBConnection"); client.listSettingRevisions(settingSelector).streamByPage().forEach(resp -> { System.out.printf("Response headers are %s. 
Url %s and status code %d %n", resp.getHeaders(), resp.getRequest().getUrl(), resp.getStatusCode()); resp.getItems().forEach(value -> { System.out.printf("Response value is %d %n", value); }); }); } /** * Generates code sample for using {@link ConfigurationClient */ public void listSettingRevisionsContext() { ConfigurationClient configurationClient = createSyncConfigurationClient(); SettingSelector settingSelector = new SettingSelector().setKeys("prodDBConnection"); Context ctx = new Context(key2, value2); configurationClient.listSettingRevisions(settingSelector, ctx).forEach(setting -> { System.out.printf("Key: %s, Value: %s", setting.getKey(), setting.getValue()); }); } /** * Implementation not provided for this method * * @return {@code null} */ private String getConnectionString() { return null; } }
`getStatusCode()`
static void assertConfigurationEquals(ConfigurationSetting expected, Response<ConfigurationSetting> response, final int expectedStatusCode) { assertNotNull(response); assertEquals(expectedStatusCode, response.statusCode()); assertConfigurationEquals(expected, response.getValue()); }
assertEquals(expectedStatusCode, response.statusCode());
static void assertConfigurationEquals(ConfigurationSetting expected, Response<ConfigurationSetting> response, final int expectedStatusCode) { assertNotNull(response); assertEquals(expectedStatusCode, response.getStatusCode()); assertConfigurationEquals(expected, response.getValue()); }
class ConfigurationClientTestBase extends TestBase { private static final String AZURE_APPCONFIG_CONNECTION_STRING = "AZURE_APPCONFIG_CONNECTION_STRING"; private static final String KEY_PREFIX = "key"; private static final String LABEL_PREFIX = "label"; private static final int PREFIX_LENGTH = 8; private static final int RESOURCE_LENGTH = 16; private static String connectionString; private final ClientLogger logger = new ClientLogger(ConfigurationClientTestBase.class); String keyPrefix; String labelPrefix; @Rule public TestName testName = new TestName(); @Override public String getTestName() { return testName.getMethodName(); } void beforeTestSetup() { keyPrefix = testResourceNamer.randomName(KEY_PREFIX, PREFIX_LENGTH); labelPrefix = testResourceNamer.randomName(LABEL_PREFIX, PREFIX_LENGTH); } <T> T clientSetup(Function<ConfigurationClientCredentials, T> clientBuilder) { if (ImplUtils.isNullOrEmpty(connectionString)) { connectionString = interceptorManager.isPlaybackMode() ? "Endpoint=http: : ConfigurationManager.getConfiguration().get(AZURE_APPCONFIG_CONNECTION_STRING); } Objects.requireNonNull(connectionString, "AZURE_APPCONFIG_CONNECTION_STRING expected to be set."); T client; try { client = clientBuilder.apply(new ConfigurationClientCredentials(connectionString)); } catch (InvalidKeyException | NoSuchAlgorithmException e) { logger.error("Could not create an configuration client credentials.", e); fail(); client = null; } return Objects.requireNonNull(client); } String getKey() { return testResourceNamer.randomName(keyPrefix, RESOURCE_LENGTH); } String getLabel() { return testResourceNamer.randomName(labelPrefix, RESOURCE_LENGTH); } @Test public abstract void addSetting(); void addSettingRunner(Consumer<ConfigurationSetting> testRunner) { final Map<String, String> tags = new HashMap<>(); tags.put("MyTag", "TagValue"); tags.put("AnotherTag", "AnotherTagValue"); final ConfigurationSetting newConfiguration = new ConfigurationSetting() .key(getKey()) 
.value("myNewValue") .tags(tags) .contentType("text"); testRunner.accept(newConfiguration); testRunner.accept(newConfiguration.label(getLabel())); } @Test public abstract void addSettingEmptyKey(); @Test public abstract void addSettingEmptyValue(); void addSettingEmptyValueRunner(Consumer<ConfigurationSetting> testRunner) { String key = getKey(); ConfigurationSetting setting = new ConfigurationSetting().key(key); ConfigurationSetting setting2 = new ConfigurationSetting().key(key + "-1").value(""); testRunner.accept(setting); testRunner.accept(setting2); } @Test public abstract void addSettingNullKey(); @Test public abstract void addExistingSetting(); void addExistingSettingRunner(Consumer<ConfigurationSetting> testRunner) { final ConfigurationSetting newConfiguration = new ConfigurationSetting().key(getKey()).value("myNewValue"); testRunner.accept(newConfiguration); testRunner.accept(newConfiguration.label(getLabel())); } @Test public abstract void setSetting(); void setSettingRunner(BiConsumer<ConfigurationSetting, ConfigurationSetting> testRunner) { String key = getKey(); String label = getLabel(); final ConfigurationSetting setConfiguration = new ConfigurationSetting().key(key).value("myNewValue"); final ConfigurationSetting updateConfiguration = new ConfigurationSetting().key(key).value("myUpdatedValue"); testRunner.accept(setConfiguration, updateConfiguration); testRunner.accept(setConfiguration.label(label), updateConfiguration.label(label)); } @Test public abstract void setSettingIfEtag(); void setSettingIfEtagRunner(BiConsumer<ConfigurationSetting, ConfigurationSetting> testRunner) { String key = getKey(); String label = getLabel(); final ConfigurationSetting newConfiguration = new ConfigurationSetting().key(key).value("myNewValue"); final ConfigurationSetting updateConfiguration = new ConfigurationSetting().key(key).value("myUpdateValue"); testRunner.accept(newConfiguration, updateConfiguration); testRunner.accept(newConfiguration.label(label), 
updateConfiguration.label(label)); } @Test public abstract void setSettingEmptyKey(); @Test public abstract void setSettingEmptyValue(); void setSettingEmptyValueRunner(Consumer<ConfigurationSetting> testRunner) { String key = getKey(); ConfigurationSetting setting = new ConfigurationSetting().key(key); ConfigurationSetting setting2 = new ConfigurationSetting().key(key + "-1").value(""); testRunner.accept(setting); testRunner.accept(setting2); } @Test public abstract void setSettingNullKey(); @Test public abstract void updateNoExistingSetting(); void updateNoExistingSettingRunner(Consumer<ConfigurationSetting> testRunner) { final ConfigurationSetting expectedFail = new ConfigurationSetting().key(getKey()).value("myFailingUpdate"); testRunner.accept(expectedFail); testRunner.accept(expectedFail.label(getLabel())); } @Test public abstract void updateSetting(); void updateSettingRunner(BiConsumer<ConfigurationSetting, ConfigurationSetting> testRunner) { String key = getKey(); String label = getLabel(); final Map<String, String> tags = new HashMap<>(); tags.put("first tag", "first value"); tags.put("second tag", "second value"); final ConfigurationSetting original = new ConfigurationSetting() .key(key) .value("myNewValue") .tags(tags) .contentType("json"); final Map<String, String> updatedTags = new HashMap<>(tags); final ConfigurationSetting updated = new ConfigurationSetting() .key(original.key()) .value("myUpdatedValue") .tags(updatedTags) .contentType("text"); testRunner.accept(original, updated); testRunner.accept(original.label(label), updated.label(label)); } @Test public abstract void updateSettingOverload(); void updateSettingOverloadRunner(BiConsumer<ConfigurationSetting, ConfigurationSetting> testRunner) { String key = getKey(); ConfigurationSetting original = new ConfigurationSetting().key(key).value("A Value"); ConfigurationSetting updated = new ConfigurationSetting().key(key).value("A New Value"); testRunner.accept(original, updated); } @Test public 
abstract void updateSettingNullKey(); @Test public abstract void updateSettingIfEtag(); void updateSettingIfEtagRunner(Consumer<List<ConfigurationSetting>> testRunner) { final String key = getKey(); final String label = getLabel(); final ConfigurationSetting newConfiguration = new ConfigurationSetting().key(key).value("myNewValue"); final ConfigurationSetting updateConfiguration = new ConfigurationSetting().key(key).value("myUpdateValue"); final ConfigurationSetting finalConfiguration = new ConfigurationSetting().key(key).value("myFinalValue"); testRunner.accept(Arrays.asList(newConfiguration, updateConfiguration, finalConfiguration)); testRunner.accept(Arrays.asList(newConfiguration.label(label), updateConfiguration.label(label), finalConfiguration.label(label))); } @Test public abstract void getSetting(); void getSettingRunner(Consumer<ConfigurationSetting> testRunner) { String key = getKey(); final ConfigurationSetting newConfiguration = new ConfigurationSetting().key(key).value("myNewValue"); testRunner.accept(newConfiguration); testRunner.accept(newConfiguration.label("myLabel")); } @Test public abstract void getSettingNotFound(); @Test public abstract void deleteSetting(); void deleteSettingRunner(Consumer<ConfigurationSetting> testRunner) { String key = getKey(); String label = getLabel(); final ConfigurationSetting deletableConfiguration = new ConfigurationSetting().key(key).value("myValue"); testRunner.accept(deletableConfiguration); testRunner.accept(deletableConfiguration.label(label)); } @Test public abstract void deleteSettingNotFound(); @Test public abstract void deleteSettingWithETag(); void deleteSettingWithETagRunner(BiConsumer<ConfigurationSetting, ConfigurationSetting> testRunner) { String key = getKey(); String label = getLabel(); final ConfigurationSetting newConfiguration = new ConfigurationSetting().key(key).value("myNewValue"); final ConfigurationSetting updateConfiguration = new 
ConfigurationSetting().key(newConfiguration.key()).value("myUpdateValue"); testRunner.accept(newConfiguration, updateConfiguration); testRunner.accept(newConfiguration.label(label), updateConfiguration.label(label)); } @Test public abstract void deleteSettingNullKey(); @Test public abstract void listWithKeyAndLabel(); @Test public abstract void listWithMultipleKeys(); void listWithMultipleKeysRunner(String key, String key2, BiFunction<ConfigurationSetting, ConfigurationSetting, Iterable<ConfigurationSetting>> testRunner) { final ConfigurationSetting setting = new ConfigurationSetting().key(key).value("value"); final ConfigurationSetting setting2 = new ConfigurationSetting().key(key2).value("value"); final Set<ConfigurationSetting> expectedSelection = new HashSet<>(Arrays.asList(setting, setting2)); testRunner.apply(setting, setting2).forEach(actual -> expectedSelection.removeIf(expected -> expected.equals(cleanResponse(expected, actual)))); assertTrue(expectedSelection.isEmpty()); } @Test public abstract void listWithMultipleLabels(); void listWithMultipleLabelsRunner(String key, String label, String label2, BiFunction<ConfigurationSetting, ConfigurationSetting, Iterable<ConfigurationSetting>> testRunner) { final ConfigurationSetting setting = new ConfigurationSetting().key(key).value("value").label(label); final ConfigurationSetting setting2 = new ConfigurationSetting().key(key).value("value").label(label2); final Set<ConfigurationSetting> expectedSelection = new HashSet<>(Arrays.asList(setting, setting2)); for (ConfigurationSetting actual : testRunner.apply(setting, setting2)) { expectedSelection.removeIf(expected -> expected.equals(cleanResponse(expected, actual))); } assertTrue(expectedSelection.isEmpty()); } @Test public abstract void listSettingsSelectFields(); void listSettingsSelectFieldsRunner(BiFunction<List<ConfigurationSetting>, SettingSelector, Iterable<ConfigurationSetting>> testRunner) { final String label = "my-first-mylabel"; final String label2 = 
"my-second-mylabel"; final int numberToCreate = 8; final Map<String, String> tags = new HashMap<>(); tags.put("tag1", "value1"); tags.put("tag2", "value2"); final SettingSelector selector = new SettingSelector() .labels("*-second*") .keys(keyPrefix + "-fetch-*") .fields(SettingFields.KEY, SettingFields.ETAG, SettingFields.CONTENT_TYPE, SettingFields.TAGS); List<ConfigurationSetting> settings = new ArrayList<>(numberToCreate); for (int value = 0; value < numberToCreate; value++) { String key = value % 2 == 0 ? keyPrefix + "-" + value : keyPrefix + "-fetch-" + value; String lbl = value / 4 == 0 ? label : label2; settings.add(new ConfigurationSetting().key(key).value("myValue2").label(lbl).tags(tags)); } for (ConfigurationSetting setting : testRunner.apply(settings, selector)) { assertNotNull(setting.etag()); assertNotNull(setting.key()); assertTrue(setting.key().contains(keyPrefix)); assertNotNull(setting.tags()); assertEquals(tags.size(), setting.tags().size()); assertNull(setting.lastModified()); assertNull(setting.contentType()); assertNull(setting.label()); } } @Test public abstract void listSettingsAcceptDateTime(); @Test public abstract void listRevisions(); static void validateListRevisions(ConfigurationSetting expected, ConfigurationSetting actual) { assertEquals(expected.key(), actual.key()); assertNotNull(actual.etag()); assertNull(actual.value()); assertNull(actual.lastModified()); } @Test public abstract void listRevisionsWithMultipleKeys(); void listRevisionsWithMultipleKeysRunner(String key, String key2, Function<List<ConfigurationSetting>, Iterable<ConfigurationSetting>> testRunner) { final ConfigurationSetting setting = new ConfigurationSetting().key(key).value("value"); final ConfigurationSetting settingUpdate = new ConfigurationSetting().key(setting.key()).value("updatedValue"); final ConfigurationSetting setting2 = new ConfigurationSetting().key(key2).value("value"); final ConfigurationSetting setting2Update = new 
ConfigurationSetting().key(setting2.key()).value("updatedValue"); final List<ConfigurationSetting> testInput = Arrays.asList(setting, settingUpdate, setting2, setting2Update); final Set<ConfigurationSetting> expectedSelection = new HashSet<>(testInput); for (ConfigurationSetting actual : testRunner.apply(testInput)) { expectedSelection.removeIf(expected -> expected.equals(cleanResponse(expected, actual))); } assertTrue(expectedSelection.isEmpty()); } @Test public abstract void listRevisionsWithMultipleLabels(); void listRevisionsWithMultipleLabelsRunner(String key, String label, String label2, Function<List<ConfigurationSetting>, Iterable<ConfigurationSetting>> testRunner) { final ConfigurationSetting setting = new ConfigurationSetting().key(key).value("value").label(label); final ConfigurationSetting settingUpdate = new ConfigurationSetting().key(setting.key()).label(setting.label()).value("updatedValue"); final ConfigurationSetting setting2 = new ConfigurationSetting().key(key).value("value").label(label2); final ConfigurationSetting setting2Update = new ConfigurationSetting().key(setting2.key()).label(setting2.label()).value("updatedValue"); final List<ConfigurationSetting> testInput = Arrays.asList(setting, settingUpdate, setting2, setting2Update); final Set<ConfigurationSetting> expectedSelection = new HashSet<>(testInput); for (ConfigurationSetting actual : testRunner.apply(testInput)) { expectedSelection.removeIf(expected -> expected.equals(cleanResponse(expected, actual))); } assertTrue(expectedSelection.isEmpty()); } @Test public abstract void listRevisionsWithRange(); @Test @Ignore("alzimmermsft to investigate") public abstract void listRevisionsInvalidRange(); @Test public abstract void listRevisionsAcceptDateTime(); @Test public abstract void listRevisionsWithPagination(); @Test public abstract void listSettingsWithPagination(); @Test public abstract void listRevisionsWithPaginationAndRepeatStream(); @Test public abstract void 
listRevisionsWithPaginationAndRepeatIterator(); @Ignore("Getting a configuration setting only when the value has changed is not a common scenario.") @Test public abstract void getSettingWhenValueNotUpdated(); @Ignore("This test exists to clean up resources missed due to 429s.") @Test public abstract void deleteAllSettings(); /** * Helper method to verify that the RestResponse matches what was expected. This method assumes a response status of 200. * * @param expected ConfigurationSetting expected to be returned by the service * @param response RestResponse returned by the service, the body should contain a ConfigurationSetting */ static void assertConfigurationEquals(ConfigurationSetting expected, Response<ConfigurationSetting> response) { assertConfigurationEquals(expected, response, 200); } /** * Helper method to verify that the RestResponse matches what was expected. * * @param expected ConfigurationSetting expected to be returned by the service * @param response RestResponse returned from the service, the body should contain a ConfigurationSetting * @param expectedStatusCode Expected HTTP status code returned by the service */ /** * Helper method to verify that the returned ConfigurationSetting matches what was expected. * * @param expected ConfigurationSetting expected to be returned by the service * @param actual ConfigurationSetting contained in the RestResponse body */ static void assertConfigurationEquals(ConfigurationSetting expected, ConfigurationSetting actual) { if (expected != null && actual != null) { actual = cleanResponse(expected, actual); } assertEquals(expected, actual); } /** * The ConfigurationSetting has some fields that are only manipulated by the service, * this helper method cleans those fields on the setting returned by the service so tests are able to pass. * @param expected ConfigurationSetting expected to be returned by the service. * @param actual ConfigurationSetting returned by the service. 
*/ private static ConfigurationSetting cleanResponse(ConfigurationSetting expected, ConfigurationSetting actual) { ConfigurationSetting cleanedActual = new ConfigurationSetting() .key(actual.key()) .label(actual.label()) .value(actual.value()) .tags(actual.tags()) .contentType(actual.contentType()) .etag(expected.etag()); try { Field lastModified = ConfigurationSetting.class.getDeclaredField("lastModified"); lastModified.setAccessible(true); lastModified.set(actual, expected.lastModified()); } catch (NoSuchFieldException | IllegalAccessException ex) { } if (ConfigurationSetting.NO_LABEL.equals(expected.label()) && actual.label() == null) { cleanedActual.label(ConfigurationSetting.NO_LABEL); } return cleanedActual; } static void assertRestException(Runnable exceptionThrower, int expectedStatusCode) { assertRestException(exceptionThrower, HttpResponseException.class, expectedStatusCode); } static void assertRestException(Runnable exceptionThrower, Class<? extends HttpResponseException> expectedExceptionType, int expectedStatusCode) { try { exceptionThrower.run(); fail(); } catch (Throwable ex) { assertRestException(ex, expectedExceptionType, expectedStatusCode); } } /** * Helper method to verify the error was a HttpResponseException and it has a specific HTTP response code. * * @param exception Expected error thrown during the test * @param expectedStatusCode Expected HTTP status code contained in the error response */ static void assertRestException(Throwable exception, int expectedStatusCode) { assertRestException(exception, HttpResponseException.class, expectedStatusCode); } static void assertRestException(Throwable exception, Class<? extends HttpResponseException> expectedExceptionType, int expectedStatusCode) { assertEquals(expectedExceptionType, exception.getClass()); assertEquals(expectedStatusCode, ((HttpResponseException) exception).getResponse().statusCode()); } /** * Helper method to verify that a command throws an IllegalArgumentException. 
* * @param exceptionThrower Command that should throw the exception */ static <T> void assertRunnableThrowsException(Runnable exceptionThrower, Class<T> exception) { try { exceptionThrower.run(); fail(); } catch (Exception ex) { assertEquals(exception, ex.getClass()); } } }
class ConfigurationClientTestBase extends TestBase { private static final String AZURE_APPCONFIG_CONNECTION_STRING = "AZURE_APPCONFIG_CONNECTION_STRING"; private static final String KEY_PREFIX = "key"; private static final String LABEL_PREFIX = "label"; private static final int PREFIX_LENGTH = 8; private static final int RESOURCE_LENGTH = 16; private static String connectionString; private final ClientLogger logger = new ClientLogger(ConfigurationClientTestBase.class); String keyPrefix; String labelPrefix; @Rule public TestName testName = new TestName(); @Override public String getTestName() { return testName.getMethodName(); } void beforeTestSetup() { keyPrefix = testResourceNamer.randomName(KEY_PREFIX, PREFIX_LENGTH); labelPrefix = testResourceNamer.randomName(LABEL_PREFIX, PREFIX_LENGTH); } <T> T clientSetup(Function<ConfigurationClientCredentials, T> clientBuilder) { if (ImplUtils.isNullOrEmpty(connectionString)) { connectionString = interceptorManager.isPlaybackMode() ? "Endpoint=http: : ConfigurationManager.getConfiguration().get(AZURE_APPCONFIG_CONNECTION_STRING); } Objects.requireNonNull(connectionString, "AZURE_APPCONFIG_CONNECTION_STRING expected to be set."); T client; try { client = clientBuilder.apply(new ConfigurationClientCredentials(connectionString)); } catch (InvalidKeyException | NoSuchAlgorithmException e) { logger.error("Could not create an configuration client credentials.", e); fail(); client = null; } return Objects.requireNonNull(client); } String getKey() { return testResourceNamer.randomName(keyPrefix, RESOURCE_LENGTH); } String getLabel() { return testResourceNamer.randomName(labelPrefix, RESOURCE_LENGTH); } @Test public abstract void addSetting(); void addSettingRunner(Consumer<ConfigurationSetting> testRunner) { final Map<String, String> tags = new HashMap<>(); tags.put("MyTag", "TagValue"); tags.put("AnotherTag", "AnotherTagValue"); final ConfigurationSetting newConfiguration = new ConfigurationSetting() .setKey(getKey()) 
.setValue("myNewValue") .setTags(tags) .setContentType("text"); testRunner.accept(newConfiguration); testRunner.accept(newConfiguration.setLabel(getLabel())); } @Test public abstract void addSettingEmptyKey(); @Test public abstract void addSettingEmptyValue(); void addSettingEmptyValueRunner(Consumer<ConfigurationSetting> testRunner) { String key = getKey(); ConfigurationSetting setting = new ConfigurationSetting().setKey(key); ConfigurationSetting setting2 = new ConfigurationSetting().setKey(key + "-1").setValue(""); testRunner.accept(setting); testRunner.accept(setting2); } @Test public abstract void addSettingNullKey(); @Test public abstract void addExistingSetting(); void addExistingSettingRunner(Consumer<ConfigurationSetting> testRunner) { final ConfigurationSetting newConfiguration = new ConfigurationSetting().setKey(getKey()).setValue("myNewValue"); testRunner.accept(newConfiguration); testRunner.accept(newConfiguration.setLabel(getLabel())); } @Test public abstract void setSetting(); void setSettingRunner(BiConsumer<ConfigurationSetting, ConfigurationSetting> testRunner) { String key = getKey(); String label = getLabel(); final ConfigurationSetting setConfiguration = new ConfigurationSetting().setKey(key).setValue("myNewValue"); final ConfigurationSetting updateConfiguration = new ConfigurationSetting().setKey(key).setValue("myUpdatedValue"); testRunner.accept(setConfiguration, updateConfiguration); testRunner.accept(setConfiguration.setLabel(label), updateConfiguration.setLabel(label)); } @Test public abstract void setSettingIfEtag(); void setSettingIfEtagRunner(BiConsumer<ConfigurationSetting, ConfigurationSetting> testRunner) { String key = getKey(); String label = getLabel(); final ConfigurationSetting newConfiguration = new ConfigurationSetting().setKey(key).setValue("myNewValue"); final ConfigurationSetting updateConfiguration = new ConfigurationSetting().setKey(key).setValue("myUpdateValue"); testRunner.accept(newConfiguration, updateConfiguration); 
testRunner.accept(newConfiguration.setLabel(label), updateConfiguration.setLabel(label)); } @Test public abstract void setSettingEmptyKey(); @Test public abstract void setSettingEmptyValue(); void setSettingEmptyValueRunner(Consumer<ConfigurationSetting> testRunner) { String key = getKey(); ConfigurationSetting setting = new ConfigurationSetting().setKey(key); ConfigurationSetting setting2 = new ConfigurationSetting().setKey(key + "-1").setValue(""); testRunner.accept(setting); testRunner.accept(setting2); } @Test public abstract void setSettingNullKey(); @Test public abstract void updateNoExistingSetting(); void updateNoExistingSettingRunner(Consumer<ConfigurationSetting> testRunner) { final ConfigurationSetting expectedFail = new ConfigurationSetting().setKey(getKey()).setValue("myFailingUpdate"); testRunner.accept(expectedFail); testRunner.accept(expectedFail.setLabel(getLabel())); } @Test public abstract void updateSetting(); void updateSettingRunner(BiConsumer<ConfigurationSetting, ConfigurationSetting> testRunner) { String key = getKey(); String label = getLabel(); final Map<String, String> tags = new HashMap<>(); tags.put("first tag", "first value"); tags.put("second tag", "second value"); final ConfigurationSetting original = new ConfigurationSetting() .setKey(key) .setValue("myNewValue") .setTags(tags) .setContentType("json"); final Map<String, String> updatedTags = new HashMap<>(tags); final ConfigurationSetting updated = new ConfigurationSetting() .setKey(original.getKey()) .setValue("myUpdatedValue") .setTags(updatedTags) .setContentType("text"); testRunner.accept(original, updated); testRunner.accept(original.setLabel(label), updated.setLabel(label)); } @Test public abstract void updateSettingOverload(); void updateSettingOverloadRunner(BiConsumer<ConfigurationSetting, ConfigurationSetting> testRunner) { String key = getKey(); ConfigurationSetting original = new ConfigurationSetting().setKey(key).setValue("A Value"); ConfigurationSetting updated = new 
ConfigurationSetting().setKey(key).setValue("A New Value"); testRunner.accept(original, updated); } @Test public abstract void updateSettingNullKey(); @Test public abstract void updateSettingIfEtag(); void updateSettingIfEtagRunner(Consumer<List<ConfigurationSetting>> testRunner) { final String key = getKey(); final String label = getLabel(); final ConfigurationSetting newConfiguration = new ConfigurationSetting().setKey(key).setValue("myNewValue"); final ConfigurationSetting updateConfiguration = new ConfigurationSetting().setKey(key).setValue("myUpdateValue"); final ConfigurationSetting finalConfiguration = new ConfigurationSetting().setKey(key).setValue("myFinalValue"); testRunner.accept(Arrays.asList(newConfiguration, updateConfiguration, finalConfiguration)); testRunner.accept(Arrays.asList(newConfiguration.setLabel(label), updateConfiguration.setLabel(label), finalConfiguration.setLabel(label))); } @Test public abstract void getSetting(); void getSettingRunner(Consumer<ConfigurationSetting> testRunner) { String key = getKey(); final ConfigurationSetting newConfiguration = new ConfigurationSetting().setKey(key).setValue("myNewValue"); testRunner.accept(newConfiguration); testRunner.accept(newConfiguration.setLabel("myLabel")); } @Test public abstract void getSettingNotFound(); @Test public abstract void deleteSetting(); void deleteSettingRunner(Consumer<ConfigurationSetting> testRunner) { String key = getKey(); String label = getLabel(); final ConfigurationSetting deletableConfiguration = new ConfigurationSetting().setKey(key).setValue("myValue"); testRunner.accept(deletableConfiguration); testRunner.accept(deletableConfiguration.setLabel(label)); } @Test public abstract void deleteSettingNotFound(); @Test public abstract void deleteSettingWithETag(); void deleteSettingWithETagRunner(BiConsumer<ConfigurationSetting, ConfigurationSetting> testRunner) { String key = getKey(); String label = getLabel(); final ConfigurationSetting newConfiguration = new 
ConfigurationSetting().setKey(key).setValue("myNewValue"); final ConfigurationSetting updateConfiguration = new ConfigurationSetting().setKey(newConfiguration.getKey()).setValue("myUpdateValue"); testRunner.accept(newConfiguration, updateConfiguration); testRunner.accept(newConfiguration.setLabel(label), updateConfiguration.setLabel(label)); } @Test public abstract void deleteSettingNullKey(); @Test public abstract void listWithKeyAndLabel(); @Test public abstract void listWithMultipleKeys(); void listWithMultipleKeysRunner(String key, String key2, BiFunction<ConfigurationSetting, ConfigurationSetting, Iterable<ConfigurationSetting>> testRunner) { final ConfigurationSetting setting = new ConfigurationSetting().setKey(key).setValue("value"); final ConfigurationSetting setting2 = new ConfigurationSetting().setKey(key2).setValue("value"); final Set<ConfigurationSetting> expectedSelection = new HashSet<>(Arrays.asList(setting, setting2)); testRunner.apply(setting, setting2).forEach(actual -> expectedSelection.removeIf(expected -> expected.equals(cleanResponse(expected, actual)))); assertTrue(expectedSelection.isEmpty()); } @Test public abstract void listWithMultipleLabels(); void listWithMultipleLabelsRunner(String key, String label, String label2, BiFunction<ConfigurationSetting, ConfigurationSetting, Iterable<ConfigurationSetting>> testRunner) { final ConfigurationSetting setting = new ConfigurationSetting().setKey(key).setValue("value").setLabel(label); final ConfigurationSetting setting2 = new ConfigurationSetting().setKey(key).setValue("value").setLabel(label2); final Set<ConfigurationSetting> expectedSelection = new HashSet<>(Arrays.asList(setting, setting2)); for (ConfigurationSetting actual : testRunner.apply(setting, setting2)) { expectedSelection.removeIf(expected -> expected.equals(cleanResponse(expected, actual))); } assertTrue(expectedSelection.isEmpty()); } @Test public abstract void listSettingsSelectFields(); void 
listSettingsSelectFieldsRunner(BiFunction<List<ConfigurationSetting>, SettingSelector, Iterable<ConfigurationSetting>> testRunner) { final String label = "my-first-mylabel"; final String label2 = "my-second-mylabel"; final int numberToCreate = 8; final Map<String, String> tags = new HashMap<>(); tags.put("tag1", "value1"); tags.put("tag2", "value2"); final SettingSelector selector = new SettingSelector() .setLabels("*-second*") .setKeys(keyPrefix + "-fetch-*") .setFields(SettingFields.KEY, SettingFields.ETAG, SettingFields.CONTENT_TYPE, SettingFields.TAGS); List<ConfigurationSetting> settings = new ArrayList<>(numberToCreate); for (int value = 0; value < numberToCreate; value++) { String key = value % 2 == 0 ? keyPrefix + "-" + value : keyPrefix + "-fetch-" + value; String lbl = value / 4 == 0 ? label : label2; settings.add(new ConfigurationSetting().setKey(key).setValue("myValue2").setLabel(lbl).setTags(tags)); } for (ConfigurationSetting setting : testRunner.apply(settings, selector)) { assertNotNull(setting.getETag()); assertNotNull(setting.getKey()); assertTrue(setting.getKey().contains(keyPrefix)); assertNotNull(setting.getTags()); assertEquals(tags.size(), setting.getTags().size()); assertNull(setting.getLastModified()); assertNull(setting.getContentType()); assertNull(setting.getLabel()); } } @Test public abstract void listSettingsAcceptDateTime(); @Test public abstract void listRevisions(); static void validateListRevisions(ConfigurationSetting expected, ConfigurationSetting actual) { assertEquals(expected.getKey(), actual.getKey()); assertNotNull(actual.getETag()); assertNull(actual.getValue()); assertNull(actual.getLastModified()); } @Test public abstract void listRevisionsWithMultipleKeys(); void listRevisionsWithMultipleKeysRunner(String key, String key2, Function<List<ConfigurationSetting>, Iterable<ConfigurationSetting>> testRunner) { final ConfigurationSetting setting = new ConfigurationSetting().setKey(key).setValue("value"); final 
ConfigurationSetting settingUpdate = new ConfigurationSetting().setKey(setting.getKey()).setValue("updatedValue"); final ConfigurationSetting setting2 = new ConfigurationSetting().setKey(key2).setValue("value"); final ConfigurationSetting setting2Update = new ConfigurationSetting().setKey(setting2.getKey()).setValue("updatedValue"); final List<ConfigurationSetting> testInput = Arrays.asList(setting, settingUpdate, setting2, setting2Update); final Set<ConfigurationSetting> expectedSelection = new HashSet<>(testInput); for (ConfigurationSetting actual : testRunner.apply(testInput)) { expectedSelection.removeIf(expected -> expected.equals(cleanResponse(expected, actual))); } assertTrue(expectedSelection.isEmpty()); } @Test public abstract void listRevisionsWithMultipleLabels(); void listRevisionsWithMultipleLabelsRunner(String key, String label, String label2, Function<List<ConfigurationSetting>, Iterable<ConfigurationSetting>> testRunner) { final ConfigurationSetting setting = new ConfigurationSetting().setKey(key).setValue("value").setLabel(label); final ConfigurationSetting settingUpdate = new ConfigurationSetting().setKey(setting.getKey()).setLabel(setting.getLabel()).setValue("updatedValue"); final ConfigurationSetting setting2 = new ConfigurationSetting().setKey(key).setValue("value").setLabel(label2); final ConfigurationSetting setting2Update = new ConfigurationSetting().setKey(setting2.getKey()).setLabel(setting2.getLabel()).setValue("updatedValue"); final List<ConfigurationSetting> testInput = Arrays.asList(setting, settingUpdate, setting2, setting2Update); final Set<ConfigurationSetting> expectedSelection = new HashSet<>(testInput); for (ConfigurationSetting actual : testRunner.apply(testInput)) { expectedSelection.removeIf(expected -> expected.equals(cleanResponse(expected, actual))); } assertTrue(expectedSelection.isEmpty()); } @Test public abstract void listRevisionsWithRange(); @Test @Ignore("alzimmermsft to investigate") public abstract void 
listRevisionsInvalidRange(); @Test public abstract void listRevisionsAcceptDateTime(); @Test public abstract void listRevisionsWithPagination(); @Test public abstract void listSettingsWithPagination(); @Test public abstract void listRevisionsWithPaginationAndRepeatStream(); @Test public abstract void listRevisionsWithPaginationAndRepeatIterator(); @Ignore("Getting a configuration setting only when the value has changed is not a common scenario.") @Test public abstract void getSettingWhenValueNotUpdated(); @Ignore("This test exists to clean up resources missed due to 429s.") @Test public abstract void deleteAllSettings(); /** * Helper method to verify that the RestResponse matches what was expected. This method assumes a response status of 200. * * @param expected ConfigurationSetting expected to be returned by the service * @param response RestResponse returned by the service, the body should contain a ConfigurationSetting */ static void assertConfigurationEquals(ConfigurationSetting expected, Response<ConfigurationSetting> response) { assertConfigurationEquals(expected, response, 200); } /** * Helper method to verify that the RestResponse matches what was expected. * * @param expected ConfigurationSetting expected to be returned by the service * @param response RestResponse returned from the service, the body should contain a ConfigurationSetting * @param expectedStatusCode Expected HTTP status code returned by the service */ /** * Helper method to verify that the returned ConfigurationSetting matches what was expected. 
* * @param expected ConfigurationSetting expected to be returned by the service * @param actual ConfigurationSetting contained in the RestResponse body */ static void assertConfigurationEquals(ConfigurationSetting expected, ConfigurationSetting actual) { if (expected != null && actual != null) { actual = cleanResponse(expected, actual); } assertEquals(expected, actual); } /** * The ConfigurationSetting has some fields that are only manipulated by the service, * this helper method cleans those fields on the setting returned by the service so tests are able to pass. * @param expected ConfigurationSetting expected to be returned by the service. * @param actual ConfigurationSetting returned by the service. */ private static ConfigurationSetting cleanResponse(ConfigurationSetting expected, ConfigurationSetting actual) { ConfigurationSetting cleanedActual = new ConfigurationSetting() .setKey(actual.getKey()) .setLabel(actual.getLabel()) .setValue(actual.getValue()) .setTags(actual.getTags()) .setContentType(actual.getContentType()) .setETag(expected.getETag()); try { Field lastModified = ConfigurationSetting.class.getDeclaredField("lastModified"); lastModified.setAccessible(true); lastModified.set(actual, expected.getLastModified()); } catch (NoSuchFieldException | IllegalAccessException ex) { } if (ConfigurationSetting.NO_LABEL.equals(expected.getLabel()) && actual.getLabel() == null) { cleanedActual.setLabel(ConfigurationSetting.NO_LABEL); } return cleanedActual; } static void assertRestException(Runnable exceptionThrower, int expectedStatusCode) { assertRestException(exceptionThrower, HttpResponseException.class, expectedStatusCode); } static void assertRestException(Runnable exceptionThrower, Class<? 
extends HttpResponseException> expectedExceptionType, int expectedStatusCode) { try { exceptionThrower.run(); fail(); } catch (Throwable ex) { assertRestException(ex, expectedExceptionType, expectedStatusCode); } } /** * Helper method to verify the error was a HttpResponseException and it has a specific HTTP response code. * * @param exception Expected error thrown during the test * @param expectedStatusCode Expected HTTP status code contained in the error response */ static void assertRestException(Throwable exception, int expectedStatusCode) { assertRestException(exception, HttpResponseException.class, expectedStatusCode); } static void assertRestException(Throwable exception, Class<? extends HttpResponseException> expectedExceptionType, int expectedStatusCode) { assertEquals(expectedExceptionType, exception.getClass()); assertEquals(expectedStatusCode, ((HttpResponseException) exception).getResponse().getStatusCode()); } /** * Helper method to verify that a command throws an IllegalArgumentException. * * @param exceptionThrower Command that should throw the exception */ static <T> void assertRunnableThrowsException(Runnable exceptionThrower, Class<T> exception) { try { exceptionThrower.run(); fail(); } catch (Exception ex) { assertEquals(exception, ex.getClass()); } } }
Yep, once app configuration is refactored.
public ConfigurationSetting setSetting(String key, String value) { return setSetting(new ConfigurationSetting().key(key).value(value), Context.NONE).getValue(); }
return setSetting(new ConfigurationSetting().key(key).value(value), Context.NONE).getValue();
public ConfigurationSetting setSetting(String key, String value) { return setSettingWithResponse(new ConfigurationSetting().setKey(key).setValue(value), Context.NONE).getValue(); }
class ConfigurationClient { private final ConfigurationAsyncClient client; /** * Creates a ConfigurationClient that sends requests to the configuration service at {@code serviceEndpoint}. * Each service call goes through the {@code pipeline}. * * @param client The {@link ConfigurationAsyncClient} that the client routes its request through. */ ConfigurationClient(ConfigurationAsyncClient client) { this.client = client; } /** * Adds a configuration value in the service if that key does not exist. * * <p><strong>Code Samples</strong></p> * * <p>Add a setting with the key "prodDBConnection" and value "db_connection".</p> * * {@codesnippet com.azure.data.appconfiguration.ConfigurationClient.addSetting * * @param key The key of the configuration setting to add. * @param value The value associated with this configuration setting key. * @return The {@link ConfigurationSetting} that was created, or {@code null}, if a key collision occurs or the key * is an invalid value (which will also throw ServiceRequestException described below). * @throws IllegalArgumentException If {@code key} is {@code null}. * @throws ResourceModifiedException If a ConfigurationSetting with the same key exists. * @throws HttpResponseException If {@code key} is an empty string. */ @ServiceMethod(returns = ReturnType.SINGLE) public ConfigurationSetting addSetting(String key, String value) { return addSetting(new ConfigurationSetting().key(key).value(value), Context.NONE).getValue(); } /** * Adds a configuration value in the service if that key and label does not exist. The label value of the * ConfigurationSetting is optional. * * <p><strong>Code Samples</strong></p> * * <p>Add a setting with the key "prodDBConnection", label "westUS", and value "db_connection".</p> * * {@codesnippet com.azure.data.appconfiguration.ConfigurationClient.addSetting * * @param setting The setting to add to the configuration service. 
* @return The {@link ConfigurationSetting} that was created, or {@code null}, if a key collision occurs or the key * is an invalid value (which will also throw ServiceRequestException described below). * @throws NullPointerException If {@code setting} is {@code null}. * @throws IllegalArgumentException If {@link ConfigurationSetting * @throws ResourceModifiedException If a ConfigurationSetting with the same key and label exists. * @throws HttpResponseException If {@code key} is an empty string. */ @ServiceMethod(returns = ReturnType.SINGLE) public ConfigurationSetting addSetting(ConfigurationSetting setting) { return addSetting(setting, Context.NONE).getValue(); } /** * Adds a configuration value in the service if that key and label does not exist. The label value of the * ConfigurationSetting is optional. * * <p><strong>Code Samples</strong></p> * * <p>Add a setting with the key "prodDBConnection", label "westUS", and value "db_connection".</p> * * {@codesnippet com.azure.data.appconfiguration.ConfigurationClient.addSettingWithResponse * * @param setting The setting to add to the configuration service. * @param context Additional context that is passed through the Http pipeline during the service call. * @return A REST response containing the the {@link ConfigurationSetting} that was created, or {@code null}, if a * key collision occurs or the key is an invalid value (which will also throw ServiceRequestException described * below). * @throws NullPointerException If {@code setting} is {@code null}. * @throws IllegalArgumentException If {@link ConfigurationSetting * @throws ResourceModifiedException If a ConfigurationSetting with the same key and label exists. * @throws HttpResponseException If {@code key} is an empty string. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Response<ConfigurationSetting> addSettingWithResponse(ConfigurationSetting setting, Context context) { return addSetting(setting, context); } private Response<ConfigurationSetting> addSetting(ConfigurationSetting setting, Context context) { return client.addSetting(setting, context).block(); } /** * Creates or updates a configuration value in the service with the given key. * * <p><strong>Code Samples</strong></p> * * <p>Add a setting with the key "prodDBConnection" and value "db_connection".</p> * * {@codesnippet com.azure.data.appconfiguration.ConfigurationClient.setSetting * * @param key The key of the configuration setting to create or update. * @param value The value of this configuration setting. * @return The {@link ConfigurationSetting} that was created or updated, or {@code null}, if the key is an invalid * value (which will also throw ServiceRequestException described below). * @throws IllegalArgumentException If {@code key} is {@code null}. * @throws ResourceModifiedException If the setting exists and is locked. * @throws HttpResponseException If {@code key} is an empty string. */ @ServiceMethod(returns = ReturnType.SINGLE) /** * Creates or updates a configuration value in the service. Partial updates are not supported and the entire * configuration setting is updated. * * If {@link ConfigurationSetting * setting's etag matches. If the etag's value is equal to the wildcard character ({@code "*"}), the setting * will always be updated. * * <p><strong>Code Samples</strong></p> * * <p>Add a setting with the key "prodDBConnection", label "westUS", and value "db_connection". <p> * * {@codesnippet com.azure.data.appconfiguration.ConfigurationClient.setSetting * * @param setting The configuration setting to create or update. 
* @return The {@link ConfigurationSetting} that was created or updated, or {@code null}, if the key is an invalid * value, the setting is locked, or an etag was provided but does not match the service's current etag value * (which will also throw ServiceRequestException described below). * @throws NullPointerException If {@code setting} is {@code null}. * @throws IllegalArgumentException If {@link ConfigurationSetting * @throws ResourceModifiedException If the {@link ConfigurationSetting * wildcard character, and the current configuration value's etag does not match, or the setting exists and is * locked. * @throws HttpResponseException If {@code key} is an empty string. */ @ServiceMethod(returns = ReturnType.SINGLE) public ConfigurationSetting setSetting(ConfigurationSetting setting) { return setSetting(setting, Context.NONE).getValue(); } /** * Creates or updates a configuration value in the service. Partial updates are not supported and the entire * configuration setting is updated. * * If {@link ConfigurationSetting * setting's etag matches. If the etag's value is equal to the wildcard character ({@code "*"}), the setting * will always be updated. * * <p><strong>Code Samples</strong></p> * * <p>Add a setting with the key "prodDBConnection" and value "db_connection".</p> * * {@codesnippet com.azure.data.appconfiguration.ConfigurationClient.setSettingWithResponse * * @param setting The configuration setting to create or update. * @param context Additional context that is passed through the Http pipeline during the service call. * @return The {@link ConfigurationSetting} that was created or updated, or {@code null}, if the key is an invalid * value, the setting is locked, or an etag was provided but does not match the service's current etag value * (which will also throw ServiceRequestException described below). * @throws NullPointerException If {@code setting} is {@code null}. 
* @throws IllegalArgumentException If {@link ConfigurationSetting * @throws ResourceModifiedException If the {@link ConfigurationSetting * wildcard character, and the current configuration value's etag does not match, or the setting exists and is * locked. * @throws HttpResponseException If {@code key} is an empty string. */ public Response<ConfigurationSetting> setSettingWithResponse(ConfigurationSetting setting, Context context) { return client.setSetting(setting, context).block(); } private Response<ConfigurationSetting> setSetting(ConfigurationSetting setting, Context context) { return client.setSetting(setting, context).block(); } /** * Updates an existing configuration value in the service with the given key. The setting must already exist. * * <p><strong>Code Samples</strong></p> * * <p>Update a setting with the key "prodDBConnection" to have the value "updated_db_connection".</p> * * {@codesnippet com.azure.data.appconfiguration.ConfigurationClient.updateSetting * * @param key The key of the configuration setting to update. * @param value The updated value of this configuration setting. * @return The {@link ConfigurationSetting} that was updated, or {@code null}, if the configuration value does not * exist, is locked, or the key is an invalid value (which will also throw ServiceRequestException described * below). * @throws IllegalArgumentException If {@code key} is {@code null}. * @throws HttpResponseException If a ConfigurationSetting with the key does not exist or the configuration value is * locked. * @throws HttpResponseException If {@code key} is an empty string. */ @ServiceMethod(returns = ReturnType.SINGLE) public ConfigurationSetting updateSetting(String key, String value) { return updateSetting(new ConfigurationSetting().key(key).value(value), Context.NONE).getValue(); } /** * Updates an existing configuration value in the service. The setting must already exist. Partial updates are not * supported, the entire configuration value is replaced. 
* * If {@link ConfigurationSetting * * <p><strong>Code Samples</strong></p> * * <p>Update the setting with the key-label pair "prodDBConnection"-"westUS" to have the value * "updated_db_connection".</p> * * {@codesnippet com.azure.data.applicationconfig.configurationclient.updateSetting * * @param setting The setting to add or update in the service. * @return The {@link ConfigurationSetting} that was updated, or {@code null}, if the configuration value does not * exist, is locked, or the key is an invalid value (which will also throw ServiceRequestException described * below). * @throws NullPointerException If {@code setting} is {@code null}. * @throws IllegalArgumentException If {@link ConfigurationSetting * @throws ResourceModifiedException If a ConfigurationSetting with the same key and label does not exist, the * setting is locked, or {@link ConfigurationSetting * value. * @throws HttpResponseException If {@code key} is an empty string. */ @ServiceMethod(returns = ReturnType.SINGLE) public ConfigurationSetting updateSetting(ConfigurationSetting setting) { return updateSetting(setting, Context.NONE).getValue(); } /** * Updates an existing configuration value in the service. The setting must already exist. Partial updates are not * supported, the entire configuration value is replaced. * * If {@link ConfigurationSetting * * <p><strong>Code Samples</strong></p> * * <p>Update the setting with the key-label pair "prodDBConnection"-"westUS" to have the value * "updated_db_connection".</p> * * {@codesnippet com.azure.data.applicationconfig.configurationclient.updateSettingWithResponse * * @param setting The setting to add or update in the service. * @param context Additional context that is passed through the Http pipeline during the service call. 
* @return A REST response containing the {@link ConfigurationSetting} that was updated, or {@code null}, if the * configuration value does not exist, is locked, or the key is an invalid value (which will also throw * ServiceRequestException described below). * @throws NullPointerException If {@code setting} is {@code null}. * @throws IllegalArgumentException If {@link ConfigurationSetting * @throws ResourceModifiedException If a ConfigurationSetting with the same key and label does not exist, the * setting is locked, or {@link ConfigurationSetting * value. * @throws HttpResponseException If {@code key} is an empty string. */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<ConfigurationSetting> updateSettingWithResponse(ConfigurationSetting setting, Context context) { return updateSetting(setting, context); } private Response<ConfigurationSetting> updateSetting(ConfigurationSetting setting, Context context) { return client.updateSetting(setting, context).block(); } /** * Attempts to get a ConfigurationSetting that matches the {@code key}. * * <p><strong>Code Samples</strong></p> * * <p>Retrieve the setting with the key "prodDBConnection".</p> * * {@codesnippet com.azure.data.applicationconfig.configurationclient.getSetting * * @param key The key of the setting to retrieve. * @return The {@link ConfigurationSetting} stored in the service, or {@code null}, if the configuration value does * not exist or the key is an invalid value (which will also throw ServiceRequestException described below). * @throws IllegalArgumentException If {@code key} is {@code null}. * @throws ResourceNotFoundException If a ConfigurationSetting with {@code key} does not exist. * @throws HttpResponseException If {@code key} is an empty string. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public ConfigurationSetting getSetting(String key) { return getSetting(new ConfigurationSetting().key(key), Context.NONE).getValue(); } /** * Attempts to get the ConfigurationSetting given the {@code key}, optional {@code label}. * * <p><strong>Code Samples</strong></p> * * <p>Retrieve the setting with the key "prodDBConnection".</p> * * {@codesnippet com.azure.data.applicationconfig.configurationclient.getSetting * * @param setting The setting to retrieve based on its key and optional label combination. * @return The {@link ConfigurationSetting} stored in the service, or {@code null}, if the configuration value does * not exist or the key is an invalid value (which will also throw ServiceRequestException described below). * @throws NullPointerException If {@code setting} is {@code null}. * @throws IllegalArgumentException If {@link ConfigurationSetting * @throws ResourceNotFoundException If a ConfigurationSetting with the same key and label does not exist. * @throws HttpResponseException If the {@code} key is an empty string. */ @ServiceMethod(returns = ReturnType.SINGLE) public ConfigurationSetting getSetting(ConfigurationSetting setting) { return getSetting(setting, Context.NONE).getValue(); } /** * Attempts to get the ConfigurationSetting given the {@code key}, optional {@code label}. * * <p><strong>Code Samples</strong></p> * * <p>Retrieve the setting with the key "prodDBConnection".</p> * * {@codesnippet com.azure.data.applicationconfig.configurationclient.getSettingWithResponse * * @param setting The setting to retrieve based on its key and optional label combination. * @param context Additional context that is passed through the Http pipeline during the service call. 
* @return A REST response containg the {@link ConfigurationSetting} stored in the service, or {@code null}, if the * configuration value does not exist or the key is an invalid value (which will also throw * ServiceRequestException described below). * @throws NullPointerException If {@code setting} is {@code null}. * @throws IllegalArgumentException If {@link ConfigurationSetting * @throws ResourceNotFoundException If a ConfigurationSetting with the same key and label does not exist. * @throws HttpResponseException If the {@code} key is an empty string. */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<ConfigurationSetting> getSettingWithResponse(ConfigurationSetting setting, Context context) { return getSetting(setting, context); } private Response<ConfigurationSetting> getSetting(ConfigurationSetting setting, Context context) { return client.getSetting(setting, context).block(); } /** * Deletes the ConfigurationSetting with a matching {@code key}. * * <p><strong>Code Samples</strong></p> * * <p>Delete the setting with the key "prodDBConnection".</p> * * {@codesnippet com.azure.data.applicationconfig.configurationclient.deleteSetting * * @param key The key of the setting to delete. * @return The deleted ConfigurationSetting or {@code null} if it didn't exist. {@code null} is also returned if * the {@code key} is an invalid value (which will also throw ServiceRequestException described below). * @throws IllegalArgumentException If {@code key} is {@code null}. * @throws ResourceModifiedException If the ConfigurationSetting is locked. * @throws HttpResponseException If {@code key} is an empty string. */ @ServiceMethod(returns = ReturnType.SINGLE) public ConfigurationSetting deleteSetting(String key) { return deleteSetting(new ConfigurationSetting().key(key), Context.NONE).getValue(); } /** * Deletes the {@link ConfigurationSetting} with a matching key, along with the given label and etag. 
* * If {@link ConfigurationSetting * then the setting is <b>only</b> deleted if the etag matches the current etag; this means that no one has updated * the ConfigurationSetting yet. * * <p><strong>Code Samples</strong></p> * * <p>Delete the setting with the key "prodDBConnection".</p> * * {@codesnippet com.azure.data.applicationconfig.configurationclient.deleteSetting * * @param setting The ConfigurationSetting to delete. * @return The deleted ConfigurationSetting or {@code null} if didn't exist. {@code null} is also returned if * the {@code key} is an invalid value or {@link ConfigurationSetting * current etag (which will also throw ServiceRequestException described below). * @throws IllegalArgumentException If {@link ConfigurationSetting * @throws NullPointerException When {@code setting} is {@code null}. * @throws ResourceModifiedException If the ConfigurationSetting is locked. * @throws ResourceNotFoundException If {@link ConfigurationSetting * character, and does not match the current etag value. * @throws HttpResponseException If {@code key} is an empty string. */ @ServiceMethod(returns = ReturnType.SINGLE) public ConfigurationSetting deleteSetting(ConfigurationSetting setting) { return deleteSetting(setting, Context.NONE).getValue(); } /** * Deletes the {@link ConfigurationSetting} with a matching key, along with the given label and etag. * * If {@link ConfigurationSetting * then the setting is <b>only</b> deleted if the etag matches the current etag; this means that no one has updated * the ConfigurationSetting yet. * * <p><strong>Code Samples</strong></p> * * <p>Delete the setting with the key "prodDBConnection".</p> * * {@codesnippet com.azure.data.applicationconfig.configurationclient.deleteSettingWithResponse * * @param setting The ConfigurationSetting to delete. * @param context Additional context that is passed through the Http pipeline during the service call. 
* @return A REST response containing the deleted ConfigurationSetting or {@code null} if didn't exist. {@code null} * is also returned if the {@code key} is an invalid value or {@link ConfigurationSetting * but does not match the current etag (which will also throw ServiceRequestException described below). * @throws IllegalArgumentException If {@link ConfigurationSetting * @throws NullPointerException When {@code setting} is {@code null}. * @throws ResourceModifiedException If the ConfigurationSetting is locked. * @throws ResourceNotFoundException If {@link ConfigurationSetting * character, and does not match the current etag value. * @throws HttpResponseException If {@code key} is an empty string. */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<ConfigurationSetting> deleteSettingWithResponse(ConfigurationSetting setting, Context context) { return deleteSetting(setting, context); } private Response<ConfigurationSetting> deleteSetting(ConfigurationSetting setting, Context context) { return client.deleteSetting(setting, context).block(); } /** * Fetches the configuration settings that match the {@code options}. If {@code options} is {@code null}, then all * the {@link ConfigurationSetting configuration settings} are fetched with their current values. * * <p><strong>Code Samples</strong></p> * * <p>Retrieve all settings that use the key "prodDBConnection".</p> * * {@codesnippet com.azure.data.applicationconfig.configurationclient.listSettings * * @param options Optional. Options to filter configuration setting results from the service. * @return A {@link PagedIterable} of ConfigurationSettings that matches the {@code options}. If no options were * provided, the List contains all of the current settings in the service. 
*/ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedIterable<ConfigurationSetting> listSettings(SettingSelector options) { return listSettings(options, Context.NONE); } /** * Fetches the configuration settings that match the {@code options}. If {@code options} is {@code null}, then all * the {@link ConfigurationSetting configuration settings} are fetched with their current values. * * <p><strong>Code Samples</strong></p> * * <p>Retrieve all settings that use the key "prodDBConnection".</p> * * {@codesnippet com.azure.data.applicationconfig.configurationclient.listSettings * * @param options Optional. Options to filter configuration setting results from the service. * @param context Additional context that is passed through the Http pipeline during the service call. * @return A {@link PagedIterable} of ConfigurationSettings that matches the {@code options}. If no options were * provided, the {@link PagedIterable} contains all of the current settings in the service. */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedIterable<ConfigurationSetting> listSettings(SettingSelector options, Context context) { return new PagedIterable<>(client.listSettings(options, context)); } /** * Lists chronological/historical representation of {@link ConfigurationSetting} resource(s). Revisions are provided * in descending order from their {@link ConfigurationSetting * after a period of time. The service maintains change history for up to 7 days. * * If {@code options} is {@code null}, then all the {@link ConfigurationSetting ConfigurationSettings} are fetched * in their current state. Otherwise, the results returned match the parameters given in {@code options}. * * <p><strong>Code Samples</strong></p> * * <p>Retrieve all revisions of the setting that has the key "prodDBConnection".</p> * * {@codesnippet com.azure.data.applicationconfig.configurationclient.listSettingRevisions * * @param selector Optional. 
Used to filter configuration setting revisions from the service. * @return {@link PagedIterable} of {@link ConfigurationSetting} revisions. */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedIterable<ConfigurationSetting> listSettingRevisions(SettingSelector selector) { return listSettingRevisions(selector, Context.NONE); } /** * Lists chronological/historical representation of {@link ConfigurationSetting} resource(s). Revisions are provided * in descending order from their {@link ConfigurationSetting * after a period of time. The service maintains change history for up to 7 days. * * If {@code options} is {@code null}, then all the {@link ConfigurationSetting ConfigurationSettings} are fetched * in their current state. Otherwise, the results returned match the parameters given in {@code options}. * * <p><strong>Code Samples</strong></p> * * <p>Retrieve all revisions of the setting that has the key "prodDBConnection".</p> * * {@codesnippet com.azure.data.applicationconfig.configurationclient.listSettingRevisions * * @param selector Optional. Used to filter configuration setting revisions from the service. * @param context Additional context that is passed through the Http pipeline during the service call. * @return {@link PagedIterable} of {@link ConfigurationSetting} revisions. */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedIterable<ConfigurationSetting> listSettingRevisions(SettingSelector selector, Context context) { return new PagedIterable<>(client.listSettingRevisions(selector, context)); } }
class ConfigurationClient { private final ConfigurationAsyncClient client; /** * Creates a ConfigurationClient that sends requests to the configuration service at {@code serviceEndpoint}. Each * service call goes through the {@code pipeline}. * * @param client The {@link ConfigurationAsyncClient} that the client routes its request through. */ ConfigurationClient(ConfigurationAsyncClient client) { this.client = client; } /** * Adds a configuration value in the service if that key does not exist. * * <p><strong>Code Samples</strong></p> * * <p>Add a setting with the key "prodDBConnection" and value "db_connection".</p> * * {@codesnippet com.azure.data.appconfiguration.ConfigurationClient.addSetting * * @param key The key of the configuration setting to add. * @param value The value associated with this configuration setting key. * @return The {@link ConfigurationSetting} that was created, or {@code null}, if a key collision occurs or the key * is an invalid value (which will also throw ServiceRequestException described below). * @throws IllegalArgumentException If {@code key} is {@code null}. * @throws ResourceModifiedException If a ConfigurationSetting with the same key exists. * @throws HttpResponseException If {@code key} is an empty string. */ @ServiceMethod(returns = ReturnType.SINGLE) public ConfigurationSetting addSetting(String key, String value) { return addSetting(new ConfigurationSetting().setKey(key).setValue(value), Context.NONE).getValue(); } /** * Adds a configuration value in the service if that key and label does not exist. The label value of the * ConfigurationSetting is optional. * * <p><strong>Code Samples</strong></p> * * <p>Add a setting with the key "prodDBConnection", label "westUS", and value "db_connection".</p> * * {@codesnippet com.azure.data.appconfiguration.ConfigurationClient.addSetting * * @param setting The setting to add to the configuration service. 
* @return The {@link ConfigurationSetting} that was created, or {@code null}, if a key collision occurs or the key * is an invalid value (which will also throw ServiceRequestException described below). * @throws NullPointerException If {@code setting} is {@code null}. * @throws IllegalArgumentException If {@link ConfigurationSetting * @throws ResourceModifiedException If a ConfigurationSetting with the same key and label exists. * @throws HttpResponseException If {@code key} is an empty string. */ @ServiceMethod(returns = ReturnType.SINGLE) public ConfigurationSetting addSetting(ConfigurationSetting setting) { return addSetting(setting, Context.NONE).getValue(); } /** * Adds a configuration value in the service if that key and label does not exist. The label value of the * ConfigurationSetting is optional. * * <p><strong>Code Samples</strong></p> * * <p>Add a setting with the key "prodDBConnection", label "westUS", and value "db_connection".</p> * * {@codesnippet com.azure.data.appconfiguration.ConfigurationClient.addSettingWithResponse * * @param setting The setting to add to the configuration service. * @param context Additional context that is passed through the Http pipeline during the service call. * @return A REST response containing the the {@link ConfigurationSetting} that was created, or {@code null}, if a * key collision occurs or the key is an invalid value (which will also throw ServiceRequestException described * below). * @throws NullPointerException If {@code setting} is {@code null}. * @throws IllegalArgumentException If {@link ConfigurationSetting * @throws ResourceModifiedException If a ConfigurationSetting with the same key and label exists. * @throws HttpResponseException If {@code key} is an empty string. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Response<ConfigurationSetting> addSettingWithResponse(ConfigurationSetting setting, Context context) { return addSetting(setting, context); } private Response<ConfigurationSetting> addSetting(ConfigurationSetting setting, Context context) { return client.addSetting(setting, context).block(); } /** * Creates or updates a configuration value in the service with the given key. * * <p><strong>Code Samples</strong></p> * * <p>Add a setting with the key "prodDBConnection" and value "db_connection".</p> * * {@codesnippet com.azure.data.appconfiguration.ConfigurationClient.setSetting * * @param key The key of the configuration setting to create or update. * @param value The value of this configuration setting. * @return The {@link ConfigurationSetting} that was created or updated, or {@code null}, if the key is an invalid * value (which will also throw ServiceRequestException described below). * @throws IllegalArgumentException If {@code key} is {@code null}. * @throws ResourceModifiedException If the setting exists and is locked. * @throws HttpResponseException If {@code key} is an empty string. */ @ServiceMethod(returns = ReturnType.SINGLE) /** * Creates or updates a configuration value in the service. Partial updates are not supported and the entire * configuration setting is updated. * * If {@link ConfigurationSetting * setting's etag matches. If the etag's value is equal to the wildcard character ({@code "*"}), the setting will * always be updated. * * <p><strong>Code Samples</strong></p> * * <p>Add a setting with the key "prodDBConnection", label "westUS", and value "db_connection". <p> * * {@codesnippet com.azure.data.appconfiguration.ConfigurationClient.setSetting * * @param setting The configuration setting to create or update. 
* @return The {@link ConfigurationSetting} that was created or updated, or {@code null}, if the key is an invalid * value, the setting is locked, or an etag was provided but does not match the service's current etag value (which * will also throw ServiceRequestException described below). * @throws NullPointerException If {@code setting} is {@code null}. * @throws IllegalArgumentException If {@link ConfigurationSetting * @throws ResourceModifiedException If the {@link ConfigurationSetting * wildcard character, and the current configuration value's etag does not match, or the setting exists and is * locked. * @throws HttpResponseException If {@code key} is an empty string. */ @ServiceMethod(returns = ReturnType.SINGLE) public ConfigurationSetting setSetting(ConfigurationSetting setting) { return setSettingWithResponse(setting, Context.NONE).getValue(); } /** * Creates or updates a configuration value in the service. Partial updates are not supported and the entire * configuration setting is updated. * * If {@link ConfigurationSetting * setting's etag matches. If the etag's value is equal to the wildcard character ({@code "*"}), the setting will * always be updated. * * <p><strong>Code Samples</strong></p> * * <p>Add a setting with the key "prodDBConnection" and value "db_connection".</p> * * {@codesnippet com.azure.data.appconfiguration.ConfigurationClient.setSettingWithResponse * * @param setting The configuration setting to create or update. * @param context Additional context that is passed through the Http pipeline during the service call. * @return The {@link ConfigurationSetting} that was created or updated, or {@code null}, if the key is an invalid * value, the setting is locked, or an etag was provided but does not match the service's current etag value (which * will also throw ServiceRequestException described below). * @throws NullPointerException If {@code setting} is {@code null}. 
* @throws IllegalArgumentException If {@link ConfigurationSetting * @throws ResourceModifiedException If the {@link ConfigurationSetting * wildcard character, and the current configuration value's etag does not match, or the setting exists and is * locked. * @throws HttpResponseException If {@code key} is an empty string. */ public Response<ConfigurationSetting> setSettingWithResponse(ConfigurationSetting setting, Context context) { return client.setSetting(setting, context).block(); } /** * Updates an existing configuration value in the service with the given key. The setting must already exist. * * <p><strong>Code Samples</strong></p> * * <p>Update a setting with the key "prodDBConnection" to have the value "updated_db_connection".</p> * * {@codesnippet com.azure.data.appconfiguration.ConfigurationClient.updateSetting * * @param key The key of the configuration setting to update. * @param value The updated value of this configuration setting. * @return The {@link ConfigurationSetting} that was updated, or {@code null}, if the configuration value does not * exist, is locked, or the key is an invalid value (which will also throw ServiceRequestException described * below). * @throws IllegalArgumentException If {@code key} is {@code null}. * @throws HttpResponseException If a ConfigurationSetting with the key does not exist or the configuration value is * locked. * @throws HttpResponseException If {@code key} is an empty string. */ @ServiceMethod(returns = ReturnType.SINGLE) public ConfigurationSetting updateSetting(String key, String value) { return updateSetting(new ConfigurationSetting().setKey(key).setValue(value), Context.NONE).getValue(); } /** * Updates an existing configuration value in the service. The setting must already exist. Partial updates are not * supported, the entire configuration value is replaced. * * If {@link ConfigurationSetting * matches. 
* * <p><strong>Code Samples</strong></p> * * <p>Update the setting with the key-label pair "prodDBConnection"-"westUS" to have the value * "updated_db_connection".</p> * * {@codesnippet com.azure.data.applicationconfig.configurationclient.updateSetting * * @param setting The setting to add or update in the service. * @return The {@link ConfigurationSetting} that was updated, or {@code null}, if the configuration value does not * exist, is locked, or the key is an invalid value (which will also throw ServiceRequestException described * below). * @throws NullPointerException If {@code setting} is {@code null}. * @throws IllegalArgumentException If {@link ConfigurationSetting * @throws ResourceModifiedException If a ConfigurationSetting with the same key and label does not exist, the * setting is locked, or {@link ConfigurationSetting * value. * @throws HttpResponseException If {@code key} is an empty string. */ @ServiceMethod(returns = ReturnType.SINGLE) public ConfigurationSetting updateSetting(ConfigurationSetting setting) { return updateSetting(setting, Context.NONE).getValue(); } /** * Updates an existing configuration value in the service. The setting must already exist. Partial updates are not * supported, the entire configuration value is replaced. * * If {@link ConfigurationSetting * matches. * * <p><strong>Code Samples</strong></p> * * <p>Update the setting with the key-label pair "prodDBConnection"-"westUS" to have the value * "updated_db_connection".</p> * * {@codesnippet com.azure.data.applicationconfig.configurationclient.updateSettingWithResponse * * @param setting The setting to add or update in the service. * @param context Additional context that is passed through the Http pipeline during the service call. 
* @return A REST response containing the {@link ConfigurationSetting} that was updated, or {@code null}, if the * configuration value does not exist, is locked, or the key is an invalid value (which will also throw * ServiceRequestException described below). * @throws NullPointerException If {@code setting} is {@code null}. * @throws IllegalArgumentException If {@link ConfigurationSetting * @throws ResourceModifiedException If a ConfigurationSetting with the same key and label does not exist, the * setting is locked, or {@link ConfigurationSetting * value. * @throws HttpResponseException If {@code key} is an empty string. */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<ConfigurationSetting> updateSettingWithResponse(ConfigurationSetting setting, Context context) { return updateSetting(setting, context); } private Response<ConfigurationSetting> updateSetting(ConfigurationSetting setting, Context context) { return client.updateSetting(setting, context).block(); } /** * Attempts to get a ConfigurationSetting that matches the {@code key}. * * <p><strong>Code Samples</strong></p> * * <p>Retrieve the setting with the key "prodDBConnection".</p> * * {@codesnippet com.azure.data.applicationconfig.configurationclient.getSetting * * @param key The key of the setting to retrieve. * @return The {@link ConfigurationSetting} stored in the service, or {@code null}, if the configuration value does * not exist or the key is an invalid value (which will also throw ServiceRequestException described below). * @throws IllegalArgumentException If {@code key} is {@code null}. * @throws ResourceNotFoundException If a ConfigurationSetting with {@code key} does not exist. * @throws HttpResponseException If {@code key} is an empty string. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public ConfigurationSetting getSetting(String key) { return getSetting(new ConfigurationSetting().setKey(key), Context.NONE).getValue(); } /** * Attempts to get the ConfigurationSetting given the {@code key}, optional {@code label}. * * <p><strong>Code Samples</strong></p> * * <p>Retrieve the setting with the key "prodDBConnection".</p> * * {@codesnippet com.azure.data.applicationconfig.configurationclient.getSetting * * @param setting The setting to retrieve based on its key and optional label combination. * @return The {@link ConfigurationSetting} stored in the service, or {@code null}, if the configuration value does * not exist or the key is an invalid value (which will also throw ServiceRequestException described below). * @throws NullPointerException If {@code setting} is {@code null}. * @throws IllegalArgumentException If {@link ConfigurationSetting * @throws ResourceNotFoundException If a ConfigurationSetting with the same key and label does not exist. * @throws HttpResponseException If the {@code} key is an empty string. */ @ServiceMethod(returns = ReturnType.SINGLE) public ConfigurationSetting getSetting(ConfigurationSetting setting) { return getSetting(setting, Context.NONE).getValue(); } /** * Attempts to get the ConfigurationSetting given the {@code key}, optional {@code label}. * * <p><strong>Code Samples</strong></p> * * <p>Retrieve the setting with the key "prodDBConnection".</p> * * {@codesnippet com.azure.data.applicationconfig.configurationclient.getSettingWithResponse * * @param setting The setting to retrieve based on its key and optional label combination. * @param context Additional context that is passed through the Http pipeline during the service call. 
* @return A REST response containg the {@link ConfigurationSetting} stored in the service, or {@code null}, if the * configuration value does not exist or the key is an invalid value (which will also throw ServiceRequestException * described below). * @throws NullPointerException If {@code setting} is {@code null}. * @throws IllegalArgumentException If {@link ConfigurationSetting * @throws ResourceNotFoundException If a ConfigurationSetting with the same key and label does not exist. * @throws HttpResponseException If the {@code} key is an empty string. */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<ConfigurationSetting> getSettingWithResponse(ConfigurationSetting setting, Context context) { return getSetting(setting, context); } private Response<ConfigurationSetting> getSetting(ConfigurationSetting setting, Context context) { return client.getSetting(setting, context).block(); } /** * Deletes the ConfigurationSetting with a matching {@code key}. * * <p><strong>Code Samples</strong></p> * * <p>Delete the setting with the key "prodDBConnection".</p> * * {@codesnippet com.azure.data.applicationconfig.configurationclient.deleteSetting * * @param key The key of the setting to delete. * @return The deleted ConfigurationSetting or {@code null} if it didn't exist. {@code null} is also returned if the * {@code key} is an invalid value (which will also throw ServiceRequestException described below). * @throws IllegalArgumentException If {@code key} is {@code null}. * @throws ResourceModifiedException If the ConfigurationSetting is locked. * @throws HttpResponseException If {@code key} is an empty string. */ @ServiceMethod(returns = ReturnType.SINGLE) public ConfigurationSetting deleteSetting(String key) { return deleteSetting(new ConfigurationSetting().setKey(key), Context.NONE).getValue(); } /** * Deletes the {@link ConfigurationSetting} with a matching key, along with the given label and etag. 
* * If {@link ConfigurationSetting * the setting is <b>only</b> deleted if the etag matches the current etag; this means that no one has updated the * ConfigurationSetting yet. * * <p><strong>Code Samples</strong></p> * * <p>Delete the setting with the key "prodDBConnection".</p> * * {@codesnippet com.azure.data.applicationconfig.configurationclient.deleteSetting * * @param setting The ConfigurationSetting to delete. * @return The deleted ConfigurationSetting or {@code null} if didn't exist. {@code null} is also returned if the * {@code key} is an invalid value or {@link ConfigurationSetting * current etag (which will also throw ServiceRequestException described below). * @throws IllegalArgumentException If {@link ConfigurationSetting * @throws NullPointerException When {@code setting} is {@code null}. * @throws ResourceModifiedException If the ConfigurationSetting is locked. * @throws ResourceNotFoundException If {@link ConfigurationSetting * character, and does not match the current etag value. * @throws HttpResponseException If {@code key} is an empty string. */ @ServiceMethod(returns = ReturnType.SINGLE) public ConfigurationSetting deleteSetting(ConfigurationSetting setting) { return deleteSetting(setting, Context.NONE).getValue(); } /** * Deletes the {@link ConfigurationSetting} with a matching key, along with the given label and etag. * * If {@link ConfigurationSetting * the setting is <b>only</b> deleted if the etag matches the current etag; this means that no one has updated the * ConfigurationSetting yet. * * <p><strong>Code Samples</strong></p> * * <p>Delete the setting with the key "prodDBConnection".</p> * * {@codesnippet com.azure.data.applicationconfig.configurationclient.deleteSettingWithResponse * * @param setting The ConfigurationSetting to delete. * @param context Additional context that is passed through the Http pipeline during the service call. 
* @return A REST response containing the deleted ConfigurationSetting or {@code null} if didn't exist. {@code null} * is also returned if the {@code key} is an invalid value or {@link ConfigurationSetting * does not match the current etag (which will also throw ServiceRequestException described below). * @throws IllegalArgumentException If {@link ConfigurationSetting * @throws NullPointerException When {@code setting} is {@code null}. * @throws ResourceModifiedException If the ConfigurationSetting is locked. * @throws ResourceNotFoundException If {@link ConfigurationSetting * character, and does not match the current etag value. * @throws HttpResponseException If {@code key} is an empty string. */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<ConfigurationSetting> deleteSettingWithResponse(ConfigurationSetting setting, Context context) { return deleteSetting(setting, context); } private Response<ConfigurationSetting> deleteSetting(ConfigurationSetting setting, Context context) { return client.deleteSetting(setting, context).block(); } /** * Fetches the configuration settings that match the {@code options}. If {@code options} is {@code null}, then all * the {@link ConfigurationSetting configuration settings} are fetched with their current values. * * <p><strong>Code Samples</strong></p> * * <p>Retrieve all settings that use the key "prodDBConnection".</p> * * {@codesnippet com.azure.data.applicationconfig.configurationclient.listSettings * * @param options Optional. Options to filter configuration setting results from the service. * @return A {@link PagedIterable} of ConfigurationSettings that matches the {@code options}. If no options were * provided, the List contains all of the current settings in the service. 
*/ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedIterable<ConfigurationSetting> listSettings(SettingSelector options) { return listSettings(options, Context.NONE); } /** * Fetches the configuration settings that match the {@code options}. If {@code options} is {@code null}, then all * the {@link ConfigurationSetting configuration settings} are fetched with their current values. * * <p><strong>Code Samples</strong></p> * * <p>Retrieve all settings that use the key "prodDBConnection".</p> * * {@codesnippet com.azure.data.applicationconfig.configurationclient.listSettings * * @param options Optional. Options to filter configuration setting results from the service. * @param context Additional context that is passed through the Http pipeline during the service call. * @return A {@link PagedIterable} of ConfigurationSettings that matches the {@code options}. If no options were * provided, the {@link PagedIterable} contains all of the current settings in the service. */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedIterable<ConfigurationSetting> listSettings(SettingSelector options, Context context) { return new PagedIterable<>(client.listSettings(options, context)); } /** * Lists chronological/historical representation of {@link ConfigurationSetting} resource(s). Revisions are provided * in descending order from their {@link ConfigurationSetting * after a period of time. The service maintains change history for up to 7 days. * * If {@code options} is {@code null}, then all the {@link ConfigurationSetting ConfigurationSettings} are fetched * in their current state. Otherwise, the results returned match the parameters given in {@code options}. * * <p><strong>Code Samples</strong></p> * * <p>Retrieve all revisions of the setting that has the key "prodDBConnection".</p> * * {@codesnippet com.azure.data.applicationconfig.configurationclient.listSettingRevisions * * @param selector Optional. 
Used to filter configuration setting revisions from the service. * @return {@link PagedIterable} of {@link ConfigurationSetting} revisions. */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedIterable<ConfigurationSetting> listSettingRevisions(SettingSelector selector) { return listSettingRevisions(selector, Context.NONE); } /** * Lists chronological/historical representation of {@link ConfigurationSetting} resource(s). Revisions are provided * in descending order from their {@link ConfigurationSetting * after a period of time. The service maintains change history for up to 7 days. * * If {@code options} is {@code null}, then all the {@link ConfigurationSetting ConfigurationSettings} are fetched * in their current state. Otherwise, the results returned match the parameters given in {@code options}. * * <p><strong>Code Samples</strong></p> * * <p>Retrieve all revisions of the setting that has the key "prodDBConnection".</p> * * {@codesnippet com.azure.data.applicationconfig.configurationclient.listSettingRevisions * * @param selector Optional. Used to filter configuration setting revisions from the service. * @param context Additional context that is passed through the Http pipeline during the service call. * @return {@link PagedIterable} of {@link ConfigurationSetting} revisions. */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedIterable<ConfigurationSetting> listSettingRevisions(SettingSelector selector, Context context) { return new PagedIterable<>(client.listSettingRevisions(selector, context)); } }
@srnagar `String eventHubName()` is not renamed.
private static String getExpression(EventPosition eventPosition) { final String isInclusiveFlag = eventPosition.isInclusive() ? "=" : ""; if (eventPosition.getOffset() != null) { return String.format( AmqpConstants.AMQP_ANNOTATION_FORMAT, OFFSET_ANNOTATION_NAME.getValue(), isInclusiveFlag, eventPosition.getOffset()); } if (eventPosition.getSequenceNumber() != null) { return String.format( AmqpConstants.AMQP_ANNOTATION_FORMAT, SEQUENCE_NUMBER_ANNOTATION_NAME.getValue(), isInclusiveFlag, eventPosition.getSequenceNumber()); } if (eventPosition.getEnqueuedDateTime() != null) { String ms; try { ms = Long.toString(eventPosition.getEnqueuedDateTime().toEpochMilli()); } catch (ArithmeticException ex) { ms = Long.toString(Long.MAX_VALUE); } return String.format( AmqpConstants.AMQP_ANNOTATION_FORMAT, ENQUEUED_TIME_UTC_ANNOTATION_NAME.getValue(), isInclusiveFlag, ms); } throw new IllegalArgumentException("No starting position was set."); }
ms = Long.toString(Long.MAX_VALUE);
private static String getExpression(EventPosition eventPosition) { final String isInclusiveFlag = eventPosition.isInclusive() ? "=" : ""; if (eventPosition.getOffset() != null) { return String.format( AmqpConstants.AMQP_ANNOTATION_FORMAT, OFFSET_ANNOTATION_NAME.getValue(), isInclusiveFlag, eventPosition.getOffset()); } if (eventPosition.getSequenceNumber() != null) { return String.format( AmqpConstants.AMQP_ANNOTATION_FORMAT, SEQUENCE_NUMBER_ANNOTATION_NAME.getValue(), isInclusiveFlag, eventPosition.getSequenceNumber()); } if (eventPosition.getEnqueuedDateTime() != null) { String ms; try { ms = Long.toString(eventPosition.getEnqueuedDateTime().toEpochMilli()); } catch (ArithmeticException ex) { ms = Long.toString(Long.MAX_VALUE); } return String.format( AmqpConstants.AMQP_ANNOTATION_FORMAT, ENQUEUED_TIME_UTC_ANNOTATION_NAME.getValue(), isInclusiveFlag, ms); } throw new IllegalArgumentException("No starting position was set."); }
class EventHubAsyncClient implements Closeable { /** * The name of the default consumer group in the Event Hubs service. */ public static final String DEFAULT_CONSUMER_GROUP_NAME = "$Default"; private static final String RECEIVER_ENTITY_PATH_FORMAT = "%s/ConsumerGroups/%s/Partitions/%s"; private static final String SENDER_ENTITY_PATH_FORMAT = "%s/Partitions/%s"; private final ClientLogger logger = new ClientLogger(EventHubAsyncClient.class); private final String connectionId; private final Mono<EventHubConnection> connectionMono; private final AtomicBoolean hasConnection = new AtomicBoolean(false); private final ConnectionOptions connectionOptions; private final String eventHubName; private final EventHubProducerOptions defaultProducerOptions; private final EventHubConsumerOptions defaultConsumerOptions; private final TracerProvider tracerProvider; EventHubAsyncClient(ConnectionOptions connectionOptions, ReactorProvider provider, ReactorHandlerProvider handlerProvider, TracerProvider tracerProvider) { Objects.requireNonNull(connectionOptions, "'connectionOptions' cannot be null."); Objects.requireNonNull(provider, "'provider' cannot be null."); Objects.requireNonNull(handlerProvider, "'handlerProvider' cannot be null."); Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null."); this.connectionOptions = connectionOptions; this.tracerProvider = tracerProvider; this.eventHubName = connectionOptions.getEventHubName(); this.connectionId = StringUtil.getRandomString("MF"); this.connectionMono = Mono.fromCallable(() -> { return (EventHubConnection) new ReactorConnection(connectionId, connectionOptions, provider, handlerProvider, new ResponseMapper()); }).doOnSubscribe(c -> hasConnection.set(true)) .cache(); this.defaultProducerOptions = new EventHubProducerOptions() .setRetry(connectionOptions.getRetry()); this.defaultConsumerOptions = new EventHubConsumerOptions() .setRetry(connectionOptions.getRetry()) .setScheduler(connectionOptions.getScheduler()); } 
/** * Retrieves information about an Event Hub, including the number of partitions present and their identifiers. * * @return The set of information for the Event Hub that this client is associated with. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<EventHubProperties> getProperties() { return connectionMono .flatMap(connection -> connection .getManagementNode().flatMap(EventHubManagementNode::getEventHubProperties)); } /** * Retrieves the identifiers for the partitions of an Event Hub. * * @return A Flux of identifiers for the partitions of an Event Hub. */ @ServiceMethod(returns = ReturnType.COLLECTION) public Flux<String> getPartitionIds() { return getProperties().flatMapMany(properties -> Flux.fromArray(properties.getPartitionIds())); } /** * Retrieves information about a specific partition for an Event Hub, including elements that describe the available * events in the partition event stream. * * @param partitionId The unique identifier of a partition associated with the Event Hub. * @return The set of information for the requested partition under the Event Hub this client is associated with. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<PartitionProperties> getPartitionProperties(String partitionId) { return connectionMono.flatMap( connection -> connection.getManagementNode().flatMap(node -> { return node.getPartitionProperties(partitionId); })); } /** * Creates an Event Hub producer responsible for transmitting {@link EventData} to the Event Hub, grouped together * in batches. Event data is automatically routed to an available partition. * * @return A new {@link EventHubAsyncProducer}. */ public EventHubAsyncProducer createProducer() { return createProducer(defaultProducerOptions); } /** * Creates an Event Hub producer responsible for transmitting {@link EventData} to the Event Hub, grouped together * in batches. If {@link EventHubProducerOptions * events are routed to that specific partition. 
Otherwise, events are automatically routed to an available * partition. * * @param options The set of options to apply when creating the producer. * @return A new {@link EventHubAsyncProducer}. * @throws NullPointerException if {@code options} is {@code null}. */ public EventHubAsyncProducer createProducer(EventHubProducerOptions options) { Objects.requireNonNull(options, "'options' cannot be null."); final EventHubProducerOptions clonedOptions = options.clone(); if (clonedOptions.getRetry() == null) { clonedOptions.setRetry(connectionOptions.getRetry()); } final String entityPath; final String linkName; if (ImplUtils.isNullOrEmpty(options.getPartitionId())) { entityPath = eventHubName; linkName = StringUtil.getRandomString("EC"); } else { entityPath = String.format(Locale.US, SENDER_ENTITY_PATH_FORMAT, eventHubName, options.getPartitionId()); linkName = StringUtil.getRandomString("PS"); } final Mono<AmqpSendLink> amqpLinkMono = connectionMono .flatMap(connection -> connection.createSession(entityPath)) .flatMap(session -> { logger.verbose("Creating producer for {}", entityPath); final RetryPolicy retryPolicy = RetryUtil.getRetryPolicy(clonedOptions.getRetry()); return session.createProducer(linkName, entityPath, clonedOptions.getRetry().getTryTimeout(), retryPolicy) .cast(AmqpSendLink.class); }); return new EventHubAsyncProducer(amqpLinkMono, clonedOptions, tracerProvider); } /** * Creates an Event Hub consumer responsible for reading {@link EventData} from a specific Event Hub partition, as a * member of the specified consumer group, and begins reading events from the {@code eventPosition}. * * The consumer created is non-exclusive, allowing multiple consumers from the same consumer group to be actively * reading events from the partition. These non-exclusive consumers are sometimes referred to as "Non-epoch * Consumers". * * @param consumerGroup The name of the consumer group this consumer is associated with. Events are read in * the context of this group. 
The name of the consumer group that is created by default is {@link * * @param partitionId The identifier of the Event Hub partition. * @param eventPosition The position within the partition where the consumer should begin reading events. * @return A new {@link EventHubAsyncConsumer} that receives events from the partition at the given position. * @throws NullPointerException If {@code eventPosition}, or {@code options} is {@code null}. * @throws IllegalArgumentException If {@code consumerGroup} or {@code partitionId} is {@code null} or an * empty string. */ public EventHubAsyncConsumer createConsumer(String consumerGroup, String partitionId, EventPosition eventPosition) { return createConsumer(consumerGroup, partitionId, eventPosition, defaultConsumerOptions); } /** * Creates an Event Hub consumer responsible for reading {@link EventData} from a specific Event Hub partition, as a * member of the configured consumer group, and begins reading events from the specified {@code eventPosition}. * * <p> * A consumer may be exclusive, which asserts ownership over the partition for the consumer group to ensure that * only one consumer from that group is reading the from the partition. These exclusive consumers are sometimes * referred to as "Epoch Consumers." * * A consumer may also be non-exclusive, allowing multiple consumers from the same consumer group to be actively * reading events from the partition. These non-exclusive consumers are sometimes referred to as "Non-epoch * Consumers." * * Designating a consumer as exclusive may be specified in the {@code options}, by setting {@link * EventHubConsumerOptions * non-exclusive. * </p> * * @param consumerGroup The name of the consumer group this consumer is associated with. Events are read in * the context of this group. The name of the consumer group that is created by default is {@link * * @param partitionId The identifier of the Event Hub partition from which events will be received. 
* @param eventPosition The position within the partition where the consumer should begin reading events. * @param options The set of options to apply when creating the consumer. * @return An new {@link EventHubAsyncConsumer} that receives events from the partition with all configured {@link * EventHubConsumerOptions}. * @throws NullPointerException If {@code eventPosition}, {@code consumerGroup}, {@code partitionId}, or * {@code options} is {@code null}. * @throws IllegalArgumentException If {@code consumerGroup} or {@code partitionId} is an empty string. */ public EventHubAsyncConsumer createConsumer(String consumerGroup, String partitionId, EventPosition eventPosition, EventHubConsumerOptions options) { Objects.requireNonNull(eventPosition, "'eventPosition' cannot be null."); Objects.requireNonNull(options, "'options' cannot be null."); Objects.requireNonNull(consumerGroup, "'consumerGroup' cannot be null."); Objects.requireNonNull(partitionId, "'partitionId' cannot be null."); if (consumerGroup.isEmpty()) { throw logger.logExceptionAsError( new IllegalArgumentException("'consumerGroup' cannot be an empty string.")); } else if (partitionId.isEmpty()) { throw logger.logExceptionAsError( new IllegalArgumentException("'partitionId' cannot be an empty string.")); } final EventHubConsumerOptions clonedOptions = options.clone(); if (clonedOptions.getScheduler() == null) { clonedOptions.setScheduler(connectionOptions.getScheduler()); } if (clonedOptions.getRetry() == null) { clonedOptions.setRetry(connectionOptions.getRetry()); } final String linkName = StringUtil.getRandomString("PR"); final String entityPath = String.format(Locale.US, RECEIVER_ENTITY_PATH_FORMAT, eventHubName, consumerGroup, partitionId); final Mono<AmqpReceiveLink> receiveLinkMono = connectionMono.flatMap(connection -> { return connection.createSession(entityPath).cast(EventHubSession.class); }).flatMap(session -> { logger.verbose("Creating consumer for path: {}", entityPath); final RetryPolicy 
retryPolicy = RetryUtil.getRetryPolicy(clonedOptions.getRetry()); return session.createConsumer(linkName, entityPath, getExpression(eventPosition), clonedOptions.getRetry().getTryTimeout(), retryPolicy, options.getOwnerLevel(), options.getIdentifier()) .cast(AmqpReceiveLink.class); }); return new EventHubAsyncConsumer(receiveLinkMono, clonedOptions); } /** * Closes and disposes of connection to service. Any {@link EventHubAsyncConsumer EventHubConsumers} and {@link * EventHubAsyncProducer EventHubProducers} created with this instance will have their connections closed. */ @Override public void close() { if (hasConnection.getAndSet(false)) { try { final AmqpConnection connection = connectionMono.block(connectionOptions.getRetry().getTryTimeout()); if (connection != null) { connection.close(); } } catch (IOException exception) { throw logger.logExceptionAsError( new AmqpException(false, "Unable to close connection to service", exception, new ErrorContext(connectionOptions.getHost()))); } } } String eventHubName() { return this.eventHubName; } private static class ResponseMapper implements AmqpResponseMapper { @Override public EventHubProperties toEventHubProperties(Map<?, ?> amqpBody) { return new EventHubProperties( (String) amqpBody.get(ManagementChannel.MANAGEMENT_ENTITY_NAME_KEY), ((Date) amqpBody.get(ManagementChannel.MANAGEMENT_RESULT_CREATED_AT)).toInstant(), (String[]) amqpBody.get(ManagementChannel.MANAGEMENT_RESULT_PARTITION_IDS)); } @Override public PartitionProperties toPartitionProperties(Map<?, ?> amqpBody) { return new PartitionProperties( (String) amqpBody.get(ManagementChannel.MANAGEMENT_ENTITY_NAME_KEY), (String) amqpBody.get(ManagementChannel.MANAGEMENT_PARTITION_NAME_KEY), (Long) amqpBody.get(ManagementChannel.MANAGEMENT_RESULT_BEGIN_SEQUENCE_NUMBER), (Long) amqpBody.get(ManagementChannel.MANAGEMENT_RESULT_LAST_ENQUEUED_SEQUENCE_NUMBER), (String) amqpBody.get(ManagementChannel.MANAGEMENT_RESULT_LAST_ENQUEUED_OFFSET), ((Date) 
amqpBody.get(ManagementChannel.MANAGEMENT_RESULT_LAST_ENQUEUED_TIME_UTC)).toInstant(), (Boolean) amqpBody.get(ManagementChannel.MANAGEMENT_RESULT_PARTITION_IS_EMPTY)); } } }
class EventHubAsyncClient implements Closeable { /** * The name of the default consumer group in the Event Hubs service. */ public static final String DEFAULT_CONSUMER_GROUP_NAME = "$Default"; private static final String RECEIVER_ENTITY_PATH_FORMAT = "%s/ConsumerGroups/%s/Partitions/%s"; private static final String SENDER_ENTITY_PATH_FORMAT = "%s/Partitions/%s"; private final ClientLogger logger = new ClientLogger(EventHubAsyncClient.class); private final String connectionId; private final Mono<EventHubConnection> connectionMono; private final AtomicBoolean hasConnection = new AtomicBoolean(false); private final ConnectionOptions connectionOptions; private final String eventHubName; private final EventHubProducerOptions defaultProducerOptions; private final EventHubConsumerOptions defaultConsumerOptions; private final TracerProvider tracerProvider; EventHubAsyncClient(ConnectionOptions connectionOptions, ReactorProvider provider, ReactorHandlerProvider handlerProvider, TracerProvider tracerProvider) { Objects.requireNonNull(connectionOptions, "'connectionOptions' cannot be null."); Objects.requireNonNull(provider, "'provider' cannot be null."); Objects.requireNonNull(handlerProvider, "'handlerProvider' cannot be null."); Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null."); this.connectionOptions = connectionOptions; this.tracerProvider = tracerProvider; this.eventHubName = connectionOptions.getEventHubName(); this.connectionId = StringUtil.getRandomString("MF"); this.connectionMono = Mono.fromCallable(() -> { return (EventHubConnection) new ReactorConnection(connectionId, connectionOptions, provider, handlerProvider, new ResponseMapper()); }).doOnSubscribe(c -> hasConnection.set(true)) .cache(); this.defaultProducerOptions = new EventHubProducerOptions() .setRetry(connectionOptions.getRetry()); this.defaultConsumerOptions = new EventHubConsumerOptions() .setRetry(connectionOptions.getRetry()) .setScheduler(connectionOptions.getScheduler()); } 
/** * Retrieves information about an Event Hub, including the number of partitions present and their identifiers. * * @return The set of information for the Event Hub that this client is associated with. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<EventHubProperties> getProperties() { return connectionMono .flatMap(connection -> connection .getManagementNode().flatMap(EventHubManagementNode::getEventHubProperties)); } /** * Retrieves the identifiers for the partitions of an Event Hub. * * @return A Flux of identifiers for the partitions of an Event Hub. */ @ServiceMethod(returns = ReturnType.COLLECTION) public Flux<String> getPartitionIds() { return getProperties().flatMapMany(properties -> Flux.fromArray(properties.getPartitionIds())); } /** * Retrieves information about a specific partition for an Event Hub, including elements that describe the available * events in the partition event stream. * * @param partitionId The unique identifier of a partition associated with the Event Hub. * @return The set of information for the requested partition under the Event Hub this client is associated with. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<PartitionProperties> getPartitionProperties(String partitionId) { return connectionMono.flatMap( connection -> connection.getManagementNode().flatMap(node -> { return node.getPartitionProperties(partitionId); })); } /** * Creates an Event Hub producer responsible for transmitting {@link EventData} to the Event Hub, grouped together * in batches. Event data is automatically routed to an available partition. * * @return A new {@link EventHubAsyncProducer}. */ public EventHubAsyncProducer createProducer() { return createProducer(defaultProducerOptions); } /** * Creates an Event Hub producer responsible for transmitting {@link EventData} to the Event Hub, grouped together * in batches. If {@link EventHubProducerOptions * events are routed to that specific partition. 
Otherwise, events are automatically routed to an available * partition. * * @param options The set of options to apply when creating the producer. * @return A new {@link EventHubAsyncProducer}. * @throws NullPointerException if {@code options} is {@code null}. */ public EventHubAsyncProducer createProducer(EventHubProducerOptions options) { Objects.requireNonNull(options, "'options' cannot be null."); final EventHubProducerOptions clonedOptions = options.clone(); if (clonedOptions.getRetry() == null) { clonedOptions.setRetry(connectionOptions.getRetry()); } final String entityPath; final String linkName; if (ImplUtils.isNullOrEmpty(options.getPartitionId())) { entityPath = eventHubName; linkName = StringUtil.getRandomString("EC"); } else { entityPath = String.format(Locale.US, SENDER_ENTITY_PATH_FORMAT, eventHubName, options.getPartitionId()); linkName = StringUtil.getRandomString("PS"); } final Mono<AmqpSendLink> amqpLinkMono = connectionMono .flatMap(connection -> connection.createSession(entityPath)) .flatMap(session -> { logger.verbose("Creating producer for {}", entityPath); final RetryPolicy retryPolicy = RetryUtil.getRetryPolicy(clonedOptions.getRetry()); return session.createProducer(linkName, entityPath, clonedOptions.getRetry().getTryTimeout(), retryPolicy).cast(AmqpSendLink.class); }); return new EventHubAsyncProducer(amqpLinkMono, clonedOptions, tracerProvider); } /** * Creates an Event Hub consumer responsible for reading {@link EventData} from a specific Event Hub partition, as a * member of the specified consumer group, and begins reading events from the {@code eventPosition}. * * The consumer created is non-exclusive, allowing multiple consumers from the same consumer group to be actively * reading events from the partition. These non-exclusive consumers are sometimes referred to as "Non-epoch * Consumers". * * @param consumerGroup The name of the consumer group this consumer is associated with. Events are read in the * context of this group. 
The name of the consumer group that is created by default is {@link * * @param partitionId The identifier of the Event Hub partition. * @param eventPosition The position within the partition where the consumer should begin reading events. * @return A new {@link EventHubAsyncConsumer} that receives events from the partition at the given position. * @throws NullPointerException If {@code eventPosition}, or {@code options} is {@code null}. * @throws IllegalArgumentException If {@code consumerGroup} or {@code partitionId} is {@code null} or an empty * string. */ public EventHubAsyncConsumer createConsumer(String consumerGroup, String partitionId, EventPosition eventPosition) { return createConsumer(consumerGroup, partitionId, eventPosition, defaultConsumerOptions); } /** * Creates an Event Hub consumer responsible for reading {@link EventData} from a specific Event Hub partition, as a * member of the configured consumer group, and begins reading events from the specified {@code eventPosition}. * * <p> * A consumer may be exclusive, which asserts ownership over the partition for the consumer group to ensure that * only one consumer from that group is reading the from the partition. These exclusive consumers are sometimes * referred to as "Epoch Consumers." * * A consumer may also be non-exclusive, allowing multiple consumers from the same consumer group to be actively * reading events from the partition. These non-exclusive consumers are sometimes referred to as "Non-epoch * Consumers." * * Designating a consumer as exclusive may be specified in the {@code options}, by setting {@link * EventHubConsumerOptions * non-exclusive. * </p> * * @param consumerGroup The name of the consumer group this consumer is associated with. Events are read in the * context of this group. The name of the consumer group that is created by default is {@link * * @param partitionId The identifier of the Event Hub partition from which events will be received. 
* @param eventPosition The position within the partition where the consumer should begin reading events. * @param options The set of options to apply when creating the consumer. * @return An new {@link EventHubAsyncConsumer} that receives events from the partition with all configured {@link * EventHubConsumerOptions}. * @throws NullPointerException If {@code eventPosition}, {@code consumerGroup}, {@code partitionId}, or * {@code options} is {@code null}. * @throws IllegalArgumentException If {@code consumerGroup} or {@code partitionId} is an empty string. */ public EventHubAsyncConsumer createConsumer(String consumerGroup, String partitionId, EventPosition eventPosition, EventHubConsumerOptions options) { Objects.requireNonNull(eventPosition, "'eventPosition' cannot be null."); Objects.requireNonNull(options, "'options' cannot be null."); Objects.requireNonNull(consumerGroup, "'consumerGroup' cannot be null."); Objects.requireNonNull(partitionId, "'partitionId' cannot be null."); if (consumerGroup.isEmpty()) { throw logger.logExceptionAsError( new IllegalArgumentException("'consumerGroup' cannot be an empty string.")); } else if (partitionId.isEmpty()) { throw logger.logExceptionAsError( new IllegalArgumentException("'partitionId' cannot be an empty string.")); } final EventHubConsumerOptions clonedOptions = options.clone(); if (clonedOptions.getScheduler() == null) { clonedOptions.setScheduler(connectionOptions.getScheduler()); } if (clonedOptions.getRetry() == null) { clonedOptions.setRetry(connectionOptions.getRetry()); } final String linkName = StringUtil.getRandomString("PR"); final String entityPath = String.format(Locale.US, RECEIVER_ENTITY_PATH_FORMAT, eventHubName, consumerGroup, partitionId); final Mono<AmqpReceiveLink> receiveLinkMono = connectionMono.flatMap(connection -> connection.createSession(entityPath).cast(EventHubSession.class)).flatMap(session -> { logger.verbose("Creating consumer for path: {}", entityPath); final RetryPolicy retryPolicy = 
RetryUtil.getRetryPolicy(clonedOptions.getRetry()); return session.createConsumer(linkName, entityPath, getExpression(eventPosition), clonedOptions.getRetry().getTryTimeout(), retryPolicy, options.getOwnerLevel(), options.getIdentifier()).cast(AmqpReceiveLink.class); }); return new EventHubAsyncConsumer(receiveLinkMono, clonedOptions); } /** * Closes and disposes of connection to service. Any {@link EventHubAsyncConsumer EventHubConsumers} and {@link * EventHubAsyncProducer EventHubProducers} created with this instance will have their connections closed. */ @Override public void close() { if (hasConnection.getAndSet(false)) { try { final AmqpConnection connection = connectionMono.block(connectionOptions.getRetry().getTryTimeout()); if (connection != null) { connection.close(); } } catch (IOException exception) { throw logger.logExceptionAsError( new AmqpException(false, "Unable to close connection to service", exception, new ErrorContext(connectionOptions.getHost()))); } } } String getEventHubName() { return this.eventHubName; } private static class ResponseMapper implements AmqpResponseMapper { @Override public EventHubProperties toEventHubProperties(Map<?, ?> amqpBody) { return new EventHubProperties( (String) amqpBody.get(ManagementChannel.MANAGEMENT_ENTITY_NAME_KEY), ((Date) amqpBody.get(ManagementChannel.MANAGEMENT_RESULT_CREATED_AT)).toInstant(), (String[]) amqpBody.get(ManagementChannel.MANAGEMENT_RESULT_PARTITION_IDS)); } @Override public PartitionProperties toPartitionProperties(Map<?, ?> amqpBody) { return new PartitionProperties( (String) amqpBody.get(ManagementChannel.MANAGEMENT_ENTITY_NAME_KEY), (String) amqpBody.get(ManagementChannel.MANAGEMENT_PARTITION_NAME_KEY), (Long) amqpBody.get(ManagementChannel.MANAGEMENT_RESULT_BEGIN_SEQUENCE_NUMBER), (Long) amqpBody.get(ManagementChannel.MANAGEMENT_RESULT_LAST_ENQUEUED_SEQUENCE_NUMBER), (String) amqpBody.get(ManagementChannel.MANAGEMENT_RESULT_LAST_ENQUEUED_OFFSET), ((Date) 
amqpBody.get(ManagementChannel.MANAGEMENT_RESULT_LAST_ENQUEUED_TIME_UTC)).toInstant(), (Boolean) amqpBody.get(ManagementChannel.MANAGEMENT_RESULT_PARTITION_IS_EMPTY)); } } }
Updated
private static String getExpression(EventPosition eventPosition) { final String isInclusiveFlag = eventPosition.isInclusive() ? "=" : ""; if (eventPosition.getOffset() != null) { return String.format( AmqpConstants.AMQP_ANNOTATION_FORMAT, OFFSET_ANNOTATION_NAME.getValue(), isInclusiveFlag, eventPosition.getOffset()); } if (eventPosition.getSequenceNumber() != null) { return String.format( AmqpConstants.AMQP_ANNOTATION_FORMAT, SEQUENCE_NUMBER_ANNOTATION_NAME.getValue(), isInclusiveFlag, eventPosition.getSequenceNumber()); } if (eventPosition.getEnqueuedDateTime() != null) { String ms; try { ms = Long.toString(eventPosition.getEnqueuedDateTime().toEpochMilli()); } catch (ArithmeticException ex) { ms = Long.toString(Long.MAX_VALUE); } return String.format( AmqpConstants.AMQP_ANNOTATION_FORMAT, ENQUEUED_TIME_UTC_ANNOTATION_NAME.getValue(), isInclusiveFlag, ms); } throw new IllegalArgumentException("No starting position was set."); }
ms = Long.toString(Long.MAX_VALUE);
private static String getExpression(EventPosition eventPosition) { final String isInclusiveFlag = eventPosition.isInclusive() ? "=" : ""; if (eventPosition.getOffset() != null) { return String.format( AmqpConstants.AMQP_ANNOTATION_FORMAT, OFFSET_ANNOTATION_NAME.getValue(), isInclusiveFlag, eventPosition.getOffset()); } if (eventPosition.getSequenceNumber() != null) { return String.format( AmqpConstants.AMQP_ANNOTATION_FORMAT, SEQUENCE_NUMBER_ANNOTATION_NAME.getValue(), isInclusiveFlag, eventPosition.getSequenceNumber()); } if (eventPosition.getEnqueuedDateTime() != null) { String ms; try { ms = Long.toString(eventPosition.getEnqueuedDateTime().toEpochMilli()); } catch (ArithmeticException ex) { ms = Long.toString(Long.MAX_VALUE); } return String.format( AmqpConstants.AMQP_ANNOTATION_FORMAT, ENQUEUED_TIME_UTC_ANNOTATION_NAME.getValue(), isInclusiveFlag, ms); } throw new IllegalArgumentException("No starting position was set."); }
class EventHubAsyncClient implements Closeable { /** * The name of the default consumer group in the Event Hubs service. */ public static final String DEFAULT_CONSUMER_GROUP_NAME = "$Default"; private static final String RECEIVER_ENTITY_PATH_FORMAT = "%s/ConsumerGroups/%s/Partitions/%s"; private static final String SENDER_ENTITY_PATH_FORMAT = "%s/Partitions/%s"; private final ClientLogger logger = new ClientLogger(EventHubAsyncClient.class); private final String connectionId; private final Mono<EventHubConnection> connectionMono; private final AtomicBoolean hasConnection = new AtomicBoolean(false); private final ConnectionOptions connectionOptions; private final String eventHubName; private final EventHubProducerOptions defaultProducerOptions; private final EventHubConsumerOptions defaultConsumerOptions; private final TracerProvider tracerProvider; EventHubAsyncClient(ConnectionOptions connectionOptions, ReactorProvider provider, ReactorHandlerProvider handlerProvider, TracerProvider tracerProvider) { Objects.requireNonNull(connectionOptions, "'connectionOptions' cannot be null."); Objects.requireNonNull(provider, "'provider' cannot be null."); Objects.requireNonNull(handlerProvider, "'handlerProvider' cannot be null."); Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null."); this.connectionOptions = connectionOptions; this.tracerProvider = tracerProvider; this.eventHubName = connectionOptions.getEventHubName(); this.connectionId = StringUtil.getRandomString("MF"); this.connectionMono = Mono.fromCallable(() -> { return (EventHubConnection) new ReactorConnection(connectionId, connectionOptions, provider, handlerProvider, new ResponseMapper()); }).doOnSubscribe(c -> hasConnection.set(true)) .cache(); this.defaultProducerOptions = new EventHubProducerOptions() .setRetry(connectionOptions.getRetry()); this.defaultConsumerOptions = new EventHubConsumerOptions() .setRetry(connectionOptions.getRetry()) .setScheduler(connectionOptions.getScheduler()); } 
/** * Retrieves information about an Event Hub, including the number of partitions present and their identifiers. * * @return The set of information for the Event Hub that this client is associated with. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<EventHubProperties> getProperties() { return connectionMono .flatMap(connection -> connection .getManagementNode().flatMap(EventHubManagementNode::getEventHubProperties)); } /** * Retrieves the identifiers for the partitions of an Event Hub. * * @return A Flux of identifiers for the partitions of an Event Hub. */ @ServiceMethod(returns = ReturnType.COLLECTION) public Flux<String> getPartitionIds() { return getProperties().flatMapMany(properties -> Flux.fromArray(properties.getPartitionIds())); } /** * Retrieves information about a specific partition for an Event Hub, including elements that describe the available * events in the partition event stream. * * @param partitionId The unique identifier of a partition associated with the Event Hub. * @return The set of information for the requested partition under the Event Hub this client is associated with. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<PartitionProperties> getPartitionProperties(String partitionId) { return connectionMono.flatMap( connection -> connection.getManagementNode().flatMap(node -> { return node.getPartitionProperties(partitionId); })); } /** * Creates an Event Hub producer responsible for transmitting {@link EventData} to the Event Hub, grouped together * in batches. Event data is automatically routed to an available partition. * * @return A new {@link EventHubAsyncProducer}. */ public EventHubAsyncProducer createProducer() { return createProducer(defaultProducerOptions); } /** * Creates an Event Hub producer responsible for transmitting {@link EventData} to the Event Hub, grouped together * in batches. If {@link EventHubProducerOptions * events are routed to that specific partition. 
Otherwise, events are automatically routed to an available * partition. * * @param options The set of options to apply when creating the producer. * @return A new {@link EventHubAsyncProducer}. * @throws NullPointerException if {@code options} is {@code null}. */ public EventHubAsyncProducer createProducer(EventHubProducerOptions options) { Objects.requireNonNull(options, "'options' cannot be null."); final EventHubProducerOptions clonedOptions = options.clone(); if (clonedOptions.getRetry() == null) { clonedOptions.setRetry(connectionOptions.getRetry()); } final String entityPath; final String linkName; if (ImplUtils.isNullOrEmpty(options.getPartitionId())) { entityPath = eventHubName; linkName = StringUtil.getRandomString("EC"); } else { entityPath = String.format(Locale.US, SENDER_ENTITY_PATH_FORMAT, eventHubName, options.getPartitionId()); linkName = StringUtil.getRandomString("PS"); } final Mono<AmqpSendLink> amqpLinkMono = connectionMono .flatMap(connection -> connection.createSession(entityPath)) .flatMap(session -> { logger.verbose("Creating producer for {}", entityPath); final RetryPolicy retryPolicy = RetryUtil.getRetryPolicy(clonedOptions.getRetry()); return session.createProducer(linkName, entityPath, clonedOptions.getRetry().getTryTimeout(), retryPolicy) .cast(AmqpSendLink.class); }); return new EventHubAsyncProducer(amqpLinkMono, clonedOptions, tracerProvider); } /** * Creates an Event Hub consumer responsible for reading {@link EventData} from a specific Event Hub partition, as a * member of the specified consumer group, and begins reading events from the {@code eventPosition}. * * The consumer created is non-exclusive, allowing multiple consumers from the same consumer group to be actively * reading events from the partition. These non-exclusive consumers are sometimes referred to as "Non-epoch * Consumers". * * @param consumerGroup The name of the consumer group this consumer is associated with. Events are read in * the context of this group. 
The name of the consumer group that is created by default is {@link * * @param partitionId The identifier of the Event Hub partition. * @param eventPosition The position within the partition where the consumer should begin reading events. * @return A new {@link EventHubAsyncConsumer} that receives events from the partition at the given position. * @throws NullPointerException If {@code eventPosition}, or {@code options} is {@code null}. * @throws IllegalArgumentException If {@code consumerGroup} or {@code partitionId} is {@code null} or an * empty string. */ public EventHubAsyncConsumer createConsumer(String consumerGroup, String partitionId, EventPosition eventPosition) { return createConsumer(consumerGroup, partitionId, eventPosition, defaultConsumerOptions); } /** * Creates an Event Hub consumer responsible for reading {@link EventData} from a specific Event Hub partition, as a * member of the configured consumer group, and begins reading events from the specified {@code eventPosition}. * * <p> * A consumer may be exclusive, which asserts ownership over the partition for the consumer group to ensure that * only one consumer from that group is reading the from the partition. These exclusive consumers are sometimes * referred to as "Epoch Consumers." * * A consumer may also be non-exclusive, allowing multiple consumers from the same consumer group to be actively * reading events from the partition. These non-exclusive consumers are sometimes referred to as "Non-epoch * Consumers." * * Designating a consumer as exclusive may be specified in the {@code options}, by setting {@link * EventHubConsumerOptions * non-exclusive. * </p> * * @param consumerGroup The name of the consumer group this consumer is associated with. Events are read in * the context of this group. The name of the consumer group that is created by default is {@link * * @param partitionId The identifier of the Event Hub partition from which events will be received. 
* @param eventPosition The position within the partition where the consumer should begin reading events. * @param options The set of options to apply when creating the consumer. * @return An new {@link EventHubAsyncConsumer} that receives events from the partition with all configured {@link * EventHubConsumerOptions}. * @throws NullPointerException If {@code eventPosition}, {@code consumerGroup}, {@code partitionId}, or * {@code options} is {@code null}. * @throws IllegalArgumentException If {@code consumerGroup} or {@code partitionId} is an empty string. */ public EventHubAsyncConsumer createConsumer(String consumerGroup, String partitionId, EventPosition eventPosition, EventHubConsumerOptions options) { Objects.requireNonNull(eventPosition, "'eventPosition' cannot be null."); Objects.requireNonNull(options, "'options' cannot be null."); Objects.requireNonNull(consumerGroup, "'consumerGroup' cannot be null."); Objects.requireNonNull(partitionId, "'partitionId' cannot be null."); if (consumerGroup.isEmpty()) { throw logger.logExceptionAsError( new IllegalArgumentException("'consumerGroup' cannot be an empty string.")); } else if (partitionId.isEmpty()) { throw logger.logExceptionAsError( new IllegalArgumentException("'partitionId' cannot be an empty string.")); } final EventHubConsumerOptions clonedOptions = options.clone(); if (clonedOptions.getScheduler() == null) { clonedOptions.setScheduler(connectionOptions.getScheduler()); } if (clonedOptions.getRetry() == null) { clonedOptions.setRetry(connectionOptions.getRetry()); } final String linkName = StringUtil.getRandomString("PR"); final String entityPath = String.format(Locale.US, RECEIVER_ENTITY_PATH_FORMAT, eventHubName, consumerGroup, partitionId); final Mono<AmqpReceiveLink> receiveLinkMono = connectionMono.flatMap(connection -> { return connection.createSession(entityPath).cast(EventHubSession.class); }).flatMap(session -> { logger.verbose("Creating consumer for path: {}", entityPath); final RetryPolicy 
retryPolicy = RetryUtil.getRetryPolicy(clonedOptions.getRetry()); return session.createConsumer(linkName, entityPath, getExpression(eventPosition), clonedOptions.getRetry().getTryTimeout(), retryPolicy, options.getOwnerLevel(), options.getIdentifier()) .cast(AmqpReceiveLink.class); }); return new EventHubAsyncConsumer(receiveLinkMono, clonedOptions); } /** * Closes and disposes of connection to service. Any {@link EventHubAsyncConsumer EventHubConsumers} and {@link * EventHubAsyncProducer EventHubProducers} created with this instance will have their connections closed. */ @Override public void close() { if (hasConnection.getAndSet(false)) { try { final AmqpConnection connection = connectionMono.block(connectionOptions.getRetry().getTryTimeout()); if (connection != null) { connection.close(); } } catch (IOException exception) { throw logger.logExceptionAsError( new AmqpException(false, "Unable to close connection to service", exception, new ErrorContext(connectionOptions.getHost()))); } } } String eventHubName() { return this.eventHubName; } private static class ResponseMapper implements AmqpResponseMapper { @Override public EventHubProperties toEventHubProperties(Map<?, ?> amqpBody) { return new EventHubProperties( (String) amqpBody.get(ManagementChannel.MANAGEMENT_ENTITY_NAME_KEY), ((Date) amqpBody.get(ManagementChannel.MANAGEMENT_RESULT_CREATED_AT)).toInstant(), (String[]) amqpBody.get(ManagementChannel.MANAGEMENT_RESULT_PARTITION_IDS)); } @Override public PartitionProperties toPartitionProperties(Map<?, ?> amqpBody) { return new PartitionProperties( (String) amqpBody.get(ManagementChannel.MANAGEMENT_ENTITY_NAME_KEY), (String) amqpBody.get(ManagementChannel.MANAGEMENT_PARTITION_NAME_KEY), (Long) amqpBody.get(ManagementChannel.MANAGEMENT_RESULT_BEGIN_SEQUENCE_NUMBER), (Long) amqpBody.get(ManagementChannel.MANAGEMENT_RESULT_LAST_ENQUEUED_SEQUENCE_NUMBER), (String) amqpBody.get(ManagementChannel.MANAGEMENT_RESULT_LAST_ENQUEUED_OFFSET), ((Date) 
amqpBody.get(ManagementChannel.MANAGEMENT_RESULT_LAST_ENQUEUED_TIME_UTC)).toInstant(), (Boolean) amqpBody.get(ManagementChannel.MANAGEMENT_RESULT_PARTITION_IS_EMPTY)); } } }
class EventHubAsyncClient implements Closeable { /** * The name of the default consumer group in the Event Hubs service. */ public static final String DEFAULT_CONSUMER_GROUP_NAME = "$Default"; private static final String RECEIVER_ENTITY_PATH_FORMAT = "%s/ConsumerGroups/%s/Partitions/%s"; private static final String SENDER_ENTITY_PATH_FORMAT = "%s/Partitions/%s"; private final ClientLogger logger = new ClientLogger(EventHubAsyncClient.class); private final String connectionId; private final Mono<EventHubConnection> connectionMono; private final AtomicBoolean hasConnection = new AtomicBoolean(false); private final ConnectionOptions connectionOptions; private final String eventHubName; private final EventHubProducerOptions defaultProducerOptions; private final EventHubConsumerOptions defaultConsumerOptions; private final TracerProvider tracerProvider; EventHubAsyncClient(ConnectionOptions connectionOptions, ReactorProvider provider, ReactorHandlerProvider handlerProvider, TracerProvider tracerProvider) { Objects.requireNonNull(connectionOptions, "'connectionOptions' cannot be null."); Objects.requireNonNull(provider, "'provider' cannot be null."); Objects.requireNonNull(handlerProvider, "'handlerProvider' cannot be null."); Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null."); this.connectionOptions = connectionOptions; this.tracerProvider = tracerProvider; this.eventHubName = connectionOptions.getEventHubName(); this.connectionId = StringUtil.getRandomString("MF"); this.connectionMono = Mono.fromCallable(() -> { return (EventHubConnection) new ReactorConnection(connectionId, connectionOptions, provider, handlerProvider, new ResponseMapper()); }).doOnSubscribe(c -> hasConnection.set(true)) .cache(); this.defaultProducerOptions = new EventHubProducerOptions() .setRetry(connectionOptions.getRetry()); this.defaultConsumerOptions = new EventHubConsumerOptions() .setRetry(connectionOptions.getRetry()) .setScheduler(connectionOptions.getScheduler()); } 
/** * Retrieves information about an Event Hub, including the number of partitions present and their identifiers. * * @return The set of information for the Event Hub that this client is associated with. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<EventHubProperties> getProperties() { return connectionMono .flatMap(connection -> connection .getManagementNode().flatMap(EventHubManagementNode::getEventHubProperties)); } /** * Retrieves the identifiers for the partitions of an Event Hub. * * @return A Flux of identifiers for the partitions of an Event Hub. */ @ServiceMethod(returns = ReturnType.COLLECTION) public Flux<String> getPartitionIds() { return getProperties().flatMapMany(properties -> Flux.fromArray(properties.getPartitionIds())); } /** * Retrieves information about a specific partition for an Event Hub, including elements that describe the available * events in the partition event stream. * * @param partitionId The unique identifier of a partition associated with the Event Hub. * @return The set of information for the requested partition under the Event Hub this client is associated with. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<PartitionProperties> getPartitionProperties(String partitionId) { return connectionMono.flatMap( connection -> connection.getManagementNode().flatMap(node -> { return node.getPartitionProperties(partitionId); })); } /** * Creates an Event Hub producer responsible for transmitting {@link EventData} to the Event Hub, grouped together * in batches. Event data is automatically routed to an available partition. * * @return A new {@link EventHubAsyncProducer}. */ public EventHubAsyncProducer createProducer() { return createProducer(defaultProducerOptions); } /** * Creates an Event Hub producer responsible for transmitting {@link EventData} to the Event Hub, grouped together * in batches. If {@link EventHubProducerOptions * events are routed to that specific partition. 
Otherwise, events are automatically routed to an available * partition. * * @param options The set of options to apply when creating the producer. * @return A new {@link EventHubAsyncProducer}. * @throws NullPointerException if {@code options} is {@code null}. */ public EventHubAsyncProducer createProducer(EventHubProducerOptions options) { Objects.requireNonNull(options, "'options' cannot be null."); final EventHubProducerOptions clonedOptions = options.clone(); if (clonedOptions.getRetry() == null) { clonedOptions.setRetry(connectionOptions.getRetry()); } final String entityPath; final String linkName; if (ImplUtils.isNullOrEmpty(options.getPartitionId())) { entityPath = eventHubName; linkName = StringUtil.getRandomString("EC"); } else { entityPath = String.format(Locale.US, SENDER_ENTITY_PATH_FORMAT, eventHubName, options.getPartitionId()); linkName = StringUtil.getRandomString("PS"); } final Mono<AmqpSendLink> amqpLinkMono = connectionMono .flatMap(connection -> connection.createSession(entityPath)) .flatMap(session -> { logger.verbose("Creating producer for {}", entityPath); final RetryPolicy retryPolicy = RetryUtil.getRetryPolicy(clonedOptions.getRetry()); return session.createProducer(linkName, entityPath, clonedOptions.getRetry().getTryTimeout(), retryPolicy).cast(AmqpSendLink.class); }); return new EventHubAsyncProducer(amqpLinkMono, clonedOptions, tracerProvider); } /** * Creates an Event Hub consumer responsible for reading {@link EventData} from a specific Event Hub partition, as a * member of the specified consumer group, and begins reading events from the {@code eventPosition}. * * The consumer created is non-exclusive, allowing multiple consumers from the same consumer group to be actively * reading events from the partition. These non-exclusive consumers are sometimes referred to as "Non-epoch * Consumers". * * @param consumerGroup The name of the consumer group this consumer is associated with. Events are read in the * context of this group. 
The name of the consumer group that is created by default is {@link * * @param partitionId The identifier of the Event Hub partition. * @param eventPosition The position within the partition where the consumer should begin reading events. * @return A new {@link EventHubAsyncConsumer} that receives events from the partition at the given position. * @throws NullPointerException If {@code eventPosition}, or {@code options} is {@code null}. * @throws IllegalArgumentException If {@code consumerGroup} or {@code partitionId} is {@code null} or an empty * string. */ public EventHubAsyncConsumer createConsumer(String consumerGroup, String partitionId, EventPosition eventPosition) { return createConsumer(consumerGroup, partitionId, eventPosition, defaultConsumerOptions); } /** * Creates an Event Hub consumer responsible for reading {@link EventData} from a specific Event Hub partition, as a * member of the configured consumer group, and begins reading events from the specified {@code eventPosition}. * * <p> * A consumer may be exclusive, which asserts ownership over the partition for the consumer group to ensure that * only one consumer from that group is reading the from the partition. These exclusive consumers are sometimes * referred to as "Epoch Consumers." * * A consumer may also be non-exclusive, allowing multiple consumers from the same consumer group to be actively * reading events from the partition. These non-exclusive consumers are sometimes referred to as "Non-epoch * Consumers." * * Designating a consumer as exclusive may be specified in the {@code options}, by setting {@link * EventHubConsumerOptions * non-exclusive. * </p> * * @param consumerGroup The name of the consumer group this consumer is associated with. Events are read in the * context of this group. The name of the consumer group that is created by default is {@link * * @param partitionId The identifier of the Event Hub partition from which events will be received. 
* @param eventPosition The position within the partition where the consumer should begin reading events. * @param options The set of options to apply when creating the consumer. * @return An new {@link EventHubAsyncConsumer} that receives events from the partition with all configured {@link * EventHubConsumerOptions}. * @throws NullPointerException If {@code eventPosition}, {@code consumerGroup}, {@code partitionId}, or * {@code options} is {@code null}. * @throws IllegalArgumentException If {@code consumerGroup} or {@code partitionId} is an empty string. */ public EventHubAsyncConsumer createConsumer(String consumerGroup, String partitionId, EventPosition eventPosition, EventHubConsumerOptions options) { Objects.requireNonNull(eventPosition, "'eventPosition' cannot be null."); Objects.requireNonNull(options, "'options' cannot be null."); Objects.requireNonNull(consumerGroup, "'consumerGroup' cannot be null."); Objects.requireNonNull(partitionId, "'partitionId' cannot be null."); if (consumerGroup.isEmpty()) { throw logger.logExceptionAsError( new IllegalArgumentException("'consumerGroup' cannot be an empty string.")); } else if (partitionId.isEmpty()) { throw logger.logExceptionAsError( new IllegalArgumentException("'partitionId' cannot be an empty string.")); } final EventHubConsumerOptions clonedOptions = options.clone(); if (clonedOptions.getScheduler() == null) { clonedOptions.setScheduler(connectionOptions.getScheduler()); } if (clonedOptions.getRetry() == null) { clonedOptions.setRetry(connectionOptions.getRetry()); } final String linkName = StringUtil.getRandomString("PR"); final String entityPath = String.format(Locale.US, RECEIVER_ENTITY_PATH_FORMAT, eventHubName, consumerGroup, partitionId); final Mono<AmqpReceiveLink> receiveLinkMono = connectionMono.flatMap(connection -> connection.createSession(entityPath).cast(EventHubSession.class)).flatMap(session -> { logger.verbose("Creating consumer for path: {}", entityPath); final RetryPolicy retryPolicy = 
RetryUtil.getRetryPolicy(clonedOptions.getRetry()); return session.createConsumer(linkName, entityPath, getExpression(eventPosition), clonedOptions.getRetry().getTryTimeout(), retryPolicy, options.getOwnerLevel(), options.getIdentifier()).cast(AmqpReceiveLink.class); }); return new EventHubAsyncConsumer(receiveLinkMono, clonedOptions); } /** * Closes and disposes of connection to service. Any {@link EventHubAsyncConsumer EventHubConsumers} and {@link * EventHubAsyncProducer EventHubProducers} created with this instance will have their connections closed. */ @Override public void close() { if (hasConnection.getAndSet(false)) { try { final AmqpConnection connection = connectionMono.block(connectionOptions.getRetry().getTryTimeout()); if (connection != null) { connection.close(); } } catch (IOException exception) { throw logger.logExceptionAsError( new AmqpException(false, "Unable to close connection to service", exception, new ErrorContext(connectionOptions.getHost()))); } } } String getEventHubName() { return this.eventHubName; } private static class ResponseMapper implements AmqpResponseMapper { @Override public EventHubProperties toEventHubProperties(Map<?, ?> amqpBody) { return new EventHubProperties( (String) amqpBody.get(ManagementChannel.MANAGEMENT_ENTITY_NAME_KEY), ((Date) amqpBody.get(ManagementChannel.MANAGEMENT_RESULT_CREATED_AT)).toInstant(), (String[]) amqpBody.get(ManagementChannel.MANAGEMENT_RESULT_PARTITION_IDS)); } @Override public PartitionProperties toPartitionProperties(Map<?, ?> amqpBody) { return new PartitionProperties( (String) amqpBody.get(ManagementChannel.MANAGEMENT_ENTITY_NAME_KEY), (String) amqpBody.get(ManagementChannel.MANAGEMENT_PARTITION_NAME_KEY), (Long) amqpBody.get(ManagementChannel.MANAGEMENT_RESULT_BEGIN_SEQUENCE_NUMBER), (Long) amqpBody.get(ManagementChannel.MANAGEMENT_RESULT_LAST_ENQUEUED_SEQUENCE_NUMBER), (String) amqpBody.get(ManagementChannel.MANAGEMENT_RESULT_LAST_ENQUEUED_OFFSET), ((Date) 
amqpBody.get(ManagementChannel.MANAGEMENT_RESULT_LAST_ENQUEUED_TIME_UTC)).toInstant(), (Boolean) amqpBody.get(ManagementChannel.MANAGEMENT_RESULT_PARTITION_IS_EMPTY)); } } }
cpk stands for customer provided key, and it belongs to a feature of the same name.
public BlobAsyncClient buildBlobAsyncClient() { Objects.requireNonNull(containerName); Objects.requireNonNull(blobName); HttpPipeline pipeline = super.getPipeline(); if (pipeline == null) { pipeline = super.buildPipeline(); } return new BlobAsyncClient(new AzureBlobStorageBuilder() .url(String.format("%s/%s/%s", endpoint, containerName, blobName)) .pipeline(pipeline) .build(), snapshot, cpk); }
.build(), snapshot, cpk);
public BlobAsyncClient buildBlobAsyncClient() { Objects.requireNonNull(containerName, "'containerName' cannot be null."); Objects.requireNonNull(blobName, "'blobName' cannot be null."); HttpPipeline pipeline = super.getPipeline(); if (pipeline == null) { pipeline = super.buildPipeline(); } return new BlobAsyncClient(new AzureBlobStorageBuilder() .url(String.format("%s/%s/%s", endpoint, containerName, blobName)) .pipeline(pipeline) .build(), snapshot, customerProvidedKey); }
class BlobClientBuilder extends BaseBlobClientBuilder<BlobClientBuilder> { private final ClientLogger logger = new ClientLogger(BlobClientBuilder.class); private String containerName; private String blobName; private String snapshot; /** * Creates a builder instance that is able to configure and construct {@link BlobClient BlobClients} and * {@link BlobAsyncClient BlobAsyncClients}. */ public BlobClientBuilder() { } /** * Creates a {@link BlobClient} based on options set in the Builder. BlobClients are used to perform generic blob * methods such as {@link BlobClient * properties}, use this when the blob type is unknown. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.Builder.buildBlobClient} * * @return a {@link BlobClient} created from the configurations in this builder. * @throws NullPointerException If {@code endpoint}, {@code containerName}, or {@code blobName} is {@code null}. */ public BlobClient buildBlobClient() { return new BlobClient(buildBlobAsyncClient()); } /** * Creates a {@link BlobAsyncClient} based on options set in the Builder. BlobAsyncClients are used to perform * generic blob methods such as {@link BlobAsyncClient * BlobAsyncClient * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.Builder.buildBlobAsyncClient} * * @return a {@link BlobAsyncClient} created from the configurations in this builder. * @throws NullPointerException If {@code endpoint}, {@code containerName}, or {@code blobName} is {@code null}. */ /** * Sets the service endpoint, additionally parses it for information (SAS token, container name, blob name) * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.Builder.endpoint * * @param endpoint URL of the service * @return the updated BlobClientBuilder object * @throws IllegalArgumentException If {@code endpoint} is {@code null} or is a malformed URL. 
*/ @Override public BlobClientBuilder endpoint(String endpoint) { try { URL url = new URL(endpoint); BlobURLParts parts = BlobURLParts.parse(url); this.endpoint = parts.getScheme() + ": this.containerName = parts.getContainerName(); this.blobName = parts.getBlobName(); this.snapshot = parts.getSnapshot(); SASTokenCredential sasTokenCredential = SASTokenCredential.fromSASTokenString(parts.getSasQueryParameters().encode()); if (sasTokenCredential != null) { super.credential(sasTokenCredential); } } catch (MalformedURLException ex) { throw logger.logExceptionAsError( new IllegalArgumentException("The Azure Storage Blob endpoint url is malformed.")); } return this; } /** * Sets the name of the container this client is connecting to. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.Builder.containerName * * @param containerName the name of the container * @return the updated BlobClientBuilder object * @throws NullPointerException If {@code containerName} is {@code null} */ public BlobClientBuilder containerName(String containerName) { this.containerName = Objects.requireNonNull(containerName); return this; } /** * Sets the name of the blob this client is connecting to. * * @param blobName the name of the blob * @return the updated BlobClientBuilder object * @throws NullPointerException If {@code blobName} is {@code null} */ public BlobClientBuilder blobName(String blobName) { this.blobName = Objects.requireNonNull(blobName); return this; } /** * Sets the snapshot of the blob this client is connecting to. * * @param snapshot the snapshot identifier for the blob * @return the updated BlobClientBuilder object */ public BlobClientBuilder snapshot(String snapshot) { this.snapshot = snapshot; return this; } @Override protected Class<BlobClientBuilder> getClazz() { return BlobClientBuilder.class; } }
class BlobClientBuilder extends BaseBlobClientBuilder<BlobClientBuilder> { private final ClientLogger logger = new ClientLogger(BlobClientBuilder.class); private String containerName; private String blobName; private String snapshot; /** * Creates a builder instance that is able to configure and construct {@link BlobClient BlobClients} and * {@link BlobAsyncClient BlobAsyncClients}. */ public BlobClientBuilder() { } /** * Creates a {@link BlobClient} based on options set in the Builder. BlobClients are used to perform generic blob * methods such as {@link BlobClient * properties}, use this when the blob type is unknown. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.Builder.buildBlobClient} * * @return a {@link BlobClient} created from the configurations in this builder. * @throws NullPointerException If {@code endpoint}, {@code containerName}, or {@code blobName} is {@code null}. */ public BlobClient buildBlobClient() { return new BlobClient(buildBlobAsyncClient()); } /** * Creates a {@link BlobAsyncClient} based on options set in the Builder. BlobAsyncClients are used to perform * generic blob methods such as {@link BlobAsyncClient * BlobAsyncClient * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.Builder.buildBlobAsyncClient} * * @return a {@link BlobAsyncClient} created from the configurations in this builder. * @throws NullPointerException If {@code endpoint}, {@code containerName}, or {@code blobName} is {@code null}. */ /** * Sets the service endpoint, additionally parses it for information (SAS token, container name, blob name) * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.Builder.endpoint * * @param endpoint URL of the service * @return the updated BlobClientBuilder object * @throws IllegalArgumentException If {@code endpoint} is {@code null} or is a malformed URL. 
*/ @Override public BlobClientBuilder endpoint(String endpoint) { try { URL url = new URL(endpoint); BlobURLParts parts = BlobURLParts.parse(url); this.endpoint = parts.getScheme() + ": this.containerName = parts.getContainerName(); this.blobName = parts.getBlobName(); this.snapshot = parts.getSnapshot(); SASTokenCredential sasTokenCredential = SASTokenCredential.fromSASTokenString(parts.getSasQueryParameters().encode()); if (sasTokenCredential != null) { super.credential(sasTokenCredential); } } catch (MalformedURLException ex) { throw logger.logExceptionAsError( new IllegalArgumentException("The Azure Storage Blob endpoint url is malformed.")); } return this; } /** * Sets the name of the container this client is connecting to. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.Builder.containerName * * @param containerName the name of the container * @return the updated BlobClientBuilder object * @throws NullPointerException If {@code containerName} is {@code null} */ public BlobClientBuilder containerName(String containerName) { this.containerName = Objects.requireNonNull(containerName, "'containerName' cannot be null."); return this; } /** * Sets the name of the blob this client is connecting to. * * @param blobName the name of the blob * @return the updated BlobClientBuilder object * @throws NullPointerException If {@code blobName} is {@code null} */ public BlobClientBuilder blobName(String blobName) { this.blobName = Objects.requireNonNull(blobName, "'blobName' cannot be null."); return this; } /** * Sets the snapshot of the blob this client is connecting to. * * @param snapshot the snapshot identifier for the blob * @return the updated BlobClientBuilder object */ public BlobClientBuilder snapshot(String snapshot) { this.snapshot = snapshot; return this; } @Override protected Class<BlobClientBuilder> getClazz() { return BlobClientBuilder.class; } }
We should use logger here and at other places in this class. It is available from TestSuiteBase, which gets it from DocumentClientTest
/**
 * One-time suite setup: builds the shared client, database and container, clears the
 * container, seeds it with the documents the ORDER BY tests expect, and records the
 * partition count used by request-charge assertions.
 *
 * <p>Fix: debug output now goes through the suite {@code logger} (inherited via
 * TestSuiteBase) instead of {@code System.out.println}, so it follows the configured
 * logging pipeline.</p>
 *
 * @throws Exception if any setup step fails
 */
public void beforeClass() throws Exception {
    logger.info("OrderbyDocumentQueryTest.beforeClass");
    client = clientBuilder().build();
    createdDatabase = getSharedCosmosDatabase(client);
    createdCollection = getSharedMultiPartitionCosmosContainer(client);
    // Remove documents left over from earlier runs so expected counts hold.
    logger.info("bef: truncate collection");
    truncateCollection(createdCollection);
    logger.info("after: truncate collection");
    List<Map<String, Object>> keyValuePropsList = new ArrayList<>();
    Map<String, Object> props;
    // 30 documents carrying an int and the string form of the same value (propInt / propStr).
    logger.info("bef: create 30 docs");
    for (int i = 0; i < 30; i++) {
        props = new HashMap<>();
        props.put("propInt", i);
        props.put("propStr", String.valueOf(i));
        keyValuePropsList.add(props);
    }
    logger.info("OrderbyDocumentQueryTest.beforeClass : created 30 docs");
    // One document with no extra properties.
    props = new HashMap<>();
    keyValuePropsList.add(props);
    createdDocuments = bulkInsert(createdCollection, keyValuePropsList);
    // 10 documents sharing the partition key "duplicateParitionKeyValue",
    // used by the single-partition-scoped query tests.
    for (int i = 0; i < 10; i++) {
        Map<String, Object> p = new HashMap<>();
        p.put("propScopedPartitionInt", i);
        CosmosItemProperties doc = getDocumentDefinition("duplicateParitionKeyValue", UUID.randomUUID().toString(), p);
        CosmosItemRequestOptions options = new CosmosItemRequestOptions();
        options.partitionKey(new PartitionKey(doc.get("mypk")));
        createdDocuments.add(createDocument(createdCollection, doc).read(options).block().properties());
    }
    logger.info("OrderbyDocumentQueryTest.beforeClass : created 10 docs");
    // Count physical partitions; request-charge assertions scale with this number.
    numberOfPartitions = CosmosBridgeInternal.getAsyncDocumentClient(client)
        .readPartitionKeyRanges("dbs/" + createdDatabase.id() + "/colls/" + createdCollection.id(), null)
        .flatMap(p -> Flux.fromIterable(p.results())).collectList().single().block().size();
    logger.info("numberOfPartitions = {}", numberOfPartitions);
    waitIfNeededForReplicasToCatchUp(clientBuilder());
}
// NOTE(review): debug print — prefer the inherited logger (from TestSuiteBase, via
// DocumentClientTest) over System.out so output goes through the logging pipeline.
System.out.println("OrderbyDocumentQueryTest.beforeClass");
/**
 * One-time suite setup: builds the shared client, database and container, clears the
 * container, seeds it with the documents the ORDER BY tests expect, and records the
 * partition count used by request-charge assertions.
 *
 * @throws Exception if any setup step fails
 */
public void beforeClass() throws Exception {
    client = clientBuilder().build();
    createdDatabase = getSharedCosmosDatabase(client);
    createdCollection = getSharedMultiPartitionCosmosContainer(client);
    // Remove documents left over from earlier runs so expected counts hold.
    truncateCollection(createdCollection);
    List<Map<String, Object>> keyValuePropsList = new ArrayList<>();
    Map<String, Object> props;
    // 30 documents carrying an int and the string form of the same value (propInt / propStr).
    for(int i = 0; i < 30; i++) {
        props = new HashMap<>();
        props.put("propInt", i);
        props.put("propStr", String.valueOf(i));
        keyValuePropsList.add(props);
    }
    // One document with no extra properties.
    props = new HashMap<>();
    keyValuePropsList.add(props);
    createdDocuments = bulkInsert(createdCollection, keyValuePropsList);
    // 10 documents sharing the partition key "duplicateParitionKeyValue",
    // used by the single-partition-scoped query tests.
    for(int i = 0; i < 10; i++) {
        Map<String, Object> p = new HashMap<>();
        p.put("propScopedPartitionInt", i);
        CosmosItemProperties doc = getDocumentDefinition("duplicateParitionKeyValue", UUID.randomUUID().toString(), p);
        CosmosItemRequestOptions options = new CosmosItemRequestOptions();
        options.partitionKey(new PartitionKey(doc.get("mypk")));
        createdDocuments.add(createDocument(createdCollection, doc).read(options).block().properties());
    }
    // Count physical partitions; request-charge assertions scale with this number.
    numberOfPartitions = CosmosBridgeInternal.getAsyncDocumentClient(client)
        .readPartitionKeyRanges("dbs/" + createdDatabase.id() + "/colls/" + createdCollection.id(), null)
        .flatMap(p -> Flux.fromIterable(p.results())).collectList().single().block().size();
    waitIfNeededForReplicasToCatchUp(clientBuilder());
}
/**
 * TestNG suite exercising cross-partition ORDER BY queries against a shared multi-partition
 * Cosmos container: result ordering, TOP, continuation tokens (valid and invalid), and
 * single-partition-scoped continuation.
 */
class OrderbyDocumentQueryTest extends TestSuiteBase {
    // Minimum request charge expected from each physical partition touched by a query.
    private final double minQueryRequestChargePerPartition = 2.0;
    private CosmosClient client;
    private CosmosContainer createdCollection;
    private CosmosDatabase createdDatabase;
    // Documents seeded by suite setup; tests derive their expected orderings from this list.
    private List<CosmosItemProperties> createdDocuments = new ArrayList<>();
    // Physical partition count, used to scale request-charge assertions.
    private int numberOfPartitions;

    @Factory(dataProvider = "clientBuildersWithDirect")
    public OrderbyDocumentQueryTest(CosmosClientBuilder clientBuilder) {
        super(clientBuilder);
    }

    /** ORDER BY query matching exactly one document returns that document, with query metrics toggled. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "queryMetricsArgProvider")
    public void queryDocumentsValidateContent(boolean qmEnabled) throws Exception {
        CosmosItemProperties expectedDocument = createdDocuments.get(0);
        String query = String.format("SELECT * from root r where r.propStr = '%s'"
            + " ORDER BY r.propInt", expectedDocument.getString("propStr"));
        FeedOptions options = new FeedOptions();
        options.enableCrossPartitionQuery(true);
        options.populateQueryMetrics(qmEnabled);
        Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options);
        List<String> expectedResourceIds = new ArrayList<>();
        expectedResourceIds.add(expectedDocument.resourceId());
        Map<String, ResourceValidator<CosmosItemProperties>> resourceIDToValidator = new HashMap<>();
        resourceIDToValidator.put(expectedDocument.resourceId(),
            new ResourceValidator.Builder<CosmosItemProperties>().areEqual(expectedDocument).build());
        FeedResponseListValidator<CosmosItemProperties> validator = new FeedResponseListValidator.Builder<CosmosItemProperties>()
            .numberOfPages(1)
            .containsExactly(expectedResourceIds)
            .validateAllResources(resourceIDToValidator)
            .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition)
            .allPagesSatisfy(new FeedResponseValidator.Builder<CosmosItemProperties>().hasRequestChargeHeader().build())
            .hasValidQueryMetrics(qmEnabled)
            .build();
        validateQuerySuccess(queryObservable, validator);
    }

    /** ORDER BY query with no matches still yields one (empty) page with a request charge. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT)
    public void queryDocuments_NoResults() throws Exception {
        String query = "SELECT * from root r where r.id = '2' ORDER BY r.propInt";
        FeedOptions options = new FeedOptions();
        options.enableCrossPartitionQuery(true);
        Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options);
        FeedResponseListValidator<CosmosItemProperties> validator = new FeedResponseListValidator.Builder<CosmosItemProperties>()
            .containsExactly(new ArrayList<>())
            .numberOfPages(1)
            .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition)
            .allPagesSatisfy(new FeedResponseValidator.Builder<CosmosItemProperties>()
                .hasRequestChargeHeader().build())
            .build();
        validateQuerySuccess(queryObservable, validator);
    }

    @DataProvider(name = "sortOrder")
    public Object[][] sortOrder() {
        return new Object[][] { { "ASC" }, {"DESC"} };
    }

    /** ORDER BY r.propInt in both directions, paged at 3 items per page. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "sortOrder")
    public void queryOrderBy(String sortOrder) throws Exception {
        String query = String.format("SELECT * FROM r ORDER BY r.propInt %s", sortOrder);
        FeedOptions options = new FeedOptions();
        options.enableCrossPartitionQuery(true);
        int pageSize = 3;
        options.maxItemCount(pageSize);
        Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options);
        // Expected order is computed client-side; nullsFirst matches the ascending case.
        Comparator<Integer> validatorComparator = Comparator.nullsFirst(Comparator.<Integer>naturalOrder());
        List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("propInt", d -> d.getInt("propInt"), validatorComparator);
        if ("DESC".equals(sortOrder)) {
            Collections.reverse(expectedResourceIds);
        }
        int expectedPageSize = expectedNumberOfPages(expectedResourceIds.size(), pageSize);
        FeedResponseListValidator<CosmosItemProperties> validator = new FeedResponseListValidator.Builder<CosmosItemProperties>()
            .containsExactly(expectedResourceIds)
            .numberOfPages(expectedPageSize)
            .allPagesSatisfy(new FeedResponseValidator.Builder<CosmosItemProperties>()
                .hasRequestChargeHeader().build())
            .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition)
            .build();
        validateQuerySuccess(queryObservable, validator);
    }

    /** Ascending ORDER BY over the integer property. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT)
    public void queryOrderByInt() throws Exception {
        String query = "SELECT * FROM r ORDER BY r.propInt";
        FeedOptions options = new FeedOptions();
        options.enableCrossPartitionQuery(true);
        int pageSize = 3;
        options.maxItemCount(pageSize);
        Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options);
        Comparator<Integer> validatorComparator = Comparator.nullsFirst(Comparator.<Integer>naturalOrder());
        List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("propInt", d -> d.getInt("propInt"), validatorComparator);
        int expectedPageSize = expectedNumberOfPages(expectedResourceIds.size(), pageSize);
        FeedResponseListValidator<CosmosItemProperties> validator = new FeedResponseListValidator.Builder<CosmosItemProperties>()
            .containsExactly(expectedResourceIds)
            .numberOfPages(expectedPageSize)
            .allPagesSatisfy(new FeedResponseValidator.Builder<CosmosItemProperties>()
                .hasRequestChargeHeader().build())
            .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition)
            .build();
        validateQuerySuccess(queryObservable, validator);
    }

    /** Ascending ORDER BY over the string property. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT)
    public void queryOrderByString() throws Exception {
        String query = "SELECT * FROM r ORDER BY r.propStr";
        FeedOptions options = new FeedOptions();
        options.enableCrossPartitionQuery(true);
        int pageSize = 3;
        options.maxItemCount(pageSize);
        Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options);
        Comparator<String> validatorComparator = Comparator.nullsFirst(Comparator.<String>naturalOrder());
        List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("propStr", d -> d.getString("propStr"), validatorComparator);
        int expectedPageSize = expectedNumberOfPages(expectedResourceIds.size(), pageSize);
        FeedResponseListValidator<CosmosItemProperties> validator = new FeedResponseListValidator.Builder<CosmosItemProperties>()
            .containsExactly(expectedResourceIds)
            .numberOfPages(expectedPageSize)
            .allPagesSatisfy(new FeedResponseValidator.Builder<CosmosItemProperties>()
                .hasRequestChargeHeader().build())
            .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition)
            .build();
        validateQuerySuccess(queryObservable, validator);
    }

    // TOP values spanning zero, mid-range, and past the total document count.
    @DataProvider(name = "topValue")
    public Object[][] topValueParameter() {
        return new Object[][] {
            { 0 },
            { 1 },
            { 5 },
            { createdDocuments.size() - 1 },
            { createdDocuments.size() },
            { createdDocuments.size() + 1 },
            { 2 * createdDocuments.size() }
        };
    }

    /** TOP combined with ORDER BY truncates the ordered result set at topValue. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "topValue")
    public void queryOrderWithTop(int topValue) throws Exception {
        String query = String.format("SELECT TOP %d * FROM r ORDER BY r.propInt", topValue);
        FeedOptions options = new FeedOptions();
        options.enableCrossPartitionQuery(true);
        int pageSize = 3;
        options.maxItemCount(pageSize);
        Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options);
        Comparator<Integer> validatorComparator = Comparator.nullsFirst(Comparator.<Integer>naturalOrder());
        List<String> expectedResourceIds =
            sortDocumentsAndCollectResourceIds("propInt", d -> d.getInt("propInt"), validatorComparator)
                .stream().limit(topValue).collect(Collectors.toList());
        int expectedPageSize = expectedNumberOfPages(expectedResourceIds.size(), pageSize);
        FeedResponseListValidator<CosmosItemProperties> validator = new FeedResponseListValidator.Builder<CosmosItemProperties>()
            .containsExactly(expectedResourceIds)
            .numberOfPages(expectedPageSize)
            .allPagesSatisfy(new FeedResponseValidator.Builder<CosmosItemProperties>()
                .hasRequestChargeHeader().build())
            // TOP 0 short-circuits, so only a minimal charge is expected in that case.
            .totalRequestChargeIsAtLeast(numberOfPartitions * (topValue > 0 ? minQueryRequestChargePerPartition : 1))
            .build();
        validateQuerySuccess(queryObservable, validator);
    }

    // Sorts the seeded documents by the given property (skipping documents that lack it)
    // and returns their resource ids in that order — the client-side "expected" ordering.
    private <T> List<String> sortDocumentsAndCollectResourceIds(String propName, Function<CosmosItemProperties, T> extractProp, Comparator<T> comparer) {
        return createdDocuments.stream()
            .filter(d -> d.getMap().containsKey(propName))
            .sorted((d1, d2) -> comparer.compare(extractProp.apply(d1), extractProp.apply(d2)))
            .map(Resource::resourceId).collect(Collectors.toList());
    }

    /** Reads one page of a single-partition ORDER BY query, then resumes from its continuation token. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT)
    public void queryScopedToSinglePartition_StartWithContinuationToken() throws Exception {
        String query = "SELECT * FROM r ORDER BY r.propScopedPartitionInt ASC";
        FeedOptions options = new FeedOptions();
        options.partitionKey(new PartitionKey("duplicateParitionKeyValue"));
        options.maxItemCount(3);
        Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options);
        // Take just the first page and capture its continuation token.
        TestSubscriber<FeedResponse<CosmosItemProperties>> subscriber = new TestSubscriber<>();
        queryObservable.take(1).subscribe(subscriber);
        subscriber.awaitTerminalEvent();
        subscriber.assertComplete();
        subscriber.assertNoErrors();
        assertThat(subscriber.valueCount()).isEqualTo(1);
        FeedResponse<CosmosItemProperties> page = (FeedResponse<CosmosItemProperties>) subscriber.getEvents().get(0).get(0);
        assertThat(page.results()).hasSize(3);
        assertThat(page.continuationToken()).isNotEmpty();
        // Re-issue the query starting from the captured token; expect the remaining 7 docs.
        options.requestContinuation(page.continuationToken());
        queryObservable = createdCollection.queryItems(query, options);
        List<CosmosItemProperties> expectedDocs = createdDocuments.stream()
            .filter(d -> (StringUtils.equals("duplicateParitionKeyValue", d.getString("mypk"))))
            .filter(d -> (d.getInt("propScopedPartitionInt") > 2)).collect(Collectors.toList());
        int expectedPageSize = (expectedDocs.size() + options.maxItemCount() - 1) / options.maxItemCount();
        assertThat(expectedDocs).hasSize(10 - 3);
        FeedResponseListValidator<CosmosItemProperties> validator = null;
        validator = new FeedResponseListValidator.Builder<CosmosItemProperties>()
            .containsExactly(expectedDocs.stream()
                .sorted((e1, e2) -> Integer.compare(e1.getInt("propScopedPartitionInt"), e2.getInt("propScopedPartitionInt")))
                .map(d -> d.resourceId()).collect(Collectors.toList()))
            .numberOfPages(expectedPageSize)
            .allPagesSatisfy(new FeedResponseValidator.Builder<CosmosItemProperties>()
                .requestChargeGreaterThanOrEqualTo(1.0).build())
            .build();
        validateQuerySuccess(queryObservable, validator);
    }

    /** Serialize/deserialize round trip of OrderByContinuationToken, plus a malformed-token parse failure. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT)
    public void orderByContinuationTokenRoundTrip() throws Exception {
        {
            // Round trip: every field must survive toString() -> tryParse().
            OrderByContinuationToken orderByContinuationToken = new OrderByContinuationToken(
                new CompositeContinuationToken(
                    "asdf",
                    new Range<String>("A", "D", false, true)),
                new QueryItem[] {new QueryItem("{\"item\" : 42}")},
                "rid",
                false);
            String serialized = orderByContinuationToken.toString();
            ValueHolder<OrderByContinuationToken> outOrderByContinuationToken = new ValueHolder<OrderByContinuationToken>();
            assertThat(OrderByContinuationToken.tryParse(serialized, outOrderByContinuationToken)).isTrue();
            OrderByContinuationToken deserialized = outOrderByContinuationToken.v;
            CompositeContinuationToken compositeContinuationToken = deserialized.getCompositeContinuationToken();
            String token = compositeContinuationToken.getToken();
            Range<String> range = compositeContinuationToken.getRange();
            assertThat(token).isEqualTo("asdf");
            assertThat(range.getMin()).isEqualTo("A");
            assertThat(range.getMax()).isEqualTo("D");
            assertThat(range.isMinInclusive()).isEqualTo(false);
            assertThat(range.isMaxInclusive()).isEqualTo(true);
            QueryItem[] orderByItems = deserialized.getOrderByItems();
            assertThat(orderByItems).isNotNull();
            assertThat(orderByItems.length).isEqualTo(1);
            assertThat(orderByItems[0].getItem()).isEqualTo(42);
            String rid = deserialized.getRid();
            assertThat(rid).isEqualTo("rid");
            boolean inclusive = deserialized.getInclusive();
            assertThat(inclusive).isEqualTo(false);
        }
        {
            // A JSON blob that is not a valid token must fail to parse.
            ValueHolder<OrderByContinuationToken> outOrderByContinuationToken = new ValueHolder<OrderByContinuationToken>();
            assertThat(OrderByContinuationToken.tryParse("{\"property\" : \"Not a valid Order By Token\"}", outOrderByContinuationToken)).isFalse();
        }
    }

    /** Drains an integer ORDER BY query via continuation tokens at several page sizes. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT * 10, dataProvider = "sortOrder", retryAnalyzer = RetryAnalyzer.class)
    public void queryDocumentsWithOrderByContinuationTokensInteger(String sortOrder) throws Exception {
        String query = String.format("SELECT * FROM c ORDER BY c.propInt %s", sortOrder);
        Comparator<Integer> order = sortOrder.equals("ASC")?Comparator.naturalOrder():Comparator.reverseOrder();
        Comparator<Integer> validatorComparator = Comparator.nullsFirst(order);
        List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("propInt", d -> d.getInt("propInt"), validatorComparator);
        this.queryWithContinuationTokensAndPageSizes(query, new int[] { 1, 5, 10, 100}, expectedResourceIds);
    }

    /** Drains a string ORDER BY query via continuation tokens at several page sizes. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT * 10, dataProvider = "sortOrder")
    public void queryDocumentsWithOrderByContinuationTokensString(String sortOrder) throws Exception {
        String query = String.format("SELECT * FROM c ORDER BY c.id %s", sortOrder);
        Comparator<String> order = sortOrder.equals("ASC")?Comparator.naturalOrder():Comparator.reverseOrder();
        Comparator<String> validatorComparator = Comparator.nullsFirst(order);
        List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("id", d -> d.getString("id"), validatorComparator);
        this.queryWithContinuationTokensAndPageSizes(query, new int[] { 1, 5, 10, 100 }, expectedResourceIds);
    }

    /** A continuation token from a different query must be rejected with CosmosClientException. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT * 10, dataProvider = "sortOrder")
    public void queryDocumentsWithInvalidOrderByContinuationTokensString(String sortOrder) throws Exception {
        String query = String.format("SELECT * FROM c ORDER BY c.id %s", sortOrder);
        Comparator<String> validatorComparator;
        if(sortOrder.equals("ASC")) {
            validatorComparator = Comparator.nullsFirst(Comparator.<String>naturalOrder());
        }else{
            validatorComparator = Comparator.nullsFirst(Comparator.<String>reverseOrder());
        }
        List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("id", d -> d.getString("id"), validatorComparator);
        this.assertInvalidContinuationToken(query, new int[] { 1, 5, 10, 100 }, expectedResourceIds);
    }

    // Builds a document from the given properties and inserts it, blocking until created.
    public CosmosItemProperties createDocument(CosmosContainer cosmosContainer, Map<String, Object> keyValueProps) throws CosmosClientException {
        CosmosItemProperties docDefinition = getDocumentDefinition(keyValueProps);
        return cosmosContainer.createItem(docDefinition).block().properties();
    }

    // Builds documents from each property map and inserts them in bulk (blocking).
    public List<CosmosItemProperties> bulkInsert(CosmosContainer cosmosContainer, List<Map<String, Object>> keyValuePropsList) {
        ArrayList<CosmosItemProperties> result = new ArrayList<CosmosItemProperties>();
        for(Map<String, Object> keyValueProps: keyValuePropsList) {
            CosmosItemProperties docDefinition = getDocumentDefinition(keyValueProps);
            result.add(docDefinition);
        }
        return bulkInsertBlocking(cosmosContainer, result);
    }

    // NOTE(review): presumably a throttling/replica-settle delay between tests — confirm why 10s.
    @BeforeMethod(groups = { "simple" })
    public void beforeMethod() throws Exception {
        TimeUnit.SECONDS.sleep(10);
    }

    @BeforeClass(groups = { "simple" }, timeOut = SETUP_TIMEOUT)
    // NOTE(review): the @BeforeClass annotation above appears to have lost its method in this
    // excerpt (likely beforeClass(), shown elsewhere in this file) — as written it would
    // annotate afterClass() together with @AfterClass. Restore from the original source.
    @AfterClass(groups = { "simple" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
    public void afterClass() {
        safeClose(client);
    }

    // Issues the query with a syntactically valid but foreign continuation token and
    // asserts the service rejects it with CosmosClientException.
    private void assertInvalidContinuationToken(String query, int[] pageSize, List<String> expectedIds) {
        String requestContinuation = null;
        do {
            FeedOptions options = new FeedOptions();
            options.maxItemCount(1);
            options.enableCrossPartitionQuery(true);
            options.maxDegreeOfParallelism(2);
            OrderByContinuationToken orderByContinuationToken = new OrderByContinuationToken(
                new CompositeContinuationToken(
                    "asdf",
                    new Range<String>("A", "D", false, true)),
                new QueryItem[] {new QueryItem("{\"item\" : 42}")},
                "rid",
                false);
            options.requestContinuation(orderByContinuationToken.toString());
            Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options);
            TestSubscriber<FeedResponse<CosmosItemProperties>> testSubscriber = new TestSubscriber<>();
            queryObservable.subscribe(testSubscriber);
            testSubscriber.awaitTerminalEvent(TIMEOUT, TimeUnit.MILLISECONDS);
            testSubscriber.assertError(CosmosClientException.class);
        } while (requestContinuation != null);
    }

    // Drains the query at each page size and checks the collected resource ids
    // exactly match the expected ordering.
    private void queryWithContinuationTokensAndPageSizes(String query, int[] pageSizes, List<String> expectedIds) {
        for (int pageSize : pageSizes) {
            List<CosmosItemProperties> receivedDocuments = this.queryWithContinuationTokens(query, pageSize);
            List<String> actualIds = new ArrayList<String>();
            for (CosmosItemProperties document : receivedDocuments) {
                actualIds.add(document.resourceId());
            }
            assertThat(actualIds).containsExactlyElementsOf(expectedIds);
        }
    }

    // Fetches the query one page at a time, feeding each page's continuation token into
    // the next request, until the token comes back null; returns all documents received.
    private List<CosmosItemProperties> queryWithContinuationTokens(String query, int pageSize) {
        String requestContinuation = null;
        List<String> continuationTokens = new ArrayList<String>();
        List<CosmosItemProperties> receivedDocuments = new ArrayList<CosmosItemProperties>();
        do {
            FeedOptions options = new FeedOptions();
            options.maxItemCount(pageSize);
            options.enableCrossPartitionQuery(true);
            options.maxDegreeOfParallelism(2);
            options.requestContinuation(requestContinuation);
            Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options);
            TestSubscriber<FeedResponse<CosmosItemProperties>> testSubscriber = new TestSubscriber<>();
            queryObservable.subscribe(testSubscriber);
            testSubscriber.awaitTerminalEvent(TIMEOUT, TimeUnit.MILLISECONDS);
            testSubscriber.assertNoErrors();
            testSubscriber.assertComplete();
            FeedResponse<CosmosItemProperties> firstPage = (FeedResponse<CosmosItemProperties>) testSubscriber.getEvents().get(0).get(0);
            requestContinuation = firstPage.continuationToken();
            receivedDocuments.addAll(firstPage.results());
            continuationTokens.add(requestContinuation);
        } while (requestContinuation != null);
        return receivedDocuments;
    }

    // Hand-builds a JSON document body from the key/value pairs plus the given id and
    // partition key ("mypk"), and wraps it in CosmosItemProperties.
    private static CosmosItemProperties getDocumentDefinition(String partitionKey, String id, Map<String, Object> keyValuePair) {
        StringBuilder sb = new StringBuilder();
        sb.append("{\n");
        for(String key: keyValuePair.keySet()) {
            Object val = keyValuePair.get(key);
            sb.append(" ");
            sb.append("\"").append(key).append("\"").append(" :" );
            if (val == null) {
                sb.append("null");
            } else {
                sb.append(toJson(val));
            }
            sb.append(",\n");
        }
        sb.append(String.format(" \"id\": \"%s\",\n", id));
        sb.append(String.format(" \"mypk\": \"%s\"\n", partitionKey));
        sb.append("}");
        return new CosmosItemProperties(sb.toString());
    }

    // Convenience overload: a fresh UUID serves as both id and partition key.
    private static CosmosItemProperties getDocumentDefinition(Map<String, Object> keyValuePair) {
        String uuid = UUID.randomUUID().toString();
        return getDocumentDefinition(uuid, uuid, keyValuePair);
    }

    // Serializes a value to JSON, rethrowing serialization failures as IllegalStateException.
    private static String toJson(Object object){
        try {
            return Utils.getSimpleObjectMapper().writeValueAsString(object);
        } catch (JsonProcessingException e) {
            throw new IllegalStateException(e);
        }
    }
}
class OrderbyDocumentQueryTest extends TestSuiteBase { private final double minQueryRequestChargePerPartition = 2.0; private CosmosClient client; private CosmosContainer createdCollection; private CosmosDatabase createdDatabase; private List<CosmosItemProperties> createdDocuments = new ArrayList<>(); private int numberOfPartitions; @Factory(dataProvider = "clientBuildersWithDirect") public OrderbyDocumentQueryTest(CosmosClientBuilder clientBuilder) { super(clientBuilder); } @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "queryMetricsArgProvider") public void queryDocumentsValidateContent(boolean qmEnabled) throws Exception { CosmosItemProperties expectedDocument = createdDocuments.get(0); String query = String.format("SELECT * from root r where r.propStr = '%s'" + " ORDER BY r.propInt" , expectedDocument.getString("propStr")); FeedOptions options = new FeedOptions(); options.enableCrossPartitionQuery(true); options.populateQueryMetrics(qmEnabled); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options); List<String> expectedResourceIds = new ArrayList<>(); expectedResourceIds.add(expectedDocument.resourceId()); Map<String, ResourceValidator<CosmosItemProperties>> resourceIDToValidator = new HashMap<>(); resourceIDToValidator.put(expectedDocument.resourceId(), new ResourceValidator.Builder<CosmosItemProperties>().areEqual(expectedDocument).build()); FeedResponseListValidator<CosmosItemProperties> validator = new FeedResponseListValidator.Builder<CosmosItemProperties>() .numberOfPages(1) .containsExactly(expectedResourceIds) .validateAllResources(resourceIDToValidator) .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition) .allPagesSatisfy(new FeedResponseValidator.Builder<CosmosItemProperties>().hasRequestChargeHeader().build()) .hasValidQueryMetrics(qmEnabled) .build(); validateQuerySuccess(queryObservable, validator); } @Test(groups = { "simple" }, timeOut = TIMEOUT) 
public void queryDocuments_NoResults() throws Exception { String query = "SELECT * from root r where r.id = '2' ORDER BY r.propInt"; FeedOptions options = new FeedOptions(); options.enableCrossPartitionQuery(true); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options); FeedResponseListValidator<CosmosItemProperties> validator = new FeedResponseListValidator.Builder<CosmosItemProperties>() .containsExactly(new ArrayList<>()) .numberOfPages(1) .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition) .allPagesSatisfy(new FeedResponseValidator.Builder<CosmosItemProperties>() .hasRequestChargeHeader().build()) .build(); validateQuerySuccess(queryObservable, validator); } @DataProvider(name = "sortOrder") public Object[][] sortOrder() { return new Object[][] { { "ASC" }, {"DESC"} }; } @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "sortOrder") public void queryOrderBy(String sortOrder) throws Exception { String query = String.format("SELECT * FROM r ORDER BY r.propInt %s", sortOrder); FeedOptions options = new FeedOptions(); options.enableCrossPartitionQuery(true); int pageSize = 3; options.maxItemCount(pageSize); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options); Comparator<Integer> validatorComparator = Comparator.nullsFirst(Comparator.<Integer>naturalOrder()); List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("propInt", d -> d.getInt("propInt"), validatorComparator); if ("DESC".equals(sortOrder)) { Collections.reverse(expectedResourceIds); } int expectedPageSize = expectedNumberOfPages(expectedResourceIds.size(), pageSize); FeedResponseListValidator<CosmosItemProperties> validator = new FeedResponseListValidator.Builder<CosmosItemProperties>() .containsExactly(expectedResourceIds) .numberOfPages(expectedPageSize) .allPagesSatisfy(new FeedResponseValidator.Builder<CosmosItemProperties>() 
.hasRequestChargeHeader().build()) .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition) .build(); validateQuerySuccess(queryObservable, validator); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void queryOrderByInt() throws Exception { String query = "SELECT * FROM r ORDER BY r.propInt"; FeedOptions options = new FeedOptions(); options.enableCrossPartitionQuery(true); int pageSize = 3; options.maxItemCount(pageSize); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options); Comparator<Integer> validatorComparator = Comparator.nullsFirst(Comparator.<Integer>naturalOrder()); List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("propInt", d -> d.getInt("propInt"), validatorComparator); int expectedPageSize = expectedNumberOfPages(expectedResourceIds.size(), pageSize); FeedResponseListValidator<CosmosItemProperties> validator = new FeedResponseListValidator.Builder<CosmosItemProperties>() .containsExactly(expectedResourceIds) .numberOfPages(expectedPageSize) .allPagesSatisfy(new FeedResponseValidator.Builder<CosmosItemProperties>() .hasRequestChargeHeader().build()) .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition) .build(); validateQuerySuccess(queryObservable, validator); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void queryOrderByString() throws Exception { String query = "SELECT * FROM r ORDER BY r.propStr"; FeedOptions options = new FeedOptions(); options.enableCrossPartitionQuery(true); int pageSize = 3; options.maxItemCount(pageSize); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options); Comparator<String> validatorComparator = Comparator.nullsFirst(Comparator.<String>naturalOrder()); List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("propStr", d -> d.getString("propStr"), validatorComparator); int expectedPageSize = 
expectedNumberOfPages(expectedResourceIds.size(), pageSize); FeedResponseListValidator<CosmosItemProperties> validator = new FeedResponseListValidator.Builder<CosmosItemProperties>() .containsExactly(expectedResourceIds) .numberOfPages(expectedPageSize) .allPagesSatisfy(new FeedResponseValidator.Builder<CosmosItemProperties>() .hasRequestChargeHeader().build()) .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition) .build(); validateQuerySuccess(queryObservable, validator); } @DataProvider(name = "topValue") public Object[][] topValueParameter() { return new Object[][] { { 0 }, { 1 }, { 5 }, { createdDocuments.size() - 1 }, { createdDocuments.size() }, { createdDocuments.size() + 1 }, { 2 * createdDocuments.size() } }; } @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "topValue") public void queryOrderWithTop(int topValue) throws Exception { String query = String.format("SELECT TOP %d * FROM r ORDER BY r.propInt", topValue); FeedOptions options = new FeedOptions(); options.enableCrossPartitionQuery(true); int pageSize = 3; options.maxItemCount(pageSize); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options); Comparator<Integer> validatorComparator = Comparator.nullsFirst(Comparator.<Integer>naturalOrder()); List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("propInt", d -> d.getInt("propInt"), validatorComparator) .stream().limit(topValue).collect(Collectors.toList()); int expectedPageSize = expectedNumberOfPages(expectedResourceIds.size(), pageSize); FeedResponseListValidator<CosmosItemProperties> validator = new FeedResponseListValidator.Builder<CosmosItemProperties>() .containsExactly(expectedResourceIds) .numberOfPages(expectedPageSize) .allPagesSatisfy(new FeedResponseValidator.Builder<CosmosItemProperties>() .hasRequestChargeHeader().build()) .totalRequestChargeIsAtLeast(numberOfPartitions * (topValue > 0 ? 
minQueryRequestChargePerPartition : 1)) .build(); validateQuerySuccess(queryObservable, validator); } private <T> List<String> sortDocumentsAndCollectResourceIds(String propName, Function<CosmosItemProperties, T> extractProp, Comparator<T> comparer) { return createdDocuments.stream() .filter(d -> d.getMap().containsKey(propName)) .sorted((d1, d2) -> comparer.compare(extractProp.apply(d1), extractProp.apply(d2))) .map(Resource::resourceId).collect(Collectors.toList()); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void queryScopedToSinglePartition_StartWithContinuationToken() throws Exception { String query = "SELECT * FROM r ORDER BY r.propScopedPartitionInt ASC"; FeedOptions options = new FeedOptions(); options.partitionKey(new PartitionKey("duplicateParitionKeyValue")); options.maxItemCount(3); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options); TestSubscriber<FeedResponse<CosmosItemProperties>> subscriber = new TestSubscriber<>(); queryObservable.take(1).subscribe(subscriber); subscriber.awaitTerminalEvent(); subscriber.assertComplete(); subscriber.assertNoErrors(); assertThat(subscriber.valueCount()).isEqualTo(1); FeedResponse<CosmosItemProperties> page = (FeedResponse<CosmosItemProperties>) subscriber.getEvents().get(0).get(0); assertThat(page.results()).hasSize(3); assertThat(page.continuationToken()).isNotEmpty(); options.requestContinuation(page.continuationToken()); queryObservable = createdCollection.queryItems(query, options); List<CosmosItemProperties> expectedDocs = createdDocuments.stream() .filter(d -> (StringUtils.equals("duplicateParitionKeyValue", d.getString("mypk")))) .filter(d -> (d.getInt("propScopedPartitionInt") > 2)).collect(Collectors.toList()); int expectedPageSize = (expectedDocs.size() + options.maxItemCount() - 1) / options.maxItemCount(); assertThat(expectedDocs).hasSize(10 - 3); FeedResponseListValidator<CosmosItemProperties> validator = null; validator = new 
FeedResponseListValidator.Builder<CosmosItemProperties>() .containsExactly(expectedDocs.stream() .sorted((e1, e2) -> Integer.compare(e1.getInt("propScopedPartitionInt"), e2.getInt("propScopedPartitionInt"))) .map(d -> d.resourceId()).collect(Collectors.toList())) .numberOfPages(expectedPageSize) .allPagesSatisfy(new FeedResponseValidator.Builder<CosmosItemProperties>() .requestChargeGreaterThanOrEqualTo(1.0).build()) .build(); validateQuerySuccess(queryObservable, validator); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void orderByContinuationTokenRoundTrip() throws Exception { { OrderByContinuationToken orderByContinuationToken = new OrderByContinuationToken( new CompositeContinuationToken( "asdf", new Range<String>("A", "D", false, true)), new QueryItem[] {new QueryItem("{\"item\" : 42}")}, "rid", false); String serialized = orderByContinuationToken.toString(); ValueHolder<OrderByContinuationToken> outOrderByContinuationToken = new ValueHolder<OrderByContinuationToken>(); assertThat(OrderByContinuationToken.tryParse(serialized, outOrderByContinuationToken)).isTrue(); OrderByContinuationToken deserialized = outOrderByContinuationToken.v; CompositeContinuationToken compositeContinuationToken = deserialized.getCompositeContinuationToken(); String token = compositeContinuationToken.getToken(); Range<String> range = compositeContinuationToken.getRange(); assertThat(token).isEqualTo("asdf"); assertThat(range.getMin()).isEqualTo("A"); assertThat(range.getMax()).isEqualTo("D"); assertThat(range.isMinInclusive()).isEqualTo(false); assertThat(range.isMaxInclusive()).isEqualTo(true); QueryItem[] orderByItems = deserialized.getOrderByItems(); assertThat(orderByItems).isNotNull(); assertThat(orderByItems.length).isEqualTo(1); assertThat(orderByItems[0].getItem()).isEqualTo(42); String rid = deserialized.getRid(); assertThat(rid).isEqualTo("rid"); boolean inclusive = deserialized.getInclusive(); assertThat(inclusive).isEqualTo(false); } { 
ValueHolder<OrderByContinuationToken> outOrderByContinuationToken = new ValueHolder<OrderByContinuationToken>(); assertThat(OrderByContinuationToken.tryParse("{\"property\" : \"Not a valid Order By Token\"}", outOrderByContinuationToken)).isFalse(); } } @Test(groups = { "simple" }, timeOut = TIMEOUT * 10, dataProvider = "sortOrder", retryAnalyzer = RetryAnalyzer.class) public void queryDocumentsWithOrderByContinuationTokensInteger(String sortOrder) throws Exception { String query = String.format("SELECT * FROM c ORDER BY c.propInt %s", sortOrder); Comparator<Integer> order = sortOrder.equals("ASC")?Comparator.naturalOrder():Comparator.reverseOrder(); Comparator<Integer> validatorComparator = Comparator.nullsFirst(order); List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("propInt", d -> d.getInt("propInt"), validatorComparator); this.queryWithContinuationTokensAndPageSizes(query, new int[] { 1, 5, 10, 100}, expectedResourceIds); } @Test(groups = { "simple" }, timeOut = TIMEOUT * 10, dataProvider = "sortOrder") public void queryDocumentsWithOrderByContinuationTokensString(String sortOrder) throws Exception { String query = String.format("SELECT * FROM c ORDER BY c.id %s", sortOrder); Comparator<String> order = sortOrder.equals("ASC")?Comparator.naturalOrder():Comparator.reverseOrder(); Comparator<String> validatorComparator = Comparator.nullsFirst(order); List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("id", d -> d.getString("id"), validatorComparator); this.queryWithContinuationTokensAndPageSizes(query, new int[] { 1, 5, 10, 100 }, expectedResourceIds); } @Test(groups = { "simple" }, timeOut = TIMEOUT * 10, dataProvider = "sortOrder") public void queryDocumentsWithInvalidOrderByContinuationTokensString(String sortOrder) throws Exception { String query = String.format("SELECT * FROM c ORDER BY c.id %s", sortOrder); Comparator<String> validatorComparator; if(sortOrder.equals("ASC")) { validatorComparator = 
Comparator.nullsFirst(Comparator.<String>naturalOrder()); }else{ validatorComparator = Comparator.nullsFirst(Comparator.<String>reverseOrder()); } List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("id", d -> d.getString("id"), validatorComparator); this.assertInvalidContinuationToken(query, new int[] { 1, 5, 10, 100 }, expectedResourceIds); } public CosmosItemProperties createDocument(CosmosContainer cosmosContainer, Map<String, Object> keyValueProps) throws CosmosClientException { CosmosItemProperties docDefinition = getDocumentDefinition(keyValueProps); return cosmosContainer.createItem(docDefinition).block().properties(); } public List<CosmosItemProperties> bulkInsert(CosmosContainer cosmosContainer, List<Map<String, Object>> keyValuePropsList) { ArrayList<CosmosItemProperties> result = new ArrayList<CosmosItemProperties>(); for(Map<String, Object> keyValueProps: keyValuePropsList) { CosmosItemProperties docDefinition = getDocumentDefinition(keyValueProps); result.add(docDefinition); } return bulkInsertBlocking(cosmosContainer, result); } @BeforeMethod(groups = { "simple" }) public void beforeMethod() throws Exception { TimeUnit.SECONDS.sleep(10); } @BeforeClass(groups = { "simple" }, timeOut = SETUP_TIMEOUT) @AfterClass(groups = { "simple" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() { safeClose(client); } private void assertInvalidContinuationToken(String query, int[] pageSize, List<String> expectedIds) { String requestContinuation = null; do { FeedOptions options = new FeedOptions(); options.maxItemCount(1); options.enableCrossPartitionQuery(true); options.maxDegreeOfParallelism(2); OrderByContinuationToken orderByContinuationToken = new OrderByContinuationToken( new CompositeContinuationToken( "asdf", new Range<String>("A", "D", false, true)), new QueryItem[] {new QueryItem("{\"item\" : 42}")}, "rid", false); options.requestContinuation(orderByContinuationToken.toString()); 
Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options); TestSubscriber<FeedResponse<CosmosItemProperties>> testSubscriber = new TestSubscriber<>(); queryObservable.subscribe(testSubscriber); testSubscriber.awaitTerminalEvent(TIMEOUT, TimeUnit.MILLISECONDS); testSubscriber.assertError(CosmosClientException.class); } while (requestContinuation != null); } private void queryWithContinuationTokensAndPageSizes(String query, int[] pageSizes, List<String> expectedIds) { for (int pageSize : pageSizes) { List<CosmosItemProperties> receivedDocuments = this.queryWithContinuationTokens(query, pageSize); List<String> actualIds = new ArrayList<String>(); for (CosmosItemProperties document : receivedDocuments) { actualIds.add(document.resourceId()); } assertThat(actualIds).containsExactlyElementsOf(expectedIds); } } private List<CosmosItemProperties> queryWithContinuationTokens(String query, int pageSize) { String requestContinuation = null; List<String> continuationTokens = new ArrayList<String>(); List<CosmosItemProperties> receivedDocuments = new ArrayList<CosmosItemProperties>(); do { FeedOptions options = new FeedOptions(); options.maxItemCount(pageSize); options.enableCrossPartitionQuery(true); options.maxDegreeOfParallelism(2); options.requestContinuation(requestContinuation); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options); TestSubscriber<FeedResponse<CosmosItemProperties>> testSubscriber = new TestSubscriber<>(); queryObservable.subscribe(testSubscriber); testSubscriber.awaitTerminalEvent(TIMEOUT, TimeUnit.MILLISECONDS); testSubscriber.assertNoErrors(); testSubscriber.assertComplete(); FeedResponse<CosmosItemProperties> firstPage = (FeedResponse<CosmosItemProperties>) testSubscriber.getEvents().get(0).get(0); requestContinuation = firstPage.continuationToken(); receivedDocuments.addAll(firstPage.results()); continuationTokens.add(requestContinuation); } while 
(requestContinuation != null); return receivedDocuments; } private static CosmosItemProperties getDocumentDefinition(String partitionKey, String id, Map<String, Object> keyValuePair) { StringBuilder sb = new StringBuilder(); sb.append("{\n"); for(String key: keyValuePair.keySet()) { Object val = keyValuePair.get(key); sb.append(" "); sb.append("\"").append(key).append("\"").append(" :" ); if (val == null) { sb.append("null"); } else { sb.append(toJson(val)); } sb.append(",\n"); } sb.append(String.format(" \"id\": \"%s\",\n", id)); sb.append(String.format(" \"mypk\": \"%s\"\n", partitionKey)); sb.append("}"); return new CosmosItemProperties(sb.toString()); } private static CosmosItemProperties getDocumentDefinition(Map<String, Object> keyValuePair) { String uuid = UUID.randomUUID().toString(); return getDocumentDefinition(uuid, uuid, keyValuePair); } private static String toJson(Object object){ try { return Utils.getSimpleObjectMapper().writeValueAsString(object); } catch (JsonProcessingException e) { throw new IllegalStateException(e); } } }
Still debugging: these debug statements are not needed and will be removed.
/**
 * Creates the shared client/database/container, truncates the container, and seeds the
 * documents the query tests expect: 30 docs carrying propInt/propStr, one doc with
 * neither property, and 10 docs sharing the partition key "duplicateParitionKeyValue".
 * Also records the partition count and waits for replicas to catch up.
 *
 * FIX: removed the leftover System.out.println debug statements — including one that
 * claimed "created 30 docs" before the bulk insert had actually run.
 */
public void beforeClass() throws Exception {
    client = clientBuilder().build();
    createdDatabase = getSharedCosmosDatabase(client);
    createdCollection = getSharedMultiPartitionCosmosContainer(client);
    truncateCollection(createdCollection);

    List<Map<String, Object>> keyValuePropsList = new ArrayList<>();
    Map<String, Object> props;

    // 30 documents with both an int and a string property.
    for (int i = 0; i < 30; i++) {
        props = new HashMap<>();
        props.put("propInt", i);
        props.put("propStr", String.valueOf(i));
        keyValuePropsList.add(props);
    }
    // One document with neither property, to exercise nullsFirst ordering.
    props = new HashMap<>();
    keyValuePropsList.add(props);
    createdDocuments = bulkInsert(createdCollection, keyValuePropsList);

    // 10 documents sharing one partition key, for the partition-scoped test.
    for (int i = 0; i < 10; i++) {
        Map<String, Object> p = new HashMap<>();
        p.put("propScopedPartitionInt", i);
        CosmosItemProperties doc =
            getDocumentDefinition("duplicateParitionKeyValue", UUID.randomUUID().toString(), p);
        CosmosItemRequestOptions options = new CosmosItemRequestOptions();
        options.partitionKey(new PartitionKey(doc.get("mypk")));
        createdDocuments.add(createDocument(createdCollection, doc).read(options).block().properties());
    }

    numberOfPartitions = CosmosBridgeInternal.getAsyncDocumentClient(client)
        .readPartitionKeyRanges("dbs/" + createdDatabase.id() + "/colls/" + createdCollection.id(), null)
        .flatMap(p -> Flux.fromIterable(p.results())).collectList().single().block().size();

    waitIfNeededForReplicasToCatchUp(clientBuilder());
}
System.out.println("OrderbyDocumentQueryTest.beforeClass");
public void beforeClass() throws Exception { client = clientBuilder().build(); createdDatabase = getSharedCosmosDatabase(client); createdCollection = getSharedMultiPartitionCosmosContainer(client); truncateCollection(createdCollection); List<Map<String, Object>> keyValuePropsList = new ArrayList<>(); Map<String, Object> props; for(int i = 0; i < 30; i++) { props = new HashMap<>(); props.put("propInt", i); props.put("propStr", String.valueOf(i)); keyValuePropsList.add(props); } props = new HashMap<>(); keyValuePropsList.add(props); createdDocuments = bulkInsert(createdCollection, keyValuePropsList); for(int i = 0; i < 10; i++) { Map<String, Object> p = new HashMap<>(); p.put("propScopedPartitionInt", i); CosmosItemProperties doc = getDocumentDefinition("duplicateParitionKeyValue", UUID.randomUUID().toString(), p); CosmosItemRequestOptions options = new CosmosItemRequestOptions(); options.partitionKey(new PartitionKey(doc.get("mypk"))); createdDocuments.add(createDocument(createdCollection, doc).read(options).block().properties()); } numberOfPartitions = CosmosBridgeInternal.getAsyncDocumentClient(client) .readPartitionKeyRanges("dbs/" + createdDatabase.id() + "/colls/" + createdCollection.id(), null) .flatMap(p -> Flux.fromIterable(p.results())).collectList().single().block().size(); waitIfNeededForReplicasToCatchUp(clientBuilder()); }
/**
 * Tests for cross-partition ORDER BY queries against a Cosmos DB container:
 * content validation, ascending/descending ordering, TOP, continuation-token
 * paging (valid and invalid tokens), and token serialization round trips.
 *
 * NOTE(review): this appears to be a duplicate copy of OrderbyDocumentQueryTest,
 * whose other copy ends earlier in this file; two top-level classes with the same
 * name cannot compile — confirm whether this copy should be removed.
 */
class OrderbyDocumentQueryTest extends TestSuiteBase {
    // Minimum RU charge expected per partition touched by a query.
    private final double minQueryRequestChargePerPartition = 2.0;
    private CosmosClient client;
    private CosmosContainer createdCollection;
    private CosmosDatabase createdDatabase;
    // Seeded documents; query expectations are derived from this list.
    private List<CosmosItemProperties> createdDocuments = new ArrayList<>();
    private int numberOfPartitions;

    @Factory(dataProvider = "clientBuildersWithDirect")
    public OrderbyDocumentQueryTest(CosmosClientBuilder clientBuilder) {
        super(clientBuilder);
    }

    /** Single-match ORDER BY query: validates content, page count, and RU charge. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "queryMetricsArgProvider")
    public void queryDocumentsValidateContent(boolean qmEnabled) throws Exception {
        CosmosItemProperties expectedDocument = createdDocuments.get(0);
        String query = String.format("SELECT * from root r where r.propStr = '%s'"
            + " ORDER BY r.propInt", expectedDocument.getString("propStr"));
        FeedOptions options = new FeedOptions();
        options.enableCrossPartitionQuery(true);
        options.populateQueryMetrics(qmEnabled);
        Flux<FeedResponse<CosmosItemProperties>> queryObservable =
            createdCollection.queryItems(query, options);
        List<String> expectedResourceIds = new ArrayList<>();
        expectedResourceIds.add(expectedDocument.resourceId());
        Map<String, ResourceValidator<CosmosItemProperties>> resourceIDToValidator = new HashMap<>();
        resourceIDToValidator.put(expectedDocument.resourceId(),
            new ResourceValidator.Builder<CosmosItemProperties>().areEqual(expectedDocument).build());
        FeedResponseListValidator<CosmosItemProperties> validator =
            new FeedResponseListValidator.Builder<CosmosItemProperties>()
                .numberOfPages(1)
                .containsExactly(expectedResourceIds)
                .validateAllResources(resourceIDToValidator)
                .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition)
                .allPagesSatisfy(new FeedResponseValidator.Builder<CosmosItemProperties>()
                    .hasRequestChargeHeader().build())
                .hasValidQueryMetrics(qmEnabled)
                .build();
        validateQuerySuccess(queryObservable, validator);
    }

    /** ORDER BY query matching nothing: expects a single empty page. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT)
    public void queryDocuments_NoResults() throws Exception {
        String query = "SELECT * from root r where r.id = '2' ORDER BY r.propInt";
        FeedOptions options = new FeedOptions();
        options.enableCrossPartitionQuery(true);
        Flux<FeedResponse<CosmosItemProperties>> queryObservable =
            createdCollection.queryItems(query, options);
        FeedResponseListValidator<CosmosItemProperties> validator =
            new FeedResponseListValidator.Builder<CosmosItemProperties>()
                .containsExactly(new ArrayList<>())
                .numberOfPages(1)
                .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition)
                .allPagesSatisfy(new FeedResponseValidator.Builder<CosmosItemProperties>()
                    .hasRequestChargeHeader().build())
                .build();
        validateQuerySuccess(queryObservable, validator);
    }

    @DataProvider(name = "sortOrder")
    public Object[][] sortOrder() {
        return new Object[][] { { "ASC" }, { "DESC" } };
    }

    /** ORDER BY with explicit ASC/DESC direction; expected ids reversed for DESC. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "sortOrder")
    public void queryOrderBy(String sortOrder) throws Exception {
        String query = String.format("SELECT * FROM r ORDER BY r.propInt %s", sortOrder);
        FeedOptions options = new FeedOptions();
        options.enableCrossPartitionQuery(true);
        int pageSize = 3;
        options.maxItemCount(pageSize);
        Flux<FeedResponse<CosmosItemProperties>> queryObservable =
            createdCollection.queryItems(query, options);
        Comparator<Integer> validatorComparator =
            Comparator.nullsFirst(Comparator.<Integer>naturalOrder());
        List<String> expectedResourceIds =
            sortDocumentsAndCollectResourceIds("propInt", d -> d.getInt("propInt"), validatorComparator);
        if ("DESC".equals(sortOrder)) {
            Collections.reverse(expectedResourceIds);
        }
        int expectedPageSize = expectedNumberOfPages(expectedResourceIds.size(), pageSize);
        FeedResponseListValidator<CosmosItemProperties> validator =
            new FeedResponseListValidator.Builder<CosmosItemProperties>()
                .containsExactly(expectedResourceIds)
                .numberOfPages(expectedPageSize)
                .allPagesSatisfy(new FeedResponseValidator.Builder<CosmosItemProperties>()
                    .hasRequestChargeHeader().build())
                .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition)
                .build();
        validateQuerySuccess(queryObservable, validator);
    }

    /** Implicit-ascending ORDER BY on an int property. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT)
    public void queryOrderByInt() throws Exception {
        String query = "SELECT * FROM r ORDER BY r.propInt";
        FeedOptions options = new FeedOptions();
        options.enableCrossPartitionQuery(true);
        int pageSize = 3;
        options.maxItemCount(pageSize);
        Flux<FeedResponse<CosmosItemProperties>> queryObservable =
            createdCollection.queryItems(query, options);
        Comparator<Integer> validatorComparator =
            Comparator.nullsFirst(Comparator.<Integer>naturalOrder());
        List<String> expectedResourceIds =
            sortDocumentsAndCollectResourceIds("propInt", d -> d.getInt("propInt"), validatorComparator);
        int expectedPageSize = expectedNumberOfPages(expectedResourceIds.size(), pageSize);
        FeedResponseListValidator<CosmosItemProperties> validator =
            new FeedResponseListValidator.Builder<CosmosItemProperties>()
                .containsExactly(expectedResourceIds)
                .numberOfPages(expectedPageSize)
                .allPagesSatisfy(new FeedResponseValidator.Builder<CosmosItemProperties>()
                    .hasRequestChargeHeader().build())
                .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition)
                .build();
        validateQuerySuccess(queryObservable, validator);
    }

    /** Implicit-ascending ORDER BY on a string property. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT)
    public void queryOrderByString() throws Exception {
        String query = "SELECT * FROM r ORDER BY r.propStr";
        FeedOptions options = new FeedOptions();
        options.enableCrossPartitionQuery(true);
        int pageSize = 3;
        options.maxItemCount(pageSize);
        Flux<FeedResponse<CosmosItemProperties>> queryObservable =
            createdCollection.queryItems(query, options);
        Comparator<String> validatorComparator =
            Comparator.nullsFirst(Comparator.<String>naturalOrder());
        List<String> expectedResourceIds =
            sortDocumentsAndCollectResourceIds("propStr", d -> d.getString("propStr"), validatorComparator);
        int expectedPageSize = expectedNumberOfPages(expectedResourceIds.size(), pageSize);
        FeedResponseListValidator<CosmosItemProperties> validator =
            new FeedResponseListValidator.Builder<CosmosItemProperties>()
                .containsExactly(expectedResourceIds)
                .numberOfPages(expectedPageSize)
                .allPagesSatisfy(new FeedResponseValidator.Builder<CosmosItemProperties>()
                    .hasRequestChargeHeader().build())
                .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition)
                .build();
        validateQuerySuccess(queryObservable, validator);
    }

    // TOP values covering 0, small, boundary-of-corpus, and beyond-corpus cases.
    @DataProvider(name = "topValue")
    public Object[][] topValueParameter() {
        return new Object[][] { { 0 }, { 1 }, { 5 }, { createdDocuments.size() - 1 },
            { createdDocuments.size() }, { createdDocuments.size() + 1 }, { 2 * createdDocuments.size() } };
    }

    /** ORDER BY combined with TOP; expected ids truncated to the TOP value. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "topValue")
    public void queryOrderWithTop(int topValue) throws Exception {
        String query = String.format("SELECT TOP %d * FROM r ORDER BY r.propInt", topValue);
        FeedOptions options = new FeedOptions();
        options.enableCrossPartitionQuery(true);
        int pageSize = 3;
        options.maxItemCount(pageSize);
        Flux<FeedResponse<CosmosItemProperties>> queryObservable =
            createdCollection.queryItems(query, options);
        Comparator<Integer> validatorComparator =
            Comparator.nullsFirst(Comparator.<Integer>naturalOrder());
        List<String> expectedResourceIds =
            sortDocumentsAndCollectResourceIds("propInt", d -> d.getInt("propInt"), validatorComparator)
                .stream().limit(topValue).collect(Collectors.toList());
        int expectedPageSize = expectedNumberOfPages(expectedResourceIds.size(), pageSize);
        FeedResponseListValidator<CosmosItemProperties> validator =
            new FeedResponseListValidator.Builder<CosmosItemProperties>()
                .containsExactly(expectedResourceIds)
                .numberOfPages(expectedPageSize)
                .allPagesSatisfy(new FeedResponseValidator.Builder<CosmosItemProperties>()
                    .hasRequestChargeHeader().build())
                // TOP 0 issues no per-partition reads, hence the reduced charge floor.
                .totalRequestChargeIsAtLeast(numberOfPartitions *
                    (topValue > 0 ? minQueryRequestChargePerPartition : 1))
                .build();
        validateQuerySuccess(queryObservable, validator);
    }

    /** Sorts seeded docs carrying {@code propName} by {@code comparer}; returns their resource ids. */
    private <T> List<String> sortDocumentsAndCollectResourceIds(String propName,
            Function<CosmosItemProperties, T> extractProp, Comparator<T> comparer) {
        return createdDocuments.stream()
            .filter(d -> d.getMap().containsKey(propName))
            .sorted((d1, d2) -> comparer.compare(extractProp.apply(d1), extractProp.apply(d2)))
            .map(Resource::resourceId).collect(Collectors.toList());
    }

    /** Reads one page of a single-partition ORDER BY query, then resumes from its token. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT)
    public void queryScopedToSinglePartition_StartWithContinuationToken() throws Exception {
        String query = "SELECT * FROM r ORDER BY r.propScopedPartitionInt ASC";
        FeedOptions options = new FeedOptions();
        options.partitionKey(new PartitionKey("duplicateParitionKeyValue"));
        options.maxItemCount(3);
        Flux<FeedResponse<CosmosItemProperties>> queryObservable =
            createdCollection.queryItems(query, options);

        // Consume exactly one page to obtain a continuation token.
        TestSubscriber<FeedResponse<CosmosItemProperties>> subscriber = new TestSubscriber<>();
        queryObservable.take(1).subscribe(subscriber);
        subscriber.awaitTerminalEvent();
        subscriber.assertComplete();
        subscriber.assertNoErrors();
        assertThat(subscriber.valueCount()).isEqualTo(1);

        FeedResponse<CosmosItemProperties> page =
            (FeedResponse<CosmosItemProperties>) subscriber.getEvents().get(0).get(0);
        assertThat(page.results()).hasSize(3);
        assertThat(page.continuationToken()).isNotEmpty();

        options.requestContinuation(page.continuationToken());
        queryObservable = createdCollection.queryItems(query, options);

        List<CosmosItemProperties> expectedDocs = createdDocuments.stream()
            .filter(d -> (StringUtils.equals("duplicateParitionKeyValue", d.getString("mypk"))))
            .filter(d -> (d.getInt("propScopedPartitionInt") > 2)).collect(Collectors.toList());
        int expectedPageSize =
            (expectedDocs.size() + options.maxItemCount() - 1) / options.maxItemCount();

        assertThat(expectedDocs).hasSize(10 - 3);

        // NOTE(review): the null initialization is redundant — validator is reassigned
        // immediately below.
        FeedResponseListValidator<CosmosItemProperties> validator = null;
        validator = new FeedResponseListValidator.Builder<CosmosItemProperties>()
            .containsExactly(expectedDocs.stream()
                .sorted((e1, e2) -> Integer.compare(e1.getInt("propScopedPartitionInt"),
                    e2.getInt("propScopedPartitionInt")))
                .map(d -> d.resourceId()).collect(Collectors.toList()))
            .numberOfPages(expectedPageSize)
            .allPagesSatisfy(new FeedResponseValidator.Builder<CosmosItemProperties>()
                .requestChargeGreaterThanOrEqualTo(1.0).build())
            .build();

        validateQuerySuccess(queryObservable, validator);
    }

    /** Serialize/parse round trip of OrderByContinuationToken, plus a negative parse case. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT)
    public void orderByContinuationTokenRoundTrip() throws Exception {
        {
            // A well-formed token must survive toString() -> tryParse() with fields intact.
            OrderByContinuationToken orderByContinuationToken = new OrderByContinuationToken(
                new CompositeContinuationToken("asdf", new Range<String>("A", "D", false, true)),
                new QueryItem[] { new QueryItem("{\"item\" : 42}") },
                "rid",
                false);
            String serialized = orderByContinuationToken.toString();
            ValueHolder<OrderByContinuationToken> outOrderByContinuationToken =
                new ValueHolder<OrderByContinuationToken>();

            assertThat(OrderByContinuationToken.tryParse(serialized, outOrderByContinuationToken)).isTrue();
            OrderByContinuationToken deserialized = outOrderByContinuationToken.v;
            CompositeContinuationToken compositeContinuationToken =
                deserialized.getCompositeContinuationToken();
            String token = compositeContinuationToken.getToken();
            Range<String> range = compositeContinuationToken.getRange();
            assertThat(token).isEqualTo("asdf");
            assertThat(range.getMin()).isEqualTo("A");
            assertThat(range.getMax()).isEqualTo("D");
            assertThat(range.isMinInclusive()).isEqualTo(false);
            assertThat(range.isMaxInclusive()).isEqualTo(true);

            QueryItem[] orderByItems = deserialized.getOrderByItems();
            assertThat(orderByItems).isNotNull();
            assertThat(orderByItems.length).isEqualTo(1);
            assertThat(orderByItems[0].getItem()).isEqualTo(42);

            String rid = deserialized.getRid();
            assertThat(rid).isEqualTo("rid");

            boolean inclusive = deserialized.getInclusive();
            assertThat(inclusive).isEqualTo(false);
        }
        {
            // Malformed JSON must fail to parse.
            ValueHolder<OrderByContinuationToken> outOrderByContinuationToken =
                new ValueHolder<OrderByContinuationToken>();
            assertThat(OrderByContinuationToken.tryParse(
                "{\"property\" : \"Not a valid Order By Token\"}",
                outOrderByContinuationToken)).isFalse();
        }
    }

    /** Drains an int-ordered query via continuation tokens at several page sizes. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT * 10, dataProvider = "sortOrder", retryAnalyzer = RetryAnalyzer.class)
    public void queryDocumentsWithOrderByContinuationTokensInteger(String sortOrder) throws Exception {
        String query = String.format("SELECT * FROM c ORDER BY c.propInt %s", sortOrder);
        Comparator<Integer> order =
            sortOrder.equals("ASC") ? Comparator.naturalOrder() : Comparator.reverseOrder();
        Comparator<Integer> validatorComparator = Comparator.nullsFirst(order);
        List<String> expectedResourceIds =
            sortDocumentsAndCollectResourceIds("propInt", d -> d.getInt("propInt"), validatorComparator);
        this.queryWithContinuationTokensAndPageSizes(query, new int[] { 1, 5, 10, 100 }, expectedResourceIds);
    }

    /** Drains a string-ordered query via continuation tokens at several page sizes. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT * 10, dataProvider = "sortOrder")
    public void queryDocumentsWithOrderByContinuationTokensString(String sortOrder) throws Exception {
        String query = String.format("SELECT * FROM c ORDER BY c.id %s", sortOrder);
        Comparator<String> order =
            sortOrder.equals("ASC") ? Comparator.naturalOrder() : Comparator.reverseOrder();
        Comparator<String> validatorComparator = Comparator.nullsFirst(order);
        List<String> expectedResourceIds =
            sortDocumentsAndCollectResourceIds("id", d -> d.getString("id"), validatorComparator);
        this.queryWithContinuationTokensAndPageSizes(query, new int[] { 1, 5, 10, 100 }, expectedResourceIds);
    }

    /** Verifies that a bogus ORDER BY continuation token makes the query fail. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT * 10, dataProvider = "sortOrder")
    public void queryDocumentsWithInvalidOrderByContinuationTokensString(String sortOrder) throws Exception {
        String query = String.format("SELECT * FROM c ORDER BY c.id %s", sortOrder);
        Comparator<String> validatorComparator;
        if (sortOrder.equals("ASC")) {
            validatorComparator = Comparator.nullsFirst(Comparator.<String>naturalOrder());
        } else {
            validatorComparator = Comparator.nullsFirst(Comparator.<String>reverseOrder());
        }
        List<String> expectedResourceIds =
            sortDocumentsAndCollectResourceIds("id", d -> d.getString("id"), validatorComparator);
        this.assertInvalidContinuationToken(query, new int[] { 1, 5, 10, 100 }, expectedResourceIds);
    }

    /** Builds a document from {@code keyValueProps} and creates it, blocking for the result. */
    public CosmosItemProperties createDocument(CosmosContainer cosmosContainer,
            Map<String, Object> keyValueProps) throws CosmosClientException {
        CosmosItemProperties docDefinition = getDocumentDefinition(keyValueProps);
        return cosmosContainer.createItem(docDefinition).block().properties();
    }

    /** Materializes one document per property map and bulk-inserts them. */
    public List<CosmosItemProperties> bulkInsert(CosmosContainer cosmosContainer,
            List<Map<String, Object>> keyValuePropsList) {
        ArrayList<CosmosItemProperties> result = new ArrayList<CosmosItemProperties>();
        for (Map<String, Object> keyValueProps : keyValuePropsList) {
            CosmosItemProperties docDefinition = getDocumentDefinition(keyValueProps);
            result.add(docDefinition);
        }
        return bulkInsertBlocking(cosmosContainer, result);
    }

    @BeforeMethod(groups = { "simple" })
    public void beforeMethod() throws Exception {
        // Grace period between tests; presumably to let the backend settle — TODO confirm.
        TimeUnit.SECONDS.sleep(10);
    }

    // NOTE(review): @BeforeClass is stacked directly on afterClass() here — that would run
    // safeClose(client) before any test executes. The setup method this annotation belonged
    // to appears to be missing; confirm and move/remove the annotation.
    @BeforeClass(groups = { "simple" }, timeOut = SETUP_TIMEOUT)
    @AfterClass(groups = { "simple" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
    public void afterClass() {
        safeClose(client);
    }

    /**
     * Issues {@code query} with a syntactically valid but bogus ORDER BY continuation token
     * and asserts it errors out with CosmosClientException.
     * NOTE(review): requestContinuation is never assigned inside the do/while, so the loop
     * body runs exactly once — the loop is dead. pageSize and expectedIds are unused.
     */
    private void assertInvalidContinuationToken(String query, int[] pageSize, List<String> expectedIds) {
        String requestContinuation = null;
        do {
            FeedOptions options = new FeedOptions();
            options.maxItemCount(1);
            options.enableCrossPartitionQuery(true);
            options.maxDegreeOfParallelism(2);
            OrderByContinuationToken orderByContinuationToken = new OrderByContinuationToken(
                new CompositeContinuationToken("asdf", new Range<String>("A", "D", false, true)),
                new QueryItem[] { new QueryItem("{\"item\" : 42}") },
                "rid",
                false);
            options.requestContinuation(orderByContinuationToken.toString());
            Flux<FeedResponse<CosmosItemProperties>> queryObservable =
                createdCollection.queryItems(query, options);

            TestSubscriber<FeedResponse<CosmosItemProperties>> testSubscriber = new TestSubscriber<>();
            queryObservable.subscribe(testSubscriber);
            testSubscriber.awaitTerminalEvent(TIMEOUT, TimeUnit.MILLISECONDS);
            testSubscriber.assertError(CosmosClientException.class);
        } while (requestContinuation != null);
    }

    /** Runs queryWithContinuationTokens for each page size and checks id order. */
    private void queryWithContinuationTokensAndPageSizes(String query, int[] pageSizes, List<String> expectedIds) {
        for (int pageSize : pageSizes) {
            List<CosmosItemProperties> receivedDocuments = this.queryWithContinuationTokens(query, pageSize);
            List<String> actualIds = new ArrayList<String>();
            for (CosmosItemProperties document : receivedDocuments) {
                actualIds.add(document.resourceId());
            }
            assertThat(actualIds).containsExactlyElementsOf(expectedIds);
        }
    }

    /** Drains {@code query} page by page, chaining continuation tokens; returns all documents. */
    private List<CosmosItemProperties> queryWithContinuationTokens(String query, int pageSize) {
        String requestContinuation = null;
        // Collected for debugging only; never read back.
        List<String> continuationTokens = new ArrayList<String>();
        List<CosmosItemProperties> receivedDocuments = new ArrayList<CosmosItemProperties>();
        do {
            FeedOptions options = new FeedOptions();
            options.maxItemCount(pageSize);
            options.enableCrossPartitionQuery(true);
            options.maxDegreeOfParallelism(2);
            options.requestContinuation(requestContinuation);
            Flux<FeedResponse<CosmosItemProperties>> queryObservable =
                createdCollection.queryItems(query, options);

            TestSubscriber<FeedResponse<CosmosItemProperties>> testSubscriber = new TestSubscriber<>();
            queryObservable.subscribe(testSubscriber);
            testSubscriber.awaitTerminalEvent(TIMEOUT, TimeUnit.MILLISECONDS);
            testSubscriber.assertNoErrors();
            testSubscriber.assertComplete();

            FeedResponse<CosmosItemProperties> firstPage =
                (FeedResponse<CosmosItemProperties>) testSubscriber.getEvents().get(0).get(0);
            requestContinuation = firstPage.continuationToken();
            receivedDocuments.addAll(firstPage.results());
            continuationTokens.add(requestContinuation);
        } while (requestContinuation != null);
        return receivedDocuments;
    }

    /**
     * Builds a raw JSON document with the given extra properties plus fixed "id" and
     * "mypk" fields; null values map to JSON null.
     */
    private static CosmosItemProperties getDocumentDefinition(String partitionKey, String id,
            Map<String, Object> keyValuePair) {
        StringBuilder sb = new StringBuilder();
        sb.append("{\n");
        for (String key : keyValuePair.keySet()) {
            Object val = keyValuePair.get(key);
            sb.append(" ");
            sb.append("\"").append(key).append("\"").append(" :");
            if (val == null) {
                sb.append("null");
            } else {
                sb.append(toJson(val));
            }
            sb.append(",\n");
        }
        sb.append(String.format(" \"id\": \"%s\",\n", id));
        sb.append(String.format(" \"mypk\": \"%s\"\n", partitionKey));
        sb.append("}");
        return new CosmosItemProperties(sb.toString());
    }

    /** Convenience overload: a random UUID is used for both id and partition key. */
    private static CosmosItemProperties getDocumentDefinition(Map<String, Object> keyValuePair) {
        String uuid = UUID.randomUUID().toString();
        return getDocumentDefinition(uuid, uuid, keyValuePair);
    }

    /** JSON-serializes {@code object}; wraps checked Jackson failures as IllegalStateException. */
    private static String toJson(Object object) {
        try {
            return Utils.getSimpleObjectMapper().writeValueAsString(object);
        } catch (JsonProcessingException e) {
            throw new IllegalStateException(e);
        }
    }
}
class OrderbyDocumentQueryTest extends TestSuiteBase { private final double minQueryRequestChargePerPartition = 2.0; private CosmosClient client; private CosmosContainer createdCollection; private CosmosDatabase createdDatabase; private List<CosmosItemProperties> createdDocuments = new ArrayList<>(); private int numberOfPartitions; @Factory(dataProvider = "clientBuildersWithDirect") public OrderbyDocumentQueryTest(CosmosClientBuilder clientBuilder) { super(clientBuilder); } @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "queryMetricsArgProvider") public void queryDocumentsValidateContent(boolean qmEnabled) throws Exception { CosmosItemProperties expectedDocument = createdDocuments.get(0); String query = String.format("SELECT * from root r where r.propStr = '%s'" + " ORDER BY r.propInt" , expectedDocument.getString("propStr")); FeedOptions options = new FeedOptions(); options.enableCrossPartitionQuery(true); options.populateQueryMetrics(qmEnabled); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options); List<String> expectedResourceIds = new ArrayList<>(); expectedResourceIds.add(expectedDocument.resourceId()); Map<String, ResourceValidator<CosmosItemProperties>> resourceIDToValidator = new HashMap<>(); resourceIDToValidator.put(expectedDocument.resourceId(), new ResourceValidator.Builder<CosmosItemProperties>().areEqual(expectedDocument).build()); FeedResponseListValidator<CosmosItemProperties> validator = new FeedResponseListValidator.Builder<CosmosItemProperties>() .numberOfPages(1) .containsExactly(expectedResourceIds) .validateAllResources(resourceIDToValidator) .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition) .allPagesSatisfy(new FeedResponseValidator.Builder<CosmosItemProperties>().hasRequestChargeHeader().build()) .hasValidQueryMetrics(qmEnabled) .build(); validateQuerySuccess(queryObservable, validator); } @Test(groups = { "simple" }, timeOut = TIMEOUT) 
public void queryDocuments_NoResults() throws Exception { String query = "SELECT * from root r where r.id = '2' ORDER BY r.propInt"; FeedOptions options = new FeedOptions(); options.enableCrossPartitionQuery(true); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options); FeedResponseListValidator<CosmosItemProperties> validator = new FeedResponseListValidator.Builder<CosmosItemProperties>() .containsExactly(new ArrayList<>()) .numberOfPages(1) .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition) .allPagesSatisfy(new FeedResponseValidator.Builder<CosmosItemProperties>() .hasRequestChargeHeader().build()) .build(); validateQuerySuccess(queryObservable, validator); } @DataProvider(name = "sortOrder") public Object[][] sortOrder() { return new Object[][] { { "ASC" }, {"DESC"} }; } @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "sortOrder") public void queryOrderBy(String sortOrder) throws Exception { String query = String.format("SELECT * FROM r ORDER BY r.propInt %s", sortOrder); FeedOptions options = new FeedOptions(); options.enableCrossPartitionQuery(true); int pageSize = 3; options.maxItemCount(pageSize); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options); Comparator<Integer> validatorComparator = Comparator.nullsFirst(Comparator.<Integer>naturalOrder()); List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("propInt", d -> d.getInt("propInt"), validatorComparator); if ("DESC".equals(sortOrder)) { Collections.reverse(expectedResourceIds); } int expectedPageSize = expectedNumberOfPages(expectedResourceIds.size(), pageSize); FeedResponseListValidator<CosmosItemProperties> validator = new FeedResponseListValidator.Builder<CosmosItemProperties>() .containsExactly(expectedResourceIds) .numberOfPages(expectedPageSize) .allPagesSatisfy(new FeedResponseValidator.Builder<CosmosItemProperties>() 
.hasRequestChargeHeader().build()) .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition) .build(); validateQuerySuccess(queryObservable, validator); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void queryOrderByInt() throws Exception { String query = "SELECT * FROM r ORDER BY r.propInt"; FeedOptions options = new FeedOptions(); options.enableCrossPartitionQuery(true); int pageSize = 3; options.maxItemCount(pageSize); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options); Comparator<Integer> validatorComparator = Comparator.nullsFirst(Comparator.<Integer>naturalOrder()); List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("propInt", d -> d.getInt("propInt"), validatorComparator); int expectedPageSize = expectedNumberOfPages(expectedResourceIds.size(), pageSize); FeedResponseListValidator<CosmosItemProperties> validator = new FeedResponseListValidator.Builder<CosmosItemProperties>() .containsExactly(expectedResourceIds) .numberOfPages(expectedPageSize) .allPagesSatisfy(new FeedResponseValidator.Builder<CosmosItemProperties>() .hasRequestChargeHeader().build()) .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition) .build(); validateQuerySuccess(queryObservable, validator); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void queryOrderByString() throws Exception { String query = "SELECT * FROM r ORDER BY r.propStr"; FeedOptions options = new FeedOptions(); options.enableCrossPartitionQuery(true); int pageSize = 3; options.maxItemCount(pageSize); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options); Comparator<String> validatorComparator = Comparator.nullsFirst(Comparator.<String>naturalOrder()); List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("propStr", d -> d.getString("propStr"), validatorComparator); int expectedPageSize = 
expectedNumberOfPages(expectedResourceIds.size(), pageSize); FeedResponseListValidator<CosmosItemProperties> validator = new FeedResponseListValidator.Builder<CosmosItemProperties>() .containsExactly(expectedResourceIds) .numberOfPages(expectedPageSize) .allPagesSatisfy(new FeedResponseValidator.Builder<CosmosItemProperties>() .hasRequestChargeHeader().build()) .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition) .build(); validateQuerySuccess(queryObservable, validator); } @DataProvider(name = "topValue") public Object[][] topValueParameter() { return new Object[][] { { 0 }, { 1 }, { 5 }, { createdDocuments.size() - 1 }, { createdDocuments.size() }, { createdDocuments.size() + 1 }, { 2 * createdDocuments.size() } }; } @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "topValue") public void queryOrderWithTop(int topValue) throws Exception { String query = String.format("SELECT TOP %d * FROM r ORDER BY r.propInt", topValue); FeedOptions options = new FeedOptions(); options.enableCrossPartitionQuery(true); int pageSize = 3; options.maxItemCount(pageSize); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options); Comparator<Integer> validatorComparator = Comparator.nullsFirst(Comparator.<Integer>naturalOrder()); List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("propInt", d -> d.getInt("propInt"), validatorComparator) .stream().limit(topValue).collect(Collectors.toList()); int expectedPageSize = expectedNumberOfPages(expectedResourceIds.size(), pageSize); FeedResponseListValidator<CosmosItemProperties> validator = new FeedResponseListValidator.Builder<CosmosItemProperties>() .containsExactly(expectedResourceIds) .numberOfPages(expectedPageSize) .allPagesSatisfy(new FeedResponseValidator.Builder<CosmosItemProperties>() .hasRequestChargeHeader().build()) .totalRequestChargeIsAtLeast(numberOfPartitions * (topValue > 0 ? 
minQueryRequestChargePerPartition : 1)) .build(); validateQuerySuccess(queryObservable, validator); } private <T> List<String> sortDocumentsAndCollectResourceIds(String propName, Function<CosmosItemProperties, T> extractProp, Comparator<T> comparer) { return createdDocuments.stream() .filter(d -> d.getMap().containsKey(propName)) .sorted((d1, d2) -> comparer.compare(extractProp.apply(d1), extractProp.apply(d2))) .map(Resource::resourceId).collect(Collectors.toList()); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void queryScopedToSinglePartition_StartWithContinuationToken() throws Exception { String query = "SELECT * FROM r ORDER BY r.propScopedPartitionInt ASC"; FeedOptions options = new FeedOptions(); options.partitionKey(new PartitionKey("duplicateParitionKeyValue")); options.maxItemCount(3); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options); TestSubscriber<FeedResponse<CosmosItemProperties>> subscriber = new TestSubscriber<>(); queryObservable.take(1).subscribe(subscriber); subscriber.awaitTerminalEvent(); subscriber.assertComplete(); subscriber.assertNoErrors(); assertThat(subscriber.valueCount()).isEqualTo(1); FeedResponse<CosmosItemProperties> page = (FeedResponse<CosmosItemProperties>) subscriber.getEvents().get(0).get(0); assertThat(page.results()).hasSize(3); assertThat(page.continuationToken()).isNotEmpty(); options.requestContinuation(page.continuationToken()); queryObservable = createdCollection.queryItems(query, options); List<CosmosItemProperties> expectedDocs = createdDocuments.stream() .filter(d -> (StringUtils.equals("duplicateParitionKeyValue", d.getString("mypk")))) .filter(d -> (d.getInt("propScopedPartitionInt") > 2)).collect(Collectors.toList()); int expectedPageSize = (expectedDocs.size() + options.maxItemCount() - 1) / options.maxItemCount(); assertThat(expectedDocs).hasSize(10 - 3); FeedResponseListValidator<CosmosItemProperties> validator = null; validator = new 
FeedResponseListValidator.Builder<CosmosItemProperties>() .containsExactly(expectedDocs.stream() .sorted((e1, e2) -> Integer.compare(e1.getInt("propScopedPartitionInt"), e2.getInt("propScopedPartitionInt"))) .map(d -> d.resourceId()).collect(Collectors.toList())) .numberOfPages(expectedPageSize) .allPagesSatisfy(new FeedResponseValidator.Builder<CosmosItemProperties>() .requestChargeGreaterThanOrEqualTo(1.0).build()) .build(); validateQuerySuccess(queryObservable, validator); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void orderByContinuationTokenRoundTrip() throws Exception { { OrderByContinuationToken orderByContinuationToken = new OrderByContinuationToken( new CompositeContinuationToken( "asdf", new Range<String>("A", "D", false, true)), new QueryItem[] {new QueryItem("{\"item\" : 42}")}, "rid", false); String serialized = orderByContinuationToken.toString(); ValueHolder<OrderByContinuationToken> outOrderByContinuationToken = new ValueHolder<OrderByContinuationToken>(); assertThat(OrderByContinuationToken.tryParse(serialized, outOrderByContinuationToken)).isTrue(); OrderByContinuationToken deserialized = outOrderByContinuationToken.v; CompositeContinuationToken compositeContinuationToken = deserialized.getCompositeContinuationToken(); String token = compositeContinuationToken.getToken(); Range<String> range = compositeContinuationToken.getRange(); assertThat(token).isEqualTo("asdf"); assertThat(range.getMin()).isEqualTo("A"); assertThat(range.getMax()).isEqualTo("D"); assertThat(range.isMinInclusive()).isEqualTo(false); assertThat(range.isMaxInclusive()).isEqualTo(true); QueryItem[] orderByItems = deserialized.getOrderByItems(); assertThat(orderByItems).isNotNull(); assertThat(orderByItems.length).isEqualTo(1); assertThat(orderByItems[0].getItem()).isEqualTo(42); String rid = deserialized.getRid(); assertThat(rid).isEqualTo("rid"); boolean inclusive = deserialized.getInclusive(); assertThat(inclusive).isEqualTo(false); } { 
ValueHolder<OrderByContinuationToken> outOrderByContinuationToken = new ValueHolder<OrderByContinuationToken>(); assertThat(OrderByContinuationToken.tryParse("{\"property\" : \"Not a valid Order By Token\"}", outOrderByContinuationToken)).isFalse(); } } @Test(groups = { "simple" }, timeOut = TIMEOUT * 10, dataProvider = "sortOrder", retryAnalyzer = RetryAnalyzer.class) public void queryDocumentsWithOrderByContinuationTokensInteger(String sortOrder) throws Exception { String query = String.format("SELECT * FROM c ORDER BY c.propInt %s", sortOrder); Comparator<Integer> order = sortOrder.equals("ASC")?Comparator.naturalOrder():Comparator.reverseOrder(); Comparator<Integer> validatorComparator = Comparator.nullsFirst(order); List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("propInt", d -> d.getInt("propInt"), validatorComparator); this.queryWithContinuationTokensAndPageSizes(query, new int[] { 1, 5, 10, 100}, expectedResourceIds); } @Test(groups = { "simple" }, timeOut = TIMEOUT * 10, dataProvider = "sortOrder") public void queryDocumentsWithOrderByContinuationTokensString(String sortOrder) throws Exception { String query = String.format("SELECT * FROM c ORDER BY c.id %s", sortOrder); Comparator<String> order = sortOrder.equals("ASC")?Comparator.naturalOrder():Comparator.reverseOrder(); Comparator<String> validatorComparator = Comparator.nullsFirst(order); List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("id", d -> d.getString("id"), validatorComparator); this.queryWithContinuationTokensAndPageSizes(query, new int[] { 1, 5, 10, 100 }, expectedResourceIds); } @Test(groups = { "simple" }, timeOut = TIMEOUT * 10, dataProvider = "sortOrder") public void queryDocumentsWithInvalidOrderByContinuationTokensString(String sortOrder) throws Exception { String query = String.format("SELECT * FROM c ORDER BY c.id %s", sortOrder); Comparator<String> validatorComparator; if(sortOrder.equals("ASC")) { validatorComparator = 
Comparator.nullsFirst(Comparator.<String>naturalOrder()); }else{ validatorComparator = Comparator.nullsFirst(Comparator.<String>reverseOrder()); } List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("id", d -> d.getString("id"), validatorComparator); this.assertInvalidContinuationToken(query, new int[] { 1, 5, 10, 100 }, expectedResourceIds); } public CosmosItemProperties createDocument(CosmosContainer cosmosContainer, Map<String, Object> keyValueProps) throws CosmosClientException { CosmosItemProperties docDefinition = getDocumentDefinition(keyValueProps); return cosmosContainer.createItem(docDefinition).block().properties(); } public List<CosmosItemProperties> bulkInsert(CosmosContainer cosmosContainer, List<Map<String, Object>> keyValuePropsList) { ArrayList<CosmosItemProperties> result = new ArrayList<CosmosItemProperties>(); for(Map<String, Object> keyValueProps: keyValuePropsList) { CosmosItemProperties docDefinition = getDocumentDefinition(keyValueProps); result.add(docDefinition); } return bulkInsertBlocking(cosmosContainer, result); } @BeforeMethod(groups = { "simple" }) public void beforeMethod() throws Exception { TimeUnit.SECONDS.sleep(10); } @BeforeClass(groups = { "simple" }, timeOut = SETUP_TIMEOUT) @AfterClass(groups = { "simple" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() { safeClose(client); } private void assertInvalidContinuationToken(String query, int[] pageSize, List<String> expectedIds) { String requestContinuation = null; do { FeedOptions options = new FeedOptions(); options.maxItemCount(1); options.enableCrossPartitionQuery(true); options.maxDegreeOfParallelism(2); OrderByContinuationToken orderByContinuationToken = new OrderByContinuationToken( new CompositeContinuationToken( "asdf", new Range<String>("A", "D", false, true)), new QueryItem[] {new QueryItem("{\"item\" : 42}")}, "rid", false); options.requestContinuation(orderByContinuationToken.toString()); 
Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options); TestSubscriber<FeedResponse<CosmosItemProperties>> testSubscriber = new TestSubscriber<>(); queryObservable.subscribe(testSubscriber); testSubscriber.awaitTerminalEvent(TIMEOUT, TimeUnit.MILLISECONDS); testSubscriber.assertError(CosmosClientException.class); } while (requestContinuation != null); } private void queryWithContinuationTokensAndPageSizes(String query, int[] pageSizes, List<String> expectedIds) { for (int pageSize : pageSizes) { List<CosmosItemProperties> receivedDocuments = this.queryWithContinuationTokens(query, pageSize); List<String> actualIds = new ArrayList<String>(); for (CosmosItemProperties document : receivedDocuments) { actualIds.add(document.resourceId()); } assertThat(actualIds).containsExactlyElementsOf(expectedIds); } } private List<CosmosItemProperties> queryWithContinuationTokens(String query, int pageSize) { String requestContinuation = null; List<String> continuationTokens = new ArrayList<String>(); List<CosmosItemProperties> receivedDocuments = new ArrayList<CosmosItemProperties>(); do { FeedOptions options = new FeedOptions(); options.maxItemCount(pageSize); options.enableCrossPartitionQuery(true); options.maxDegreeOfParallelism(2); options.requestContinuation(requestContinuation); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options); TestSubscriber<FeedResponse<CosmosItemProperties>> testSubscriber = new TestSubscriber<>(); queryObservable.subscribe(testSubscriber); testSubscriber.awaitTerminalEvent(TIMEOUT, TimeUnit.MILLISECONDS); testSubscriber.assertNoErrors(); testSubscriber.assertComplete(); FeedResponse<CosmosItemProperties> firstPage = (FeedResponse<CosmosItemProperties>) testSubscriber.getEvents().get(0).get(0); requestContinuation = firstPage.continuationToken(); receivedDocuments.addAll(firstPage.results()); continuationTokens.add(requestContinuation); } while 
(requestContinuation != null); return receivedDocuments; } private static CosmosItemProperties getDocumentDefinition(String partitionKey, String id, Map<String, Object> keyValuePair) { StringBuilder sb = new StringBuilder(); sb.append("{\n"); for(String key: keyValuePair.keySet()) { Object val = keyValuePair.get(key); sb.append(" "); sb.append("\"").append(key).append("\"").append(" :" ); if (val == null) { sb.append("null"); } else { sb.append(toJson(val)); } sb.append(",\n"); } sb.append(String.format(" \"id\": \"%s\",\n", id)); sb.append(String.format(" \"mypk\": \"%s\"\n", partitionKey)); sb.append("}"); return new CosmosItemProperties(sb.toString()); } private static CosmosItemProperties getDocumentDefinition(Map<String, Object> keyValuePair) { String uuid = UUID.randomUUID().toString(); return getDocumentDefinition(uuid, uuid, keyValuePair); } private static String toJson(Object object){ try { return Utils.getSimpleObjectMapper().writeValueAsString(object); } catch (JsonProcessingException e) { throw new IllegalStateException(e); } } }
Removed
public void beforeClass() throws Exception { System.out.println("OrderbyDocumentQueryTest.beforeClass"); client = clientBuilder().build(); createdDatabase = getSharedCosmosDatabase(client); createdCollection = getSharedMultiPartitionCosmosContainer(client); System.out.println("bef: truncate collection"); truncateCollection(createdCollection); System.out.println("after: truncate collection"); List<Map<String, Object>> keyValuePropsList = new ArrayList<>(); Map<String, Object> props; System.out.println("bef: create 30 docs"); for(int i = 0; i < 30; i++) { props = new HashMap<>(); props.put("propInt", i); props.put("propStr", String.valueOf(i)); keyValuePropsList.add(props); } System.out.println("OrderbyDocumentQueryTest.beforeClass : created 30 docs"); props = new HashMap<>(); keyValuePropsList.add(props); createdDocuments = bulkInsert(createdCollection, keyValuePropsList); for(int i = 0; i < 10; i++) { Map<String, Object> p = new HashMap<>(); p.put("propScopedPartitionInt", i); CosmosItemProperties doc = getDocumentDefinition("duplicateParitionKeyValue", UUID.randomUUID().toString(), p); CosmosItemRequestOptions options = new CosmosItemRequestOptions(); options.partitionKey(new PartitionKey(doc.get("mypk"))); createdDocuments.add(createDocument(createdCollection, doc).read(options).block().properties()); } System.out.println("OrderbyDocumentQueryTest.beforeClass : created 10 docs"); numberOfPartitions = CosmosBridgeInternal.getAsyncDocumentClient(client) .readPartitionKeyRanges("dbs/" + createdDatabase.id() + "/colls/" + createdCollection.id(), null) .flatMap(p -> Flux.fromIterable(p.results())).collectList().single().block().size(); System.out.println("numberOfPartitions = " + numberOfPartitions); waitIfNeededForReplicasToCatchUp(clientBuilder()); }
System.out.println("OrderbyDocumentQueryTest.beforeClass");
public void beforeClass() throws Exception { client = clientBuilder().build(); createdDatabase = getSharedCosmosDatabase(client); createdCollection = getSharedMultiPartitionCosmosContainer(client); truncateCollection(createdCollection); List<Map<String, Object>> keyValuePropsList = new ArrayList<>(); Map<String, Object> props; for(int i = 0; i < 30; i++) { props = new HashMap<>(); props.put("propInt", i); props.put("propStr", String.valueOf(i)); keyValuePropsList.add(props); } props = new HashMap<>(); keyValuePropsList.add(props); createdDocuments = bulkInsert(createdCollection, keyValuePropsList); for(int i = 0; i < 10; i++) { Map<String, Object> p = new HashMap<>(); p.put("propScopedPartitionInt", i); CosmosItemProperties doc = getDocumentDefinition("duplicateParitionKeyValue", UUID.randomUUID().toString(), p); CosmosItemRequestOptions options = new CosmosItemRequestOptions(); options.partitionKey(new PartitionKey(doc.get("mypk"))); createdDocuments.add(createDocument(createdCollection, doc).read(options).block().properties()); } numberOfPartitions = CosmosBridgeInternal.getAsyncDocumentClient(client) .readPartitionKeyRanges("dbs/" + createdDatabase.id() + "/colls/" + createdCollection.id(), null) .flatMap(p -> Flux.fromIterable(p.results())).collectList().single().block().size(); waitIfNeededForReplicasToCatchUp(clientBuilder()); }
class OrderbyDocumentQueryTest extends TestSuiteBase { private final double minQueryRequestChargePerPartition = 2.0; private CosmosClient client; private CosmosContainer createdCollection; private CosmosDatabase createdDatabase; private List<CosmosItemProperties> createdDocuments = new ArrayList<>(); private int numberOfPartitions; @Factory(dataProvider = "clientBuildersWithDirect") public OrderbyDocumentQueryTest(CosmosClientBuilder clientBuilder) { super(clientBuilder); } @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "queryMetricsArgProvider") public void queryDocumentsValidateContent(boolean qmEnabled) throws Exception { CosmosItemProperties expectedDocument = createdDocuments.get(0); String query = String.format("SELECT * from root r where r.propStr = '%s'" + " ORDER BY r.propInt" , expectedDocument.getString("propStr")); FeedOptions options = new FeedOptions(); options.enableCrossPartitionQuery(true); options.populateQueryMetrics(qmEnabled); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options); List<String> expectedResourceIds = new ArrayList<>(); expectedResourceIds.add(expectedDocument.resourceId()); Map<String, ResourceValidator<CosmosItemProperties>> resourceIDToValidator = new HashMap<>(); resourceIDToValidator.put(expectedDocument.resourceId(), new ResourceValidator.Builder<CosmosItemProperties>().areEqual(expectedDocument).build()); FeedResponseListValidator<CosmosItemProperties> validator = new FeedResponseListValidator.Builder<CosmosItemProperties>() .numberOfPages(1) .containsExactly(expectedResourceIds) .validateAllResources(resourceIDToValidator) .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition) .allPagesSatisfy(new FeedResponseValidator.Builder<CosmosItemProperties>().hasRequestChargeHeader().build()) .hasValidQueryMetrics(qmEnabled) .build(); validateQuerySuccess(queryObservable, validator); } @Test(groups = { "simple" }, timeOut = TIMEOUT) 
public void queryDocuments_NoResults() throws Exception { String query = "SELECT * from root r where r.id = '2' ORDER BY r.propInt"; FeedOptions options = new FeedOptions(); options.enableCrossPartitionQuery(true); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options); FeedResponseListValidator<CosmosItemProperties> validator = new FeedResponseListValidator.Builder<CosmosItemProperties>() .containsExactly(new ArrayList<>()) .numberOfPages(1) .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition) .allPagesSatisfy(new FeedResponseValidator.Builder<CosmosItemProperties>() .hasRequestChargeHeader().build()) .build(); validateQuerySuccess(queryObservable, validator); } @DataProvider(name = "sortOrder") public Object[][] sortOrder() { return new Object[][] { { "ASC" }, {"DESC"} }; } @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "sortOrder") public void queryOrderBy(String sortOrder) throws Exception { String query = String.format("SELECT * FROM r ORDER BY r.propInt %s", sortOrder); FeedOptions options = new FeedOptions(); options.enableCrossPartitionQuery(true); int pageSize = 3; options.maxItemCount(pageSize); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options); Comparator<Integer> validatorComparator = Comparator.nullsFirst(Comparator.<Integer>naturalOrder()); List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("propInt", d -> d.getInt("propInt"), validatorComparator); if ("DESC".equals(sortOrder)) { Collections.reverse(expectedResourceIds); } int expectedPageSize = expectedNumberOfPages(expectedResourceIds.size(), pageSize); FeedResponseListValidator<CosmosItemProperties> validator = new FeedResponseListValidator.Builder<CosmosItemProperties>() .containsExactly(expectedResourceIds) .numberOfPages(expectedPageSize) .allPagesSatisfy(new FeedResponseValidator.Builder<CosmosItemProperties>() 
.hasRequestChargeHeader().build()) .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition) .build(); validateQuerySuccess(queryObservable, validator); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void queryOrderByInt() throws Exception { String query = "SELECT * FROM r ORDER BY r.propInt"; FeedOptions options = new FeedOptions(); options.enableCrossPartitionQuery(true); int pageSize = 3; options.maxItemCount(pageSize); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options); Comparator<Integer> validatorComparator = Comparator.nullsFirst(Comparator.<Integer>naturalOrder()); List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("propInt", d -> d.getInt("propInt"), validatorComparator); int expectedPageSize = expectedNumberOfPages(expectedResourceIds.size(), pageSize); FeedResponseListValidator<CosmosItemProperties> validator = new FeedResponseListValidator.Builder<CosmosItemProperties>() .containsExactly(expectedResourceIds) .numberOfPages(expectedPageSize) .allPagesSatisfy(new FeedResponseValidator.Builder<CosmosItemProperties>() .hasRequestChargeHeader().build()) .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition) .build(); validateQuerySuccess(queryObservable, validator); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void queryOrderByString() throws Exception { String query = "SELECT * FROM r ORDER BY r.propStr"; FeedOptions options = new FeedOptions(); options.enableCrossPartitionQuery(true); int pageSize = 3; options.maxItemCount(pageSize); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options); Comparator<String> validatorComparator = Comparator.nullsFirst(Comparator.<String>naturalOrder()); List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("propStr", d -> d.getString("propStr"), validatorComparator); int expectedPageSize = 
expectedNumberOfPages(expectedResourceIds.size(), pageSize); FeedResponseListValidator<CosmosItemProperties> validator = new FeedResponseListValidator.Builder<CosmosItemProperties>() .containsExactly(expectedResourceIds) .numberOfPages(expectedPageSize) .allPagesSatisfy(new FeedResponseValidator.Builder<CosmosItemProperties>() .hasRequestChargeHeader().build()) .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition) .build(); validateQuerySuccess(queryObservable, validator); } @DataProvider(name = "topValue") public Object[][] topValueParameter() { return new Object[][] { { 0 }, { 1 }, { 5 }, { createdDocuments.size() - 1 }, { createdDocuments.size() }, { createdDocuments.size() + 1 }, { 2 * createdDocuments.size() } }; } @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "topValue") public void queryOrderWithTop(int topValue) throws Exception { String query = String.format("SELECT TOP %d * FROM r ORDER BY r.propInt", topValue); FeedOptions options = new FeedOptions(); options.enableCrossPartitionQuery(true); int pageSize = 3; options.maxItemCount(pageSize); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options); Comparator<Integer> validatorComparator = Comparator.nullsFirst(Comparator.<Integer>naturalOrder()); List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("propInt", d -> d.getInt("propInt"), validatorComparator) .stream().limit(topValue).collect(Collectors.toList()); int expectedPageSize = expectedNumberOfPages(expectedResourceIds.size(), pageSize); FeedResponseListValidator<CosmosItemProperties> validator = new FeedResponseListValidator.Builder<CosmosItemProperties>() .containsExactly(expectedResourceIds) .numberOfPages(expectedPageSize) .allPagesSatisfy(new FeedResponseValidator.Builder<CosmosItemProperties>() .hasRequestChargeHeader().build()) .totalRequestChargeIsAtLeast(numberOfPartitions * (topValue > 0 ? 
minQueryRequestChargePerPartition : 1)) .build(); validateQuerySuccess(queryObservable, validator); } private <T> List<String> sortDocumentsAndCollectResourceIds(String propName, Function<CosmosItemProperties, T> extractProp, Comparator<T> comparer) { return createdDocuments.stream() .filter(d -> d.getMap().containsKey(propName)) .sorted((d1, d2) -> comparer.compare(extractProp.apply(d1), extractProp.apply(d2))) .map(Resource::resourceId).collect(Collectors.toList()); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void queryScopedToSinglePartition_StartWithContinuationToken() throws Exception { String query = "SELECT * FROM r ORDER BY r.propScopedPartitionInt ASC"; FeedOptions options = new FeedOptions(); options.partitionKey(new PartitionKey("duplicateParitionKeyValue")); options.maxItemCount(3); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options); TestSubscriber<FeedResponse<CosmosItemProperties>> subscriber = new TestSubscriber<>(); queryObservable.take(1).subscribe(subscriber); subscriber.awaitTerminalEvent(); subscriber.assertComplete(); subscriber.assertNoErrors(); assertThat(subscriber.valueCount()).isEqualTo(1); FeedResponse<CosmosItemProperties> page = (FeedResponse<CosmosItemProperties>) subscriber.getEvents().get(0).get(0); assertThat(page.results()).hasSize(3); assertThat(page.continuationToken()).isNotEmpty(); options.requestContinuation(page.continuationToken()); queryObservable = createdCollection.queryItems(query, options); List<CosmosItemProperties> expectedDocs = createdDocuments.stream() .filter(d -> (StringUtils.equals("duplicateParitionKeyValue", d.getString("mypk")))) .filter(d -> (d.getInt("propScopedPartitionInt") > 2)).collect(Collectors.toList()); int expectedPageSize = (expectedDocs.size() + options.maxItemCount() - 1) / options.maxItemCount(); assertThat(expectedDocs).hasSize(10 - 3); FeedResponseListValidator<CosmosItemProperties> validator = null; validator = new 
FeedResponseListValidator.Builder<CosmosItemProperties>() .containsExactly(expectedDocs.stream() .sorted((e1, e2) -> Integer.compare(e1.getInt("propScopedPartitionInt"), e2.getInt("propScopedPartitionInt"))) .map(d -> d.resourceId()).collect(Collectors.toList())) .numberOfPages(expectedPageSize) .allPagesSatisfy(new FeedResponseValidator.Builder<CosmosItemProperties>() .requestChargeGreaterThanOrEqualTo(1.0).build()) .build(); validateQuerySuccess(queryObservable, validator); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void orderByContinuationTokenRoundTrip() throws Exception { { OrderByContinuationToken orderByContinuationToken = new OrderByContinuationToken( new CompositeContinuationToken( "asdf", new Range<String>("A", "D", false, true)), new QueryItem[] {new QueryItem("{\"item\" : 42}")}, "rid", false); String serialized = orderByContinuationToken.toString(); ValueHolder<OrderByContinuationToken> outOrderByContinuationToken = new ValueHolder<OrderByContinuationToken>(); assertThat(OrderByContinuationToken.tryParse(serialized, outOrderByContinuationToken)).isTrue(); OrderByContinuationToken deserialized = outOrderByContinuationToken.v; CompositeContinuationToken compositeContinuationToken = deserialized.getCompositeContinuationToken(); String token = compositeContinuationToken.getToken(); Range<String> range = compositeContinuationToken.getRange(); assertThat(token).isEqualTo("asdf"); assertThat(range.getMin()).isEqualTo("A"); assertThat(range.getMax()).isEqualTo("D"); assertThat(range.isMinInclusive()).isEqualTo(false); assertThat(range.isMaxInclusive()).isEqualTo(true); QueryItem[] orderByItems = deserialized.getOrderByItems(); assertThat(orderByItems).isNotNull(); assertThat(orderByItems.length).isEqualTo(1); assertThat(orderByItems[0].getItem()).isEqualTo(42); String rid = deserialized.getRid(); assertThat(rid).isEqualTo("rid"); boolean inclusive = deserialized.getInclusive(); assertThat(inclusive).isEqualTo(false); } { 
ValueHolder<OrderByContinuationToken> outOrderByContinuationToken = new ValueHolder<OrderByContinuationToken>(); assertThat(OrderByContinuationToken.tryParse("{\"property\" : \"Not a valid Order By Token\"}", outOrderByContinuationToken)).isFalse(); } } @Test(groups = { "simple" }, timeOut = TIMEOUT * 10, dataProvider = "sortOrder", retryAnalyzer = RetryAnalyzer.class) public void queryDocumentsWithOrderByContinuationTokensInteger(String sortOrder) throws Exception { String query = String.format("SELECT * FROM c ORDER BY c.propInt %s", sortOrder); Comparator<Integer> order = sortOrder.equals("ASC")?Comparator.naturalOrder():Comparator.reverseOrder(); Comparator<Integer> validatorComparator = Comparator.nullsFirst(order); List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("propInt", d -> d.getInt("propInt"), validatorComparator); this.queryWithContinuationTokensAndPageSizes(query, new int[] { 1, 5, 10, 100}, expectedResourceIds); } @Test(groups = { "simple" }, timeOut = TIMEOUT * 10, dataProvider = "sortOrder") public void queryDocumentsWithOrderByContinuationTokensString(String sortOrder) throws Exception { String query = String.format("SELECT * FROM c ORDER BY c.id %s", sortOrder); Comparator<String> order = sortOrder.equals("ASC")?Comparator.naturalOrder():Comparator.reverseOrder(); Comparator<String> validatorComparator = Comparator.nullsFirst(order); List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("id", d -> d.getString("id"), validatorComparator); this.queryWithContinuationTokensAndPageSizes(query, new int[] { 1, 5, 10, 100 }, expectedResourceIds); } @Test(groups = { "simple" }, timeOut = TIMEOUT * 10, dataProvider = "sortOrder") public void queryDocumentsWithInvalidOrderByContinuationTokensString(String sortOrder) throws Exception { String query = String.format("SELECT * FROM c ORDER BY c.id %s", sortOrder); Comparator<String> validatorComparator; if(sortOrder.equals("ASC")) { validatorComparator = 
Comparator.nullsFirst(Comparator.<String>naturalOrder()); }else{ validatorComparator = Comparator.nullsFirst(Comparator.<String>reverseOrder()); } List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("id", d -> d.getString("id"), validatorComparator); this.assertInvalidContinuationToken(query, new int[] { 1, 5, 10, 100 }, expectedResourceIds); } public CosmosItemProperties createDocument(CosmosContainer cosmosContainer, Map<String, Object> keyValueProps) throws CosmosClientException { CosmosItemProperties docDefinition = getDocumentDefinition(keyValueProps); return cosmosContainer.createItem(docDefinition).block().properties(); } public List<CosmosItemProperties> bulkInsert(CosmosContainer cosmosContainer, List<Map<String, Object>> keyValuePropsList) { ArrayList<CosmosItemProperties> result = new ArrayList<CosmosItemProperties>(); for(Map<String, Object> keyValueProps: keyValuePropsList) { CosmosItemProperties docDefinition = getDocumentDefinition(keyValueProps); result.add(docDefinition); } return bulkInsertBlocking(cosmosContainer, result); } @BeforeMethod(groups = { "simple" }) public void beforeMethod() throws Exception { TimeUnit.SECONDS.sleep(10); } @BeforeClass(groups = { "simple" }, timeOut = SETUP_TIMEOUT) @AfterClass(groups = { "simple" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() { safeClose(client); } private void assertInvalidContinuationToken(String query, int[] pageSize, List<String> expectedIds) { String requestContinuation = null; do { FeedOptions options = new FeedOptions(); options.maxItemCount(1); options.enableCrossPartitionQuery(true); options.maxDegreeOfParallelism(2); OrderByContinuationToken orderByContinuationToken = new OrderByContinuationToken( new CompositeContinuationToken( "asdf", new Range<String>("A", "D", false, true)), new QueryItem[] {new QueryItem("{\"item\" : 42}")}, "rid", false); options.requestContinuation(orderByContinuationToken.toString()); 
Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options); TestSubscriber<FeedResponse<CosmosItemProperties>> testSubscriber = new TestSubscriber<>(); queryObservable.subscribe(testSubscriber); testSubscriber.awaitTerminalEvent(TIMEOUT, TimeUnit.MILLISECONDS); testSubscriber.assertError(CosmosClientException.class); } while (requestContinuation != null); } private void queryWithContinuationTokensAndPageSizes(String query, int[] pageSizes, List<String> expectedIds) { for (int pageSize : pageSizes) { List<CosmosItemProperties> receivedDocuments = this.queryWithContinuationTokens(query, pageSize); List<String> actualIds = new ArrayList<String>(); for (CosmosItemProperties document : receivedDocuments) { actualIds.add(document.resourceId()); } assertThat(actualIds).containsExactlyElementsOf(expectedIds); } } private List<CosmosItemProperties> queryWithContinuationTokens(String query, int pageSize) { String requestContinuation = null; List<String> continuationTokens = new ArrayList<String>(); List<CosmosItemProperties> receivedDocuments = new ArrayList<CosmosItemProperties>(); do { FeedOptions options = new FeedOptions(); options.maxItemCount(pageSize); options.enableCrossPartitionQuery(true); options.maxDegreeOfParallelism(2); options.requestContinuation(requestContinuation); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options); TestSubscriber<FeedResponse<CosmosItemProperties>> testSubscriber = new TestSubscriber<>(); queryObservable.subscribe(testSubscriber); testSubscriber.awaitTerminalEvent(TIMEOUT, TimeUnit.MILLISECONDS); testSubscriber.assertNoErrors(); testSubscriber.assertComplete(); FeedResponse<CosmosItemProperties> firstPage = (FeedResponse<CosmosItemProperties>) testSubscriber.getEvents().get(0).get(0); requestContinuation = firstPage.continuationToken(); receivedDocuments.addAll(firstPage.results()); continuationTokens.add(requestContinuation); } while 
(requestContinuation != null); return receivedDocuments; } private static CosmosItemProperties getDocumentDefinition(String partitionKey, String id, Map<String, Object> keyValuePair) { StringBuilder sb = new StringBuilder(); sb.append("{\n"); for(String key: keyValuePair.keySet()) { Object val = keyValuePair.get(key); sb.append(" "); sb.append("\"").append(key).append("\"").append(" :" ); if (val == null) { sb.append("null"); } else { sb.append(toJson(val)); } sb.append(",\n"); } sb.append(String.format(" \"id\": \"%s\",\n", id)); sb.append(String.format(" \"mypk\": \"%s\"\n", partitionKey)); sb.append("}"); return new CosmosItemProperties(sb.toString()); } private static CosmosItemProperties getDocumentDefinition(Map<String, Object> keyValuePair) { String uuid = UUID.randomUUID().toString(); return getDocumentDefinition(uuid, uuid, keyValuePair); } private static String toJson(Object object){ try { return Utils.getSimpleObjectMapper().writeValueAsString(object); } catch (JsonProcessingException e) { throw new IllegalStateException(e); } } }
class OrderbyDocumentQueryTest extends TestSuiteBase { private final double minQueryRequestChargePerPartition = 2.0; private CosmosClient client; private CosmosContainer createdCollection; private CosmosDatabase createdDatabase; private List<CosmosItemProperties> createdDocuments = new ArrayList<>(); private int numberOfPartitions; @Factory(dataProvider = "clientBuildersWithDirect") public OrderbyDocumentQueryTest(CosmosClientBuilder clientBuilder) { super(clientBuilder); } @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "queryMetricsArgProvider") public void queryDocumentsValidateContent(boolean qmEnabled) throws Exception { CosmosItemProperties expectedDocument = createdDocuments.get(0); String query = String.format("SELECT * from root r where r.propStr = '%s'" + " ORDER BY r.propInt" , expectedDocument.getString("propStr")); FeedOptions options = new FeedOptions(); options.enableCrossPartitionQuery(true); options.populateQueryMetrics(qmEnabled); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options); List<String> expectedResourceIds = new ArrayList<>(); expectedResourceIds.add(expectedDocument.resourceId()); Map<String, ResourceValidator<CosmosItemProperties>> resourceIDToValidator = new HashMap<>(); resourceIDToValidator.put(expectedDocument.resourceId(), new ResourceValidator.Builder<CosmosItemProperties>().areEqual(expectedDocument).build()); FeedResponseListValidator<CosmosItemProperties> validator = new FeedResponseListValidator.Builder<CosmosItemProperties>() .numberOfPages(1) .containsExactly(expectedResourceIds) .validateAllResources(resourceIDToValidator) .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition) .allPagesSatisfy(new FeedResponseValidator.Builder<CosmosItemProperties>().hasRequestChargeHeader().build()) .hasValidQueryMetrics(qmEnabled) .build(); validateQuerySuccess(queryObservable, validator); } @Test(groups = { "simple" }, timeOut = TIMEOUT) 
public void queryDocuments_NoResults() throws Exception { String query = "SELECT * from root r where r.id = '2' ORDER BY r.propInt"; FeedOptions options = new FeedOptions(); options.enableCrossPartitionQuery(true); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options); FeedResponseListValidator<CosmosItemProperties> validator = new FeedResponseListValidator.Builder<CosmosItemProperties>() .containsExactly(new ArrayList<>()) .numberOfPages(1) .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition) .allPagesSatisfy(new FeedResponseValidator.Builder<CosmosItemProperties>() .hasRequestChargeHeader().build()) .build(); validateQuerySuccess(queryObservable, validator); } @DataProvider(name = "sortOrder") public Object[][] sortOrder() { return new Object[][] { { "ASC" }, {"DESC"} }; } @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "sortOrder") public void queryOrderBy(String sortOrder) throws Exception { String query = String.format("SELECT * FROM r ORDER BY r.propInt %s", sortOrder); FeedOptions options = new FeedOptions(); options.enableCrossPartitionQuery(true); int pageSize = 3; options.maxItemCount(pageSize); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options); Comparator<Integer> validatorComparator = Comparator.nullsFirst(Comparator.<Integer>naturalOrder()); List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("propInt", d -> d.getInt("propInt"), validatorComparator); if ("DESC".equals(sortOrder)) { Collections.reverse(expectedResourceIds); } int expectedPageSize = expectedNumberOfPages(expectedResourceIds.size(), pageSize); FeedResponseListValidator<CosmosItemProperties> validator = new FeedResponseListValidator.Builder<CosmosItemProperties>() .containsExactly(expectedResourceIds) .numberOfPages(expectedPageSize) .allPagesSatisfy(new FeedResponseValidator.Builder<CosmosItemProperties>() 
.hasRequestChargeHeader().build()) .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition) .build(); validateQuerySuccess(queryObservable, validator); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void queryOrderByInt() throws Exception { String query = "SELECT * FROM r ORDER BY r.propInt"; FeedOptions options = new FeedOptions(); options.enableCrossPartitionQuery(true); int pageSize = 3; options.maxItemCount(pageSize); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options); Comparator<Integer> validatorComparator = Comparator.nullsFirst(Comparator.<Integer>naturalOrder()); List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("propInt", d -> d.getInt("propInt"), validatorComparator); int expectedPageSize = expectedNumberOfPages(expectedResourceIds.size(), pageSize); FeedResponseListValidator<CosmosItemProperties> validator = new FeedResponseListValidator.Builder<CosmosItemProperties>() .containsExactly(expectedResourceIds) .numberOfPages(expectedPageSize) .allPagesSatisfy(new FeedResponseValidator.Builder<CosmosItemProperties>() .hasRequestChargeHeader().build()) .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition) .build(); validateQuerySuccess(queryObservable, validator); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void queryOrderByString() throws Exception { String query = "SELECT * FROM r ORDER BY r.propStr"; FeedOptions options = new FeedOptions(); options.enableCrossPartitionQuery(true); int pageSize = 3; options.maxItemCount(pageSize); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options); Comparator<String> validatorComparator = Comparator.nullsFirst(Comparator.<String>naturalOrder()); List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("propStr", d -> d.getString("propStr"), validatorComparator); int expectedPageSize = 
expectedNumberOfPages(expectedResourceIds.size(), pageSize); FeedResponseListValidator<CosmosItemProperties> validator = new FeedResponseListValidator.Builder<CosmosItemProperties>() .containsExactly(expectedResourceIds) .numberOfPages(expectedPageSize) .allPagesSatisfy(new FeedResponseValidator.Builder<CosmosItemProperties>() .hasRequestChargeHeader().build()) .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition) .build(); validateQuerySuccess(queryObservable, validator); } @DataProvider(name = "topValue") public Object[][] topValueParameter() { return new Object[][] { { 0 }, { 1 }, { 5 }, { createdDocuments.size() - 1 }, { createdDocuments.size() }, { createdDocuments.size() + 1 }, { 2 * createdDocuments.size() } }; } @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "topValue") public void queryOrderWithTop(int topValue) throws Exception { String query = String.format("SELECT TOP %d * FROM r ORDER BY r.propInt", topValue); FeedOptions options = new FeedOptions(); options.enableCrossPartitionQuery(true); int pageSize = 3; options.maxItemCount(pageSize); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options); Comparator<Integer> validatorComparator = Comparator.nullsFirst(Comparator.<Integer>naturalOrder()); List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("propInt", d -> d.getInt("propInt"), validatorComparator) .stream().limit(topValue).collect(Collectors.toList()); int expectedPageSize = expectedNumberOfPages(expectedResourceIds.size(), pageSize); FeedResponseListValidator<CosmosItemProperties> validator = new FeedResponseListValidator.Builder<CosmosItemProperties>() .containsExactly(expectedResourceIds) .numberOfPages(expectedPageSize) .allPagesSatisfy(new FeedResponseValidator.Builder<CosmosItemProperties>() .hasRequestChargeHeader().build()) .totalRequestChargeIsAtLeast(numberOfPartitions * (topValue > 0 ? 
minQueryRequestChargePerPartition : 1)) .build(); validateQuerySuccess(queryObservable, validator); } private <T> List<String> sortDocumentsAndCollectResourceIds(String propName, Function<CosmosItemProperties, T> extractProp, Comparator<T> comparer) { return createdDocuments.stream() .filter(d -> d.getMap().containsKey(propName)) .sorted((d1, d2) -> comparer.compare(extractProp.apply(d1), extractProp.apply(d2))) .map(Resource::resourceId).collect(Collectors.toList()); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void queryScopedToSinglePartition_StartWithContinuationToken() throws Exception { String query = "SELECT * FROM r ORDER BY r.propScopedPartitionInt ASC"; FeedOptions options = new FeedOptions(); options.partitionKey(new PartitionKey("duplicateParitionKeyValue")); options.maxItemCount(3); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options); TestSubscriber<FeedResponse<CosmosItemProperties>> subscriber = new TestSubscriber<>(); queryObservable.take(1).subscribe(subscriber); subscriber.awaitTerminalEvent(); subscriber.assertComplete(); subscriber.assertNoErrors(); assertThat(subscriber.valueCount()).isEqualTo(1); FeedResponse<CosmosItemProperties> page = (FeedResponse<CosmosItemProperties>) subscriber.getEvents().get(0).get(0); assertThat(page.results()).hasSize(3); assertThat(page.continuationToken()).isNotEmpty(); options.requestContinuation(page.continuationToken()); queryObservable = createdCollection.queryItems(query, options); List<CosmosItemProperties> expectedDocs = createdDocuments.stream() .filter(d -> (StringUtils.equals("duplicateParitionKeyValue", d.getString("mypk")))) .filter(d -> (d.getInt("propScopedPartitionInt") > 2)).collect(Collectors.toList()); int expectedPageSize = (expectedDocs.size() + options.maxItemCount() - 1) / options.maxItemCount(); assertThat(expectedDocs).hasSize(10 - 3); FeedResponseListValidator<CosmosItemProperties> validator = null; validator = new 
FeedResponseListValidator.Builder<CosmosItemProperties>() .containsExactly(expectedDocs.stream() .sorted((e1, e2) -> Integer.compare(e1.getInt("propScopedPartitionInt"), e2.getInt("propScopedPartitionInt"))) .map(d -> d.resourceId()).collect(Collectors.toList())) .numberOfPages(expectedPageSize) .allPagesSatisfy(new FeedResponseValidator.Builder<CosmosItemProperties>() .requestChargeGreaterThanOrEqualTo(1.0).build()) .build(); validateQuerySuccess(queryObservable, validator); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void orderByContinuationTokenRoundTrip() throws Exception { { OrderByContinuationToken orderByContinuationToken = new OrderByContinuationToken( new CompositeContinuationToken( "asdf", new Range<String>("A", "D", false, true)), new QueryItem[] {new QueryItem("{\"item\" : 42}")}, "rid", false); String serialized = orderByContinuationToken.toString(); ValueHolder<OrderByContinuationToken> outOrderByContinuationToken = new ValueHolder<OrderByContinuationToken>(); assertThat(OrderByContinuationToken.tryParse(serialized, outOrderByContinuationToken)).isTrue(); OrderByContinuationToken deserialized = outOrderByContinuationToken.v; CompositeContinuationToken compositeContinuationToken = deserialized.getCompositeContinuationToken(); String token = compositeContinuationToken.getToken(); Range<String> range = compositeContinuationToken.getRange(); assertThat(token).isEqualTo("asdf"); assertThat(range.getMin()).isEqualTo("A"); assertThat(range.getMax()).isEqualTo("D"); assertThat(range.isMinInclusive()).isEqualTo(false); assertThat(range.isMaxInclusive()).isEqualTo(true); QueryItem[] orderByItems = deserialized.getOrderByItems(); assertThat(orderByItems).isNotNull(); assertThat(orderByItems.length).isEqualTo(1); assertThat(orderByItems[0].getItem()).isEqualTo(42); String rid = deserialized.getRid(); assertThat(rid).isEqualTo("rid"); boolean inclusive = deserialized.getInclusive(); assertThat(inclusive).isEqualTo(false); } { 
ValueHolder<OrderByContinuationToken> outOrderByContinuationToken = new ValueHolder<OrderByContinuationToken>(); assertThat(OrderByContinuationToken.tryParse("{\"property\" : \"Not a valid Order By Token\"}", outOrderByContinuationToken)).isFalse(); } } @Test(groups = { "simple" }, timeOut = TIMEOUT * 10, dataProvider = "sortOrder", retryAnalyzer = RetryAnalyzer.class) public void queryDocumentsWithOrderByContinuationTokensInteger(String sortOrder) throws Exception { String query = String.format("SELECT * FROM c ORDER BY c.propInt %s", sortOrder); Comparator<Integer> order = sortOrder.equals("ASC")?Comparator.naturalOrder():Comparator.reverseOrder(); Comparator<Integer> validatorComparator = Comparator.nullsFirst(order); List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("propInt", d -> d.getInt("propInt"), validatorComparator); this.queryWithContinuationTokensAndPageSizes(query, new int[] { 1, 5, 10, 100}, expectedResourceIds); } @Test(groups = { "simple" }, timeOut = TIMEOUT * 10, dataProvider = "sortOrder") public void queryDocumentsWithOrderByContinuationTokensString(String sortOrder) throws Exception { String query = String.format("SELECT * FROM c ORDER BY c.id %s", sortOrder); Comparator<String> order = sortOrder.equals("ASC")?Comparator.naturalOrder():Comparator.reverseOrder(); Comparator<String> validatorComparator = Comparator.nullsFirst(order); List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("id", d -> d.getString("id"), validatorComparator); this.queryWithContinuationTokensAndPageSizes(query, new int[] { 1, 5, 10, 100 }, expectedResourceIds); } @Test(groups = { "simple" }, timeOut = TIMEOUT * 10, dataProvider = "sortOrder") public void queryDocumentsWithInvalidOrderByContinuationTokensString(String sortOrder) throws Exception { String query = String.format("SELECT * FROM c ORDER BY c.id %s", sortOrder); Comparator<String> validatorComparator; if(sortOrder.equals("ASC")) { validatorComparator = 
Comparator.nullsFirst(Comparator.<String>naturalOrder()); }else{ validatorComparator = Comparator.nullsFirst(Comparator.<String>reverseOrder()); } List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("id", d -> d.getString("id"), validatorComparator); this.assertInvalidContinuationToken(query, new int[] { 1, 5, 10, 100 }, expectedResourceIds); } public CosmosItemProperties createDocument(CosmosContainer cosmosContainer, Map<String, Object> keyValueProps) throws CosmosClientException { CosmosItemProperties docDefinition = getDocumentDefinition(keyValueProps); return cosmosContainer.createItem(docDefinition).block().properties(); } public List<CosmosItemProperties> bulkInsert(CosmosContainer cosmosContainer, List<Map<String, Object>> keyValuePropsList) { ArrayList<CosmosItemProperties> result = new ArrayList<CosmosItemProperties>(); for(Map<String, Object> keyValueProps: keyValuePropsList) { CosmosItemProperties docDefinition = getDocumentDefinition(keyValueProps); result.add(docDefinition); } return bulkInsertBlocking(cosmosContainer, result); } @BeforeMethod(groups = { "simple" }) public void beforeMethod() throws Exception { TimeUnit.SECONDS.sleep(10); } @BeforeClass(groups = { "simple" }, timeOut = SETUP_TIMEOUT) @AfterClass(groups = { "simple" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() { safeClose(client); } private void assertInvalidContinuationToken(String query, int[] pageSize, List<String> expectedIds) { String requestContinuation = null; do { FeedOptions options = new FeedOptions(); options.maxItemCount(1); options.enableCrossPartitionQuery(true); options.maxDegreeOfParallelism(2); OrderByContinuationToken orderByContinuationToken = new OrderByContinuationToken( new CompositeContinuationToken( "asdf", new Range<String>("A", "D", false, true)), new QueryItem[] {new QueryItem("{\"item\" : 42}")}, "rid", false); options.requestContinuation(orderByContinuationToken.toString()); 
Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options); TestSubscriber<FeedResponse<CosmosItemProperties>> testSubscriber = new TestSubscriber<>(); queryObservable.subscribe(testSubscriber); testSubscriber.awaitTerminalEvent(TIMEOUT, TimeUnit.MILLISECONDS); testSubscriber.assertError(CosmosClientException.class); } while (requestContinuation != null); } private void queryWithContinuationTokensAndPageSizes(String query, int[] pageSizes, List<String> expectedIds) { for (int pageSize : pageSizes) { List<CosmosItemProperties> receivedDocuments = this.queryWithContinuationTokens(query, pageSize); List<String> actualIds = new ArrayList<String>(); for (CosmosItemProperties document : receivedDocuments) { actualIds.add(document.resourceId()); } assertThat(actualIds).containsExactlyElementsOf(expectedIds); } } private List<CosmosItemProperties> queryWithContinuationTokens(String query, int pageSize) { String requestContinuation = null; List<String> continuationTokens = new ArrayList<String>(); List<CosmosItemProperties> receivedDocuments = new ArrayList<CosmosItemProperties>(); do { FeedOptions options = new FeedOptions(); options.maxItemCount(pageSize); options.enableCrossPartitionQuery(true); options.maxDegreeOfParallelism(2); options.requestContinuation(requestContinuation); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options); TestSubscriber<FeedResponse<CosmosItemProperties>> testSubscriber = new TestSubscriber<>(); queryObservable.subscribe(testSubscriber); testSubscriber.awaitTerminalEvent(TIMEOUT, TimeUnit.MILLISECONDS); testSubscriber.assertNoErrors(); testSubscriber.assertComplete(); FeedResponse<CosmosItemProperties> firstPage = (FeedResponse<CosmosItemProperties>) testSubscriber.getEvents().get(0).get(0); requestContinuation = firstPage.continuationToken(); receivedDocuments.addAll(firstPage.results()); continuationTokens.add(requestContinuation); } while 
(requestContinuation != null); return receivedDocuments; } private static CosmosItemProperties getDocumentDefinition(String partitionKey, String id, Map<String, Object> keyValuePair) { StringBuilder sb = new StringBuilder(); sb.append("{\n"); for(String key: keyValuePair.keySet()) { Object val = keyValuePair.get(key); sb.append(" "); sb.append("\"").append(key).append("\"").append(" :" ); if (val == null) { sb.append("null"); } else { sb.append(toJson(val)); } sb.append(",\n"); } sb.append(String.format(" \"id\": \"%s\",\n", id)); sb.append(String.format(" \"mypk\": \"%s\"\n", partitionKey)); sb.append("}"); return new CosmosItemProperties(sb.toString()); } private static CosmosItemProperties getDocumentDefinition(Map<String, Object> keyValuePair) { String uuid = UUID.randomUUID().toString(); return getDocumentDefinition(uuid, uuid, keyValuePair); } private static String toJson(Object object){ try { return Utils.getSimpleObjectMapper().writeValueAsString(object); } catch (JsonProcessingException e) { throw new IllegalStateException(e); } } }
Good catch
public NettyAsyncHttpClient build() { HttpClient nettyHttpClient = HttpClient.create() .port(port) .wiretap(enableWiretap) .tcpConfiguration(tcpConfig -> { if (nioEventLoopGroup != null) { tcpConfig = tcpConfig.runOn(nioEventLoopGroup); } if (proxyOptions != null) { ProxyProvider.Proxy nettyProxy; switch (proxyOptions.getType()) { case HTTP: nettyProxy = ProxyProvider.Proxy.HTTP; break; case SOCKS4: nettyProxy = ProxyProvider.Proxy.SOCKS4; break; case SOCKS5: nettyProxy = ProxyProvider.Proxy.SOCKS5; break; default: throw logger.logExceptionAsWarning(new IllegalStateException( String.format("Unknown Proxy type '%s' in use. Not configuring Netty proxy.", proxyOptions.getType()))); } return tcpConfig.proxy(ts -> ts.type(nettyProxy).address(proxyOptions.getAddress())); } return tcpConfig; }); return new NettyAsyncHttpClient(nettyHttpClient); }
throw logger.logExceptionAsWarning(new IllegalStateException(
public NettyAsyncHttpClient build() { HttpClient nettyHttpClient = HttpClient.create() .port(port) .wiretap(enableWiretap) .tcpConfiguration(tcpConfig -> { if (nioEventLoopGroup != null) { tcpConfig = tcpConfig.runOn(nioEventLoopGroup); } if (proxyOptions != null) { ProxyProvider.Proxy nettyProxy; switch (proxyOptions.getType()) { case HTTP: nettyProxy = ProxyProvider.Proxy.HTTP; break; case SOCKS4: nettyProxy = ProxyProvider.Proxy.SOCKS4; break; case SOCKS5: nettyProxy = ProxyProvider.Proxy.SOCKS5; break; default: throw logger.logExceptionAsError(new IllegalStateException( String.format("Unknown Proxy type '%s' in use. Not configuring Netty proxy.", proxyOptions.getType()))); } return tcpConfig.proxy(ts -> ts.type(nettyProxy).address(proxyOptions.getAddress())); } return tcpConfig; }); return new NettyAsyncHttpClient(nettyHttpClient); }
class NettyAsyncHttpClientBuilder { private final ClientLogger logger = new ClientLogger(NettyAsyncHttpClientBuilder.class); private ProxyOptions proxyOptions; private boolean enableWiretap; private int port = 80; private NioEventLoopGroup nioEventLoopGroup; /** * Creates a new builder instance, where a builder is capable of generating multiple instances of * {@link NettyAsyncHttpClient}. */ public NettyAsyncHttpClientBuilder() { } /** * Creates a new {@link NettyAsyncHttpClient} instance on every call, using the configuration set in the builder at * the time of the build method call. * * @return A new NettyAsyncHttpClient instance. * @throws IllegalStateException If the builder is configured to use an unknown proxy type. */ /** * Sets the {@link ProxyOptions proxy options} that the client will use. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.core.http.netty.NettyAsyncHttpClientBuilder * * @param proxyOptions The proxy configuration to use. * @return the updated NettyAsyncHttpClientBuilder object */ public NettyAsyncHttpClientBuilder proxy(ProxyOptions proxyOptions) { this.proxyOptions = proxyOptions; return this; } /** * Enables the Netty wiretap feature. * * @param enableWiretap Flag indicating wiretap status * @return the updated NettyAsyncHttpClientBuilder object */ public NettyAsyncHttpClientBuilder wiretap(boolean enableWiretap) { this.enableWiretap = enableWiretap; return this; } /** * Sets the port which this client should connect, which by default will be set to port 80. * * @param port The port to connect to. * @return the updated NettyAsyncHttpClientBuilder object */ public NettyAsyncHttpClientBuilder port(int port) { this.port = port; return this; } /** * Sets the NIO event loop group that will be used to run IO loops. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.core.http.netty.NettyAsyncHttpClientBuilder * * @param nioEventLoopGroup The {@link NioEventLoopGroup} that will run IO loops. 
* @return the updated NettyAsyncHttpClientBuilder object */ public NettyAsyncHttpClientBuilder nioEventLoopGroup(NioEventLoopGroup nioEventLoopGroup) { this.nioEventLoopGroup = nioEventLoopGroup; return this; } }
class NettyAsyncHttpClientBuilder { private final ClientLogger logger = new ClientLogger(NettyAsyncHttpClientBuilder.class); private ProxyOptions proxyOptions; private boolean enableWiretap; private int port = 80; private NioEventLoopGroup nioEventLoopGroup; /** * Creates a new builder instance, where a builder is capable of generating multiple instances of * {@link NettyAsyncHttpClient}. */ public NettyAsyncHttpClientBuilder() { } /** * Creates a new {@link NettyAsyncHttpClient} instance on every call, using the configuration set in the builder at * the time of the build method call. * * @return A new NettyAsyncHttpClient instance. * @throws IllegalStateException If the builder is configured to use an unknown proxy type. */ /** * Sets the {@link ProxyOptions proxy options} that the client will use. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.core.http.netty.NettyAsyncHttpClientBuilder * * @param proxyOptions The proxy configuration to use. * @return the updated NettyAsyncHttpClientBuilder object */ public NettyAsyncHttpClientBuilder proxy(ProxyOptions proxyOptions) { this.proxyOptions = proxyOptions; return this; } /** * Enables the Netty wiretap feature. * * @param enableWiretap Flag indicating wiretap status * @return the updated NettyAsyncHttpClientBuilder object */ public NettyAsyncHttpClientBuilder wiretap(boolean enableWiretap) { this.enableWiretap = enableWiretap; return this; } /** * Sets the port which this client should connect, which by default will be set to port 80. * * @param port The port to connect to. * @return the updated NettyAsyncHttpClientBuilder object */ public NettyAsyncHttpClientBuilder port(int port) { this.port = port; return this; } /** * Sets the NIO event loop group that will be used to run IO loops. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.core.http.netty.NettyAsyncHttpClientBuilder * * @param nioEventLoopGroup The {@link NioEventLoopGroup} that will run IO loops. 
* @return the updated NettyAsyncHttpClientBuilder object */ public NettyAsyncHttpClientBuilder nioEventLoopGroup(NioEventLoopGroup nioEventLoopGroup) { this.nioEventLoopGroup = nioEventLoopGroup; return this; } }
Only checking nullness should suffice, no `Interceptors` is allowed.
public OkHttpAsyncHttpClientBuilder networkInterceptors(List<Interceptor> networkInterceptors) { this.networkInterceptors = Objects.requireNonNull(networkInterceptors, "networkInterceptors cannot be null."); return this; }
this.networkInterceptors = Objects.requireNonNull(networkInterceptors, "networkInterceptors cannot be null.");
public OkHttpAsyncHttpClientBuilder networkInterceptors(List<Interceptor> networkInterceptors) { this.networkInterceptors = Objects.requireNonNull(networkInterceptors, "networkInterceptors cannot be null."); return this; }
class OkHttpAsyncHttpClientBuilder { private final okhttp3.OkHttpClient okHttpClient; private static final Duration DEFAULT_READ_TIMEOUT = Duration.ofSeconds(120); private static final Duration DEFAULT_CONNECT_TIMEOUT = Duration.ofSeconds(60); private List<Interceptor> networkInterceptors = new ArrayList<>(); private Duration readTimeout; private Duration connectionTimeout; private ConnectionPool connectionPool; private Dispatcher dispatcher; private java.net.Proxy proxy; private Authenticator proxyAuthenticator; /** * Creates OkHttpAsyncHttpClientBuilder. */ public OkHttpAsyncHttpClientBuilder() { this.okHttpClient = null; } /** * Creates OkHttpAsyncHttpClientBuilder from the builder of an existing OkHttpClient. * * @param okHttpClient the httpclient */ public OkHttpAsyncHttpClientBuilder(OkHttpClient okHttpClient) { this.okHttpClient = Objects.requireNonNull(okHttpClient, "okHttpClient cannot be null."); } /** * Add a network layer interceptor to Http request pipeline. * * @param networkInterceptor the interceptor to add * @return the updated OkHttpAsyncHttpClientBuilder object */ public OkHttpAsyncHttpClientBuilder addNetworkInterceptor(Interceptor networkInterceptor) { Objects.requireNonNull(networkInterceptor); this.networkInterceptors.add(networkInterceptor); return this; } /** * Add network layer interceptors to Http request pipeline. * * This replaces all previously-set interceptors. * * @param networkInterceptors the interceptors to add * @return the updated OkHttpAsyncHttpClientBuilder object */ /** * Sets the read timeout. * * The default read timeout is 120 seconds. * * @param readTimeout the read timeout * @return the updated OkHttpAsyncHttpClientBuilder object */ public OkHttpAsyncHttpClientBuilder readTimeout(Duration readTimeout) { this.readTimeout = readTimeout; return this; } /** * Sets the connection timeout. * * The default connection timeout is 60 seconds. 
* * @param connectionTimeout the connection timeout * @return the updated OkHttpAsyncHttpClientBuilder object */ public OkHttpAsyncHttpClientBuilder connectionTimeout(Duration connectionTimeout) { this.connectionTimeout = connectionTimeout; return this; } /** * Sets the Http connection pool. * * @param connectionPool the OkHttp connection pool to use * @return the updated OkHttpAsyncHttpClientBuilder object */ public OkHttpAsyncHttpClientBuilder connectionPool(ConnectionPool connectionPool) { this.connectionPool = Objects.requireNonNull(connectionPool, "connectionPool cannot be null."); return this; } /** * Sets the dispatcher that also composes the thread pool for executing HTTP requests. * * @param dispatcher the dispatcher to use * @return the updated OkHttpAsyncHttpClientBuilder object */ public OkHttpAsyncHttpClientBuilder dispatcher(Dispatcher dispatcher) { this.dispatcher = Objects.requireNonNull(dispatcher, "dispatcher cannot be null."); return this; } /** * Sets the proxy. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.core.http.okhttp.OkHttpAsyncHttpClientBuilder * * @param proxy the proxy * @return the updated OkHttpAsyncHttpClientBuilder object */ public OkHttpAsyncHttpClientBuilder proxy(Proxy proxy) { this.proxy = proxy; return this; } /** * Sets the proxy authenticator. * * @param proxyAuthenticator the proxy authenticator * @return the updated OkHttpAsyncHttpClientBuilder object */ public OkHttpAsyncHttpClientBuilder proxyAuthenticator(Authenticator proxyAuthenticator) { this.proxyAuthenticator = Objects.requireNonNull(proxyAuthenticator, "proxyAuthenticator cannot be null."); return this; } /** * Build a HttpClient with current configurations. * * @return a {@link HttpClient}. */ public HttpClient build() { OkHttpClient.Builder httpClientBuilder = this.okHttpClient == null ? 
new OkHttpClient.Builder() : this.okHttpClient.newBuilder(); for (Interceptor interceptor : this.networkInterceptors) { httpClientBuilder = httpClientBuilder.addNetworkInterceptor(interceptor); } if (this.readTimeout != null) { httpClientBuilder = httpClientBuilder.readTimeout(this.readTimeout); } else { httpClientBuilder = httpClientBuilder.readTimeout(DEFAULT_READ_TIMEOUT); } if (this.connectionTimeout != null) { httpClientBuilder = httpClientBuilder.connectTimeout(this.connectionTimeout); } else { httpClientBuilder = httpClientBuilder.connectTimeout(DEFAULT_CONNECT_TIMEOUT); } if (this.connectionPool != null) { httpClientBuilder = httpClientBuilder.connectionPool(connectionPool); } if (this.dispatcher != null) { httpClientBuilder = httpClientBuilder.dispatcher(dispatcher); } httpClientBuilder = httpClientBuilder.proxy(this.proxy); if (this.proxyAuthenticator != null) { httpClientBuilder = httpClientBuilder.authenticator(this.proxyAuthenticator); } return new OkHttpAsyncHttpClient(httpClientBuilder.build()); } }
class OkHttpAsyncHttpClientBuilder { private final okhttp3.OkHttpClient okHttpClient; private static final Duration DEFAULT_READ_TIMEOUT = Duration.ofSeconds(120); private static final Duration DEFAULT_CONNECT_TIMEOUT = Duration.ofSeconds(60); private List<Interceptor> networkInterceptors = new ArrayList<>(); private Duration readTimeout; private Duration connectionTimeout; private ConnectionPool connectionPool; private Dispatcher dispatcher; private java.net.Proxy proxy; private Authenticator proxyAuthenticator; /** * Creates OkHttpAsyncHttpClientBuilder. */ public OkHttpAsyncHttpClientBuilder() { this.okHttpClient = null; } /** * Creates OkHttpAsyncHttpClientBuilder from the builder of an existing OkHttpClient. * * @param okHttpClient the httpclient */ public OkHttpAsyncHttpClientBuilder(OkHttpClient okHttpClient) { this.okHttpClient = Objects.requireNonNull(okHttpClient, "okHttpClient cannot be null."); } /** * Add a network layer interceptor to Http request pipeline. * * @param networkInterceptor the interceptor to add * @return the updated OkHttpAsyncHttpClientBuilder object */ public OkHttpAsyncHttpClientBuilder addNetworkInterceptor(Interceptor networkInterceptor) { Objects.requireNonNull(networkInterceptor); this.networkInterceptors.add(networkInterceptor); return this; } /** * Add network layer interceptors to Http request pipeline. * * This replaces all previously-set interceptors. * * @param networkInterceptors the interceptors to add * @return the updated OkHttpAsyncHttpClientBuilder object */ /** * Sets the read timeout. * * The default read timeout is 120 seconds. * * @param readTimeout the read timeout * @return the updated OkHttpAsyncHttpClientBuilder object */ public OkHttpAsyncHttpClientBuilder readTimeout(Duration readTimeout) { this.readTimeout = readTimeout; return this; } /** * Sets the connection timeout. * * The default connection timeout is 60 seconds. 
* * @param connectionTimeout the connection timeout * @return the updated OkHttpAsyncHttpClientBuilder object */ public OkHttpAsyncHttpClientBuilder connectionTimeout(Duration connectionTimeout) { this.connectionTimeout = connectionTimeout; return this; } /** * Sets the Http connection pool. * * @param connectionPool the OkHttp connection pool to use * @return the updated OkHttpAsyncHttpClientBuilder object */ public OkHttpAsyncHttpClientBuilder connectionPool(ConnectionPool connectionPool) { this.connectionPool = Objects.requireNonNull(connectionPool, "connectionPool cannot be null."); return this; } /** * Sets the dispatcher that also composes the thread pool for executing HTTP requests. * * @param dispatcher the dispatcher to use * @return the updated OkHttpAsyncHttpClientBuilder object */ public OkHttpAsyncHttpClientBuilder dispatcher(Dispatcher dispatcher) { this.dispatcher = Objects.requireNonNull(dispatcher, "dispatcher cannot be null."); return this; } /** * Sets the proxy. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.core.http.okhttp.OkHttpAsyncHttpClientBuilder * * @param proxy the proxy * @return the updated OkHttpAsyncHttpClientBuilder object */ public OkHttpAsyncHttpClientBuilder proxy(Proxy proxy) { this.proxy = proxy; return this; } /** * Sets the proxy authenticator. * * @param proxyAuthenticator the proxy authenticator * @return the updated OkHttpAsyncHttpClientBuilder object */ public OkHttpAsyncHttpClientBuilder proxyAuthenticator(Authenticator proxyAuthenticator) { this.proxyAuthenticator = Objects.requireNonNull(proxyAuthenticator, "proxyAuthenticator cannot be null."); return this; } /** * Build a HttpClient with current configurations. * * @return a {@link HttpClient}. */ public HttpClient build() { OkHttpClient.Builder httpClientBuilder = this.okHttpClient == null ? 
new OkHttpClient.Builder() : this.okHttpClient.newBuilder(); for (Interceptor interceptor : this.networkInterceptors) { httpClientBuilder = httpClientBuilder.addNetworkInterceptor(interceptor); } if (this.readTimeout != null) { httpClientBuilder = httpClientBuilder.readTimeout(this.readTimeout); } else { httpClientBuilder = httpClientBuilder.readTimeout(DEFAULT_READ_TIMEOUT); } if (this.connectionTimeout != null) { httpClientBuilder = httpClientBuilder.connectTimeout(this.connectionTimeout); } else { httpClientBuilder = httpClientBuilder.connectTimeout(DEFAULT_CONNECT_TIMEOUT); } if (this.connectionPool != null) { httpClientBuilder = httpClientBuilder.connectionPool(connectionPool); } if (this.dispatcher != null) { httpClientBuilder = httpClientBuilder.dispatcher(dispatcher); } httpClientBuilder = httpClientBuilder.proxy(this.proxy); if (this.proxyAuthenticator != null) { httpClientBuilder = httpClientBuilder.authenticator(this.proxyAuthenticator); } return new OkHttpAsyncHttpClient(httpClientBuilder.build()); } }
Is this going to proceed without using the Proxy, or will this result in failing to create an HttpClient? If we fail to create the HttpClient, it should be logged as an error. If we create a client without the proxy, then a warning is fine.
public NettyAsyncHttpClient build() { HttpClient nettyHttpClient = HttpClient.create() .port(port) .wiretap(enableWiretap) .tcpConfiguration(tcpConfig -> { if (nioEventLoopGroup != null) { tcpConfig = tcpConfig.runOn(nioEventLoopGroup); } if (proxyOptions != null) { ProxyProvider.Proxy nettyProxy; switch (proxyOptions.getType()) { case HTTP: nettyProxy = ProxyProvider.Proxy.HTTP; break; case SOCKS4: nettyProxy = ProxyProvider.Proxy.SOCKS4; break; case SOCKS5: nettyProxy = ProxyProvider.Proxy.SOCKS5; break; default: throw logger.logExceptionAsWarning(new IllegalStateException( String.format("Unknown Proxy type '%s' in use. Not configuring Netty proxy.", proxyOptions.getType()))); } return tcpConfig.proxy(ts -> ts.type(nettyProxy).address(proxyOptions.getAddress())); } return tcpConfig; }); return new NettyAsyncHttpClient(nettyHttpClient); }
throw logger.logExceptionAsWarning(new IllegalStateException(
public NettyAsyncHttpClient build() { HttpClient nettyHttpClient = HttpClient.create() .port(port) .wiretap(enableWiretap) .tcpConfiguration(tcpConfig -> { if (nioEventLoopGroup != null) { tcpConfig = tcpConfig.runOn(nioEventLoopGroup); } if (proxyOptions != null) { ProxyProvider.Proxy nettyProxy; switch (proxyOptions.getType()) { case HTTP: nettyProxy = ProxyProvider.Proxy.HTTP; break; case SOCKS4: nettyProxy = ProxyProvider.Proxy.SOCKS4; break; case SOCKS5: nettyProxy = ProxyProvider.Proxy.SOCKS5; break; default: throw logger.logExceptionAsError(new IllegalStateException( String.format("Unknown Proxy type '%s' in use. Not configuring Netty proxy.", proxyOptions.getType()))); } return tcpConfig.proxy(ts -> ts.type(nettyProxy).address(proxyOptions.getAddress())); } return tcpConfig; }); return new NettyAsyncHttpClient(nettyHttpClient); }
class NettyAsyncHttpClientBuilder { private final ClientLogger logger = new ClientLogger(NettyAsyncHttpClientBuilder.class); private ProxyOptions proxyOptions; private boolean enableWiretap; private int port = 80; private NioEventLoopGroup nioEventLoopGroup; /** * Creates a new builder instance, where a builder is capable of generating multiple instances of * {@link NettyAsyncHttpClient}. */ public NettyAsyncHttpClientBuilder() { } /** * Creates a new {@link NettyAsyncHttpClient} instance on every call, using the configuration set in the builder at * the time of the build method call. * * @return A new NettyAsyncHttpClient instance. * @throws IllegalStateException If the builder is configured to use an unknown proxy type. */ /** * Sets the {@link ProxyOptions proxy options} that the client will use. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.core.http.netty.NettyAsyncHttpClientBuilder * * @param proxyOptions The proxy configuration to use. * @return the updated NettyAsyncHttpClientBuilder object */ public NettyAsyncHttpClientBuilder proxy(ProxyOptions proxyOptions) { this.proxyOptions = proxyOptions; return this; } /** * Enables the Netty wiretap feature. * * @param enableWiretap Flag indicating wiretap status * @return the updated NettyAsyncHttpClientBuilder object */ public NettyAsyncHttpClientBuilder wiretap(boolean enableWiretap) { this.enableWiretap = enableWiretap; return this; } /** * Sets the port which this client should connect, which by default will be set to port 80. * * @param port The port to connect to. * @return the updated NettyAsyncHttpClientBuilder object */ public NettyAsyncHttpClientBuilder port(int port) { this.port = port; return this; } /** * Sets the NIO event loop group that will be used to run IO loops. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.core.http.netty.NettyAsyncHttpClientBuilder * * @param nioEventLoopGroup The {@link NioEventLoopGroup} that will run IO loops. 
* @return the updated NettyAsyncHttpClientBuilder object */ public NettyAsyncHttpClientBuilder nioEventLoopGroup(NioEventLoopGroup nioEventLoopGroup) { this.nioEventLoopGroup = nioEventLoopGroup; return this; } }
class NettyAsyncHttpClientBuilder { private final ClientLogger logger = new ClientLogger(NettyAsyncHttpClientBuilder.class); private ProxyOptions proxyOptions; private boolean enableWiretap; private int port = 80; private NioEventLoopGroup nioEventLoopGroup; /** * Creates a new builder instance, where a builder is capable of generating multiple instances of * {@link NettyAsyncHttpClient}. */ public NettyAsyncHttpClientBuilder() { } /** * Creates a new {@link NettyAsyncHttpClient} instance on every call, using the configuration set in the builder at * the time of the build method call. * * @return A new NettyAsyncHttpClient instance. * @throws IllegalStateException If the builder is configured to use an unknown proxy type. */ /** * Sets the {@link ProxyOptions proxy options} that the client will use. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.core.http.netty.NettyAsyncHttpClientBuilder * * @param proxyOptions The proxy configuration to use. * @return the updated NettyAsyncHttpClientBuilder object */ public NettyAsyncHttpClientBuilder proxy(ProxyOptions proxyOptions) { this.proxyOptions = proxyOptions; return this; } /** * Enables the Netty wiretap feature. * * @param enableWiretap Flag indicating wiretap status * @return the updated NettyAsyncHttpClientBuilder object */ public NettyAsyncHttpClientBuilder wiretap(boolean enableWiretap) { this.enableWiretap = enableWiretap; return this; } /** * Sets the port which this client should connect, which by default will be set to port 80. * * @param port The port to connect to. * @return the updated NettyAsyncHttpClientBuilder object */ public NettyAsyncHttpClientBuilder port(int port) { this.port = port; return this; } /** * Sets the NIO event loop group that will be used to run IO loops. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.core.http.netty.NettyAsyncHttpClientBuilder * * @param nioEventLoopGroup The {@link NioEventLoopGroup} that will run IO loops. 
* @return the updated NettyAsyncHttpClientBuilder object */ public NettyAsyncHttpClientBuilder nioEventLoopGroup(NioEventLoopGroup nioEventLoopGroup) { this.nioEventLoopGroup = nioEventLoopGroup; return this; } }
Should this check for both null and empty?
public OkHttpAsyncHttpClientBuilder networkInterceptors(List<Interceptor> networkInterceptors) { this.networkInterceptors = Objects.requireNonNull(networkInterceptors, "networkInterceptors cannot be null."); return this; }
this.networkInterceptors = Objects.requireNonNull(networkInterceptors, "networkInterceptors cannot be null.");
public OkHttpAsyncHttpClientBuilder networkInterceptors(List<Interceptor> networkInterceptors) { this.networkInterceptors = Objects.requireNonNull(networkInterceptors, "networkInterceptors cannot be null."); return this; }
class OkHttpAsyncHttpClientBuilder { private final okhttp3.OkHttpClient okHttpClient; private static final Duration DEFAULT_READ_TIMEOUT = Duration.ofSeconds(120); private static final Duration DEFAULT_CONNECT_TIMEOUT = Duration.ofSeconds(60); private List<Interceptor> networkInterceptors = new ArrayList<>(); private Duration readTimeout; private Duration connectionTimeout; private ConnectionPool connectionPool; private Dispatcher dispatcher; private java.net.Proxy proxy; private Authenticator proxyAuthenticator; /** * Creates OkHttpAsyncHttpClientBuilder. */ public OkHttpAsyncHttpClientBuilder() { this.okHttpClient = null; } /** * Creates OkHttpAsyncHttpClientBuilder from the builder of an existing OkHttpClient. * * @param okHttpClient the httpclient */ public OkHttpAsyncHttpClientBuilder(OkHttpClient okHttpClient) { this.okHttpClient = Objects.requireNonNull(okHttpClient, "okHttpClient cannot be null."); } /** * Add a network layer interceptor to Http request pipeline. * * @param networkInterceptor the interceptor to add * @return the updated OkHttpAsyncHttpClientBuilder object */ public OkHttpAsyncHttpClientBuilder addNetworkInterceptor(Interceptor networkInterceptor) { Objects.requireNonNull(networkInterceptor); this.networkInterceptors.add(networkInterceptor); return this; } /** * Add network layer interceptors to Http request pipeline. * * This replaces all previously-set interceptors. * * @param networkInterceptors the interceptors to add * @return the updated OkHttpAsyncHttpClientBuilder object */ /** * Sets the read timeout. * * The default read timeout is 120 seconds. * * @param readTimeout the read timeout * @return the updated OkHttpAsyncHttpClientBuilder object */ public OkHttpAsyncHttpClientBuilder readTimeout(Duration readTimeout) { this.readTimeout = readTimeout; return this; } /** * Sets the connection timeout. * * The default connection timeout is 60 seconds. 
* * @param connectionTimeout the connection timeout * @return the updated OkHttpAsyncHttpClientBuilder object */ public OkHttpAsyncHttpClientBuilder connectionTimeout(Duration connectionTimeout) { this.connectionTimeout = connectionTimeout; return this; } /** * Sets the Http connection pool. * * @param connectionPool the OkHttp connection pool to use * @return the updated OkHttpAsyncHttpClientBuilder object */ public OkHttpAsyncHttpClientBuilder connectionPool(ConnectionPool connectionPool) { this.connectionPool = Objects.requireNonNull(connectionPool, "connectionPool cannot be null."); return this; } /** * Sets the dispatcher that also composes the thread pool for executing HTTP requests. * * @param dispatcher the dispatcher to use * @return the updated OkHttpAsyncHttpClientBuilder object */ public OkHttpAsyncHttpClientBuilder dispatcher(Dispatcher dispatcher) { this.dispatcher = Objects.requireNonNull(dispatcher, "dispatcher cannot be null."); return this; } /** * Sets the proxy. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.core.http.okhttp.OkHttpAsyncHttpClientBuilder * * @param proxy the proxy * @return the updated OkHttpAsyncHttpClientBuilder object */ public OkHttpAsyncHttpClientBuilder proxy(Proxy proxy) { this.proxy = proxy; return this; } /** * Sets the proxy authenticator. * * @param proxyAuthenticator the proxy authenticator * @return the updated OkHttpAsyncHttpClientBuilder object */ public OkHttpAsyncHttpClientBuilder proxyAuthenticator(Authenticator proxyAuthenticator) { this.proxyAuthenticator = Objects.requireNonNull(proxyAuthenticator, "proxyAuthenticator cannot be null."); return this; } /** * Build a HttpClient with current configurations. * * @return a {@link HttpClient}. */ public HttpClient build() { OkHttpClient.Builder httpClientBuilder = this.okHttpClient == null ? 
new OkHttpClient.Builder() : this.okHttpClient.newBuilder(); for (Interceptor interceptor : this.networkInterceptors) { httpClientBuilder = httpClientBuilder.addNetworkInterceptor(interceptor); } if (this.readTimeout != null) { httpClientBuilder = httpClientBuilder.readTimeout(this.readTimeout); } else { httpClientBuilder = httpClientBuilder.readTimeout(DEFAULT_READ_TIMEOUT); } if (this.connectionTimeout != null) { httpClientBuilder = httpClientBuilder.connectTimeout(this.connectionTimeout); } else { httpClientBuilder = httpClientBuilder.connectTimeout(DEFAULT_CONNECT_TIMEOUT); } if (this.connectionPool != null) { httpClientBuilder = httpClientBuilder.connectionPool(connectionPool); } if (this.dispatcher != null) { httpClientBuilder = httpClientBuilder.dispatcher(dispatcher); } httpClientBuilder = httpClientBuilder.proxy(this.proxy); if (this.proxyAuthenticator != null) { httpClientBuilder = httpClientBuilder.authenticator(this.proxyAuthenticator); } return new OkHttpAsyncHttpClient(httpClientBuilder.build()); } }
class OkHttpAsyncHttpClientBuilder { private final okhttp3.OkHttpClient okHttpClient; private static final Duration DEFAULT_READ_TIMEOUT = Duration.ofSeconds(120); private static final Duration DEFAULT_CONNECT_TIMEOUT = Duration.ofSeconds(60); private List<Interceptor> networkInterceptors = new ArrayList<>(); private Duration readTimeout; private Duration connectionTimeout; private ConnectionPool connectionPool; private Dispatcher dispatcher; private java.net.Proxy proxy; private Authenticator proxyAuthenticator; /** * Creates OkHttpAsyncHttpClientBuilder. */ public OkHttpAsyncHttpClientBuilder() { this.okHttpClient = null; } /** * Creates OkHttpAsyncHttpClientBuilder from the builder of an existing OkHttpClient. * * @param okHttpClient the httpclient */ public OkHttpAsyncHttpClientBuilder(OkHttpClient okHttpClient) { this.okHttpClient = Objects.requireNonNull(okHttpClient, "okHttpClient cannot be null."); } /** * Add a network layer interceptor to Http request pipeline. * * @param networkInterceptor the interceptor to add * @return the updated OkHttpAsyncHttpClientBuilder object */ public OkHttpAsyncHttpClientBuilder addNetworkInterceptor(Interceptor networkInterceptor) { Objects.requireNonNull(networkInterceptor); this.networkInterceptors.add(networkInterceptor); return this; } /** * Add network layer interceptors to Http request pipeline. * * This replaces all previously-set interceptors. * * @param networkInterceptors the interceptors to add * @return the updated OkHttpAsyncHttpClientBuilder object */ /** * Sets the read timeout. * * The default read timeout is 120 seconds. * * @param readTimeout the read timeout * @return the updated OkHttpAsyncHttpClientBuilder object */ public OkHttpAsyncHttpClientBuilder readTimeout(Duration readTimeout) { this.readTimeout = readTimeout; return this; } /** * Sets the connection timeout. * * The default connection timeout is 60 seconds. 
* * @param connectionTimeout the connection timeout * @return the updated OkHttpAsyncHttpClientBuilder object */ public OkHttpAsyncHttpClientBuilder connectionTimeout(Duration connectionTimeout) { this.connectionTimeout = connectionTimeout; return this; } /** * Sets the Http connection pool. * * @param connectionPool the OkHttp connection pool to use * @return the updated OkHttpAsyncHttpClientBuilder object */ public OkHttpAsyncHttpClientBuilder connectionPool(ConnectionPool connectionPool) { this.connectionPool = Objects.requireNonNull(connectionPool, "connectionPool cannot be null."); return this; } /** * Sets the dispatcher that also composes the thread pool for executing HTTP requests. * * @param dispatcher the dispatcher to use * @return the updated OkHttpAsyncHttpClientBuilder object */ public OkHttpAsyncHttpClientBuilder dispatcher(Dispatcher dispatcher) { this.dispatcher = Objects.requireNonNull(dispatcher, "dispatcher cannot be null."); return this; } /** * Sets the proxy. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.core.http.okhttp.OkHttpAsyncHttpClientBuilder * * @param proxy the proxy * @return the updated OkHttpAsyncHttpClientBuilder object */ public OkHttpAsyncHttpClientBuilder proxy(Proxy proxy) { this.proxy = proxy; return this; } /** * Sets the proxy authenticator. * * @param proxyAuthenticator the proxy authenticator * @return the updated OkHttpAsyncHttpClientBuilder object */ public OkHttpAsyncHttpClientBuilder proxyAuthenticator(Authenticator proxyAuthenticator) { this.proxyAuthenticator = Objects.requireNonNull(proxyAuthenticator, "proxyAuthenticator cannot be null."); return this; } /** * Build a HttpClient with current configurations. * * @return a {@link HttpClient}. */ public HttpClient build() { OkHttpClient.Builder httpClientBuilder = this.okHttpClient == null ? 
new OkHttpClient.Builder() : this.okHttpClient.newBuilder(); for (Interceptor interceptor : this.networkInterceptors) { httpClientBuilder = httpClientBuilder.addNetworkInterceptor(interceptor); } if (this.readTimeout != null) { httpClientBuilder = httpClientBuilder.readTimeout(this.readTimeout); } else { httpClientBuilder = httpClientBuilder.readTimeout(DEFAULT_READ_TIMEOUT); } if (this.connectionTimeout != null) { httpClientBuilder = httpClientBuilder.connectTimeout(this.connectionTimeout); } else { httpClientBuilder = httpClientBuilder.connectTimeout(DEFAULT_CONNECT_TIMEOUT); } if (this.connectionPool != null) { httpClientBuilder = httpClientBuilder.connectionPool(connectionPool); } if (this.dispatcher != null) { httpClientBuilder = httpClientBuilder.dispatcher(dispatcher); } httpClientBuilder = httpClientBuilder.proxy(this.proxy); if (this.proxyAuthenticator != null) { httpClientBuilder = httpClientBuilder.authenticator(this.proxyAuthenticator); } return new OkHttpAsyncHttpClient(httpClientBuilder.build()); } }
In general, I'd limit formatting changes to a separate PR. I have to do a line by line comparison to see what actually changed and it makes it harder to see the functional changes you made.
public Flux<PartitionOwnership> claimOwnership(PartitionOwnership... requestedPartitionOwnerships) { return Flux.fromArray(requestedPartitionOwnerships).flatMap(partitionOwnership -> { String partitionId = partitionOwnership.getPartitionId(); String blobName = getBlobName(partitionOwnership.getEventHubName(), partitionOwnership.getConsumerGroupName(), partitionId); if (!blobClients.containsKey(blobName)) { blobClients.put(blobName, containerAsyncClient.getBlobAsyncClient(blobName)); } BlobAsyncClient blobAsyncClient = blobClients.get(blobName); Metadata metadata = new Metadata(); metadata.put(OWNER_ID, partitionOwnership.getOwnerId()); Long offset = partitionOwnership.getOffset(); metadata.put(OFFSET, offset == null ? null : String.valueOf(offset)); Long sequenceNumber = partitionOwnership.getSequenceNumber(); metadata.put(SEQUENCE_NUMBER, sequenceNumber == null ? null : String.valueOf(sequenceNumber)); BlobAccessConditions blobAccessConditions = new BlobAccessConditions(); if (ImplUtils.isNullOrEmpty(partitionOwnership.getETag())) { blobAccessConditions.setModifiedAccessConditions(new ModifiedAccessConditions().setIfNoneMatch("*")); return blobAsyncClient.asBlockBlobAsyncClient() .uploadWithResponse(Flux.just(UPLOAD_DATA), 0, null, metadata, null, blobAccessConditions) .flatMapMany(response -> updateOwnershipEtag(response, partitionOwnership), error -> { logger.info(CLAIM_ERROR, partitionId, error.getMessage()); return Mono.empty(); }, Mono::empty); } else { blobAccessConditions.setModifiedAccessConditions(new ModifiedAccessConditions() .setIfMatch(partitionOwnership.getETag())); return blobAsyncClient.setMetadataWithResponse(metadata, blobAccessConditions) .flatMapMany(response -> updateOwnershipEtag(response, partitionOwnership), error -> { logger.info(CLAIM_ERROR, partitionId, error.getMessage()); return Mono.empty(); }, Mono::empty); } }); }
return Flux.fromArray(requestedPartitionOwnerships).flatMap(partitionOwnership -> {
public Flux<PartitionOwnership> claimOwnership(PartitionOwnership... requestedPartitionOwnerships) { return Flux.fromArray(requestedPartitionOwnerships).flatMap(partitionOwnership -> { String partitionId = partitionOwnership.getPartitionId(); String blobName = getBlobName(partitionOwnership.getEventHubName(), partitionOwnership.getConsumerGroupName(), partitionId); if (!blobClients.containsKey(blobName)) { blobClients.put(blobName, containerAsyncClient.getBlobAsyncClient(blobName)); } BlobAsyncClient blobAsyncClient = blobClients.get(blobName); Metadata metadata = new Metadata(); metadata.put(OWNER_ID, partitionOwnership.getOwnerId()); Long offset = partitionOwnership.getOffset(); metadata.put(OFFSET, offset == null ? null : String.valueOf(offset)); Long sequenceNumber = partitionOwnership.getSequenceNumber(); metadata.put(SEQUENCE_NUMBER, sequenceNumber == null ? null : String.valueOf(sequenceNumber)); BlobAccessConditions blobAccessConditions = new BlobAccessConditions(); if (ImplUtils.isNullOrEmpty(partitionOwnership.getETag())) { blobAccessConditions.setModifiedAccessConditions(new ModifiedAccessConditions().setIfNoneMatch("*")); return blobAsyncClient.asBlockBlobAsyncClient() .uploadWithResponse(Flux.just(UPLOAD_DATA), 0, null, metadata, null, blobAccessConditions) .flatMapMany(response -> updateOwnershipETag(response, partitionOwnership), error -> { logger.info(CLAIM_ERROR, partitionId, error.getMessage()); return Mono.empty(); }, Mono::empty); } else { blobAccessConditions.setModifiedAccessConditions(new ModifiedAccessConditions() .setIfMatch(partitionOwnership.getETag())); return blobAsyncClient.setMetadataWithResponse(metadata, blobAccessConditions) .flatMapMany(response -> updateOwnershipETag(response, partitionOwnership), error -> { logger.info(CLAIM_ERROR, partitionId, error.getMessage()); return Mono.empty(); }, Mono::empty); } }); }
class BlobPartitionManager implements PartitionManager { private static final String SEQUENCE_NUMBER = "SequenceNumber"; private static final String OFFSET = "Offset"; private static final String OWNER_ID = "OwnerId"; private static final String ETAG = "eTag"; private static final String CLAIM_ERROR = "Couldn't claim ownership of partition {}, error {}"; private static final String BLOB_PATH_SEPARATOR = "/"; private static final ByteBuffer UPLOAD_DATA = ByteBuffer.wrap("".getBytes(UTF_8)); private final ContainerAsyncClient containerAsyncClient; private final ClientLogger logger = new ClientLogger(BlobPartitionManager.class); private final Map<String, BlobAsyncClient> blobClients = new ConcurrentHashMap<>(); /** * Creates an instance of BlobPartitionManager. * * @param containerAsyncClient The {@link ContainerAsyncClient} this instance will use to read and update blobs in * the storage container. */ public BlobPartitionManager(ContainerAsyncClient containerAsyncClient) { this.containerAsyncClient = containerAsyncClient; } /** * This method is called by the {@link EventProcessor} to get the list of all existing partition ownership from the * Storage Blobs. Could return empty results if there are is no existing ownership information. * * @param eventHubName The Event Hub name to get ownership information. * @param consumerGroupName The consumer group name. * @return A flux of partition ownership details of all the partitions that have/had an owner. 
*/ @Override public Flux<PartitionOwnership> listOwnership(String eventHubName, String consumerGroupName) { String prefix = getBlobPrefix(eventHubName, consumerGroupName); BlobListDetails details = new BlobListDetails().setMetadata(true); ListBlobsOptions options = new ListBlobsOptions().setPrefix(prefix).setDetails(details); return containerAsyncClient.listBlobsFlat(options) .filter(blobItem -> blobItem.getName().split(BLOB_PATH_SEPARATOR).length == 3) .map(this::convertToPartitionOwnership); } /** * This method is called by the {@link EventProcessor} to claim ownership of a list of partitions. This will return * the list of partitions that were owned successfully. * * @param requestedPartitionOwnerships Array of partition ownerships this instance is requesting to own. * @return A flux of partitions this instance successfully claimed ownership. */ @Override private Mono<PartitionOwnership> updateOwnershipEtag(Response<?> response, PartitionOwnership ownership) { return Mono.just(ownership.setETag(response.getHeaders().get(ETAG).getValue())); } /** * Updates the checkpoint in Storage Blobs for a partition. * * @param checkpoint Checkpoint information containing sequence number and offset to be stored for this partition. * @return The new ETag on successful update. */ @Override public Mono<String> updateCheckpoint(Checkpoint checkpoint) { if (checkpoint.getSequenceNumber() == null && checkpoint.getOffset() == null) { throw logger.logExceptionAsWarning(Exceptions .propagate(new IllegalStateException( "Both sequence number and offset cannot be null when updating a checkpoint"))); } String partitionId = checkpoint.getPartitionId(); String blobName = getBlobName(checkpoint.getEventHubName(), checkpoint.getConsumerGroupName(), partitionId); if (!blobClients.containsKey(blobName)) { blobClients.put(blobName, containerAsyncClient.getBlobAsyncClient(blobName)); } Metadata metadata = new Metadata(); String sequenceNumber = checkpoint.getSequenceNumber() == null ? 
null : String.valueOf(checkpoint.getSequenceNumber()); String offset = checkpoint.getOffset() == null ? null : String.valueOf(checkpoint.getOffset()); metadata.put(SEQUENCE_NUMBER, sequenceNumber); metadata.put(OFFSET, offset); metadata.put(OWNER_ID, checkpoint.getOwnerId()); BlobAsyncClient blobAsyncClient = blobClients.get(blobName); BlobAccessConditions blobAccessConditions = new BlobAccessConditions() .setModifiedAccessConditions(new ModifiedAccessConditions().setIfMatch(checkpoint.getETag())); return blobAsyncClient.setMetadataWithResponse(metadata, blobAccessConditions) .map(response -> response.getHeaders().get(ETAG).getValue()); } private String getBlobPrefix(String eventHubName, String consumerGroupName) { return eventHubName + BLOB_PATH_SEPARATOR + consumerGroupName; } private String getBlobName(String eventHubName, String consumerGroupName, String partitionId) { return eventHubName + BLOB_PATH_SEPARATOR + consumerGroupName + BLOB_PATH_SEPARATOR + partitionId; } private PartitionOwnership convertToPartitionOwnership(BlobItem blobItem) { PartitionOwnership partitionOwnership = new PartitionOwnership(); logger.info("Found blob for partition {}", blobItem.getName()); String[] names = blobItem.getName().split(BLOB_PATH_SEPARATOR); partitionOwnership.setEventHubName(names[0]); partitionOwnership.setConsumerGroupName(names[1]); partitionOwnership.setPartitionId(names[2]); if (ImplUtils.isNullOrEmpty(blobItem.getMetadata())) { logger.warning("No metadata available for blob {}", blobItem.getName()); return partitionOwnership; } blobItem.getMetadata().forEach((key, value) -> { switch (key) { case OWNER_ID: partitionOwnership.setOwnerId(value); break; case SEQUENCE_NUMBER: partitionOwnership.setSequenceNumber(Long.valueOf(value)); break; case OFFSET: partitionOwnership.setOffset(Long.valueOf(value)); break; default: break; } }); BlobProperties blobProperties = blobItem.getProperties(); 
partitionOwnership.setLastModifiedTime(blobProperties.getLastModified().toInstant().toEpochMilli()); partitionOwnership.setETag(blobProperties.getEtag()); return partitionOwnership; } }
class BlobPartitionManager implements PartitionManager { private static final String SEQUENCE_NUMBER = "SequenceNumber"; private static final String OFFSET = "Offset"; private static final String OWNER_ID = "OwnerId"; private static final String ETAG = "eTag"; private static final String CLAIM_ERROR = "Couldn't claim ownership of partition {}, error {}"; private static final String BLOB_PATH_SEPARATOR = "/"; private static final ByteBuffer UPLOAD_DATA = ByteBuffer.wrap("".getBytes(UTF_8)); private final ContainerAsyncClient containerAsyncClient; private final ClientLogger logger = new ClientLogger(BlobPartitionManager.class); private final Map<String, BlobAsyncClient> blobClients = new ConcurrentHashMap<>(); /** * Creates an instance of BlobPartitionManager. * * @param containerAsyncClient The {@link ContainerAsyncClient} this instance will use to read and update blobs in * the storage container. */ public BlobPartitionManager(ContainerAsyncClient containerAsyncClient) { this.containerAsyncClient = containerAsyncClient; } /** * This method is called by the {@link EventProcessor} to get the list of all existing partition ownership from the * Storage Blobs. Could return empty results if there are is no existing ownership information. * * @param eventHubName The Event Hub name to get ownership information. * @param consumerGroupName The consumer group name. * @return A flux of partition ownership details of all the partitions that have/had an owner. 
*/ @Override public Flux<PartitionOwnership> listOwnership(String eventHubName, String consumerGroupName) { String prefix = getBlobPrefix(eventHubName, consumerGroupName); BlobListDetails details = new BlobListDetails().setMetadata(true); ListBlobsOptions options = new ListBlobsOptions().setPrefix(prefix).setDetails(details); return containerAsyncClient.listBlobsFlat(options) .filter(blobItem -> blobItem.getName().split(BLOB_PATH_SEPARATOR).length == 3) .map(this::convertToPartitionOwnership); } /** * This method is called by the {@link EventProcessor} to claim ownership of a list of partitions. This will return * the list of partitions that were owned successfully. * * @param requestedPartitionOwnerships Array of partition ownerships this instance is requesting to own. * @return A flux of partitions this instance successfully claimed ownership. */ @Override private Mono<PartitionOwnership> updateOwnershipETag(Response<?> response, PartitionOwnership ownership) { return Mono.just(ownership.setETag(response.getHeaders().get(ETAG).getValue())); } /** * Updates the checkpoint in Storage Blobs for a partition. * * @param checkpoint Checkpoint information containing sequence number and offset to be stored for this partition. * @return The new ETag on successful update. */ @Override public Mono<String> updateCheckpoint(Checkpoint checkpoint) { if (checkpoint.getSequenceNumber() == null && checkpoint.getOffset() == null) { throw logger.logExceptionAsWarning(Exceptions .propagate(new IllegalStateException( "Both sequence number and offset cannot be null when updating a checkpoint"))); } String partitionId = checkpoint.getPartitionId(); String blobName = getBlobName(checkpoint.getEventHubName(), checkpoint.getConsumerGroupName(), partitionId); if (!blobClients.containsKey(blobName)) { blobClients.put(blobName, containerAsyncClient.getBlobAsyncClient(blobName)); } Metadata metadata = new Metadata(); String sequenceNumber = checkpoint.getSequenceNumber() == null ? 
null : String.valueOf(checkpoint.getSequenceNumber()); String offset = checkpoint.getOffset() == null ? null : String.valueOf(checkpoint.getOffset()); metadata.put(SEQUENCE_NUMBER, sequenceNumber); metadata.put(OFFSET, offset); metadata.put(OWNER_ID, checkpoint.getOwnerId()); BlobAsyncClient blobAsyncClient = blobClients.get(blobName); BlobAccessConditions blobAccessConditions = new BlobAccessConditions() .setModifiedAccessConditions(new ModifiedAccessConditions().setIfMatch(checkpoint.getETag())); return blobAsyncClient.setMetadataWithResponse(metadata, blobAccessConditions) .map(response -> response.getHeaders().get(ETAG).getValue()); } private String getBlobPrefix(String eventHubName, String consumerGroupName) { return eventHubName + BLOB_PATH_SEPARATOR + consumerGroupName; } private String getBlobName(String eventHubName, String consumerGroupName, String partitionId) { return eventHubName + BLOB_PATH_SEPARATOR + consumerGroupName + BLOB_PATH_SEPARATOR + partitionId; } private PartitionOwnership convertToPartitionOwnership(BlobItem blobItem) { PartitionOwnership partitionOwnership = new PartitionOwnership(); logger.info("Found blob for partition {}", blobItem.getName()); String[] names = blobItem.getName().split(BLOB_PATH_SEPARATOR); partitionOwnership.setEventHubName(names[0]); partitionOwnership.setConsumerGroupName(names[1]); partitionOwnership.setPartitionId(names[2]); if (ImplUtils.isNullOrEmpty(blobItem.getMetadata())) { logger.warning("No metadata available for blob {}", blobItem.getName()); return partitionOwnership; } blobItem.getMetadata().forEach((key, value) -> { switch (key) { case OWNER_ID: partitionOwnership.setOwnerId(value); break; case SEQUENCE_NUMBER: partitionOwnership.setSequenceNumber(Long.valueOf(value)); break; case OFFSET: partitionOwnership.setOffset(Long.valueOf(value)); break; default: break; } }); BlobProperties blobProperties = blobItem.getProperties(); 
partitionOwnership.setLastModifiedTime(blobProperties.getLastModified().toInstant().toEpochMilli()); partitionOwnership.setETag(blobProperties.getEtag()); return partitionOwnership; } }
Seeing `this` passed as an argument is iffy to me. You can end up with a cyclic dependency graph. I'd prefer to pass in the relevant arguments to create that specialised client (i.e. connection string, proxy, transport type, etc.)
public PageBlobAsyncClient asPageBlobAsyncClient() { return new SpecializedBlobClientBuilder() .blobAsyncClient(this) .buildPageBlobAsyncClient(); }
.blobAsyncClient(this)
public PageBlobAsyncClient asPageBlobAsyncClient() { return prepareBuilder().buildPageBlobAsyncClient(); }
class BlobAsyncClient extends BlobAsyncClientBase { /** * Package-private constructor for use by {@link BlobClientBuilder}. * * @param azureBlobStorage the API client for blob storage */ BlobAsyncClient(AzureBlobStorageImpl azureBlobStorage, String snapshot, CpkInfo cpk) { super(azureBlobStorage, snapshot, cpk); } /** * Creates a new {@link BlobAsyncClient} linked to the {@code snapshot} of this blob resource. * * @param snapshot the identifier for a specific snapshot of this blob * @return a {@link BlobAsyncClient} used to interact with the specific snapshot. */ @Override public BlobAsyncClient getSnapshotClient(String snapshot) { return new BlobAsyncClient(new AzureBlobStorageBuilder() .url(getBlobUrl().toString()) .pipeline(azureBlobStorage.getHttpPipeline()) .build(), snapshot, cpk); } /** * Creates a new {@link AppendBlobAsyncClient} associated to this blob. * * @return a {@link AppendBlobAsyncClient} associated to this blob. */ public AppendBlobAsyncClient asAppendBlobAsyncClient() { return new SpecializedBlobClientBuilder() .blobAsyncClient(this) .buildAppendBlobAsyncClient(); } /** * Creates a new {@link BlockBlobAsyncClient} associated to this blob. * * @return a {@link BlockBlobAsyncClient} associated to this blob. */ public BlockBlobAsyncClient asBlockBlobAsyncClient() { return new SpecializedBlobClientBuilder() .blobAsyncClient(this) .buildBlockBlobAsyncClient(); } /** * Creates a new {@link PageBlobAsyncClient} associated to this blob. * * @return a {@link PageBlobAsyncClient} associated to this blob. */ }
class BlobAsyncClient extends BlobAsyncClientBase { /** * Package-private constructor for use by {@link BlobClientBuilder}. * * @param azureBlobStorage the API client for blob storage */ BlobAsyncClient(AzureBlobStorageImpl azureBlobStorage, String snapshot, CpkInfo cpk) { super(azureBlobStorage, snapshot, cpk); } /** * Creates a new {@link BlobAsyncClient} linked to the {@code snapshot} of this blob resource. * * @param snapshot the identifier for a specific snapshot of this blob * @return a {@link BlobAsyncClient} used to interact with the specific snapshot. */ @Override public BlobAsyncClient getSnapshotClient(String snapshot) { return new BlobAsyncClient(new AzureBlobStorageBuilder() .url(getBlobUrl().toString()) .pipeline(azureBlobStorage.getHttpPipeline()) .build(), getSnapshotId(), getCustomerProvidedKey()); } /** * Creates a new {@link AppendBlobAsyncClient} associated to this blob. * * @return a {@link AppendBlobAsyncClient} associated to this blob. */ public AppendBlobAsyncClient asAppendBlobAsyncClient() { return prepareBuilder().buildAppendBlobAsyncClient(); } /** * Creates a new {@link BlockBlobAsyncClient} associated to this blob. * * @return a {@link BlockBlobAsyncClient} associated to this blob. */ public BlockBlobAsyncClient asBlockBlobAsyncClient() { return prepareBuilder().buildBlockBlobAsyncClient(); } /** * Creates a new {@link PageBlobAsyncClient} associated to this blob. * * @return a {@link PageBlobAsyncClient} associated to this blob. */ private SpecializedBlobClientBuilder prepareBuilder() { SpecializedBlobClientBuilder builder = new SpecializedBlobClientBuilder() .pipeline(getHttpPipeline()) .endpoint(getBlobUrl().toString()) .snapshot(getSnapshotId()); CpkInfo cpk = getCustomerProvidedKey(); if (cpk != null) { builder.customerProvidedKey(new CustomerProvidedKey(cpk.getEncryptionKey())); } return builder; } }
Yeah, I'd rather not have done these changes, but Checkstyle was complaining a lot about indenting in this file.
public Flux<PartitionOwnership> claimOwnership(PartitionOwnership... requestedPartitionOwnerships) { return Flux.fromArray(requestedPartitionOwnerships).flatMap(partitionOwnership -> { String partitionId = partitionOwnership.getPartitionId(); String blobName = getBlobName(partitionOwnership.getEventHubName(), partitionOwnership.getConsumerGroupName(), partitionId); if (!blobClients.containsKey(blobName)) { blobClients.put(blobName, containerAsyncClient.getBlobAsyncClient(blobName)); } BlobAsyncClient blobAsyncClient = blobClients.get(blobName); Metadata metadata = new Metadata(); metadata.put(OWNER_ID, partitionOwnership.getOwnerId()); Long offset = partitionOwnership.getOffset(); metadata.put(OFFSET, offset == null ? null : String.valueOf(offset)); Long sequenceNumber = partitionOwnership.getSequenceNumber(); metadata.put(SEQUENCE_NUMBER, sequenceNumber == null ? null : String.valueOf(sequenceNumber)); BlobAccessConditions blobAccessConditions = new BlobAccessConditions(); if (ImplUtils.isNullOrEmpty(partitionOwnership.getETag())) { blobAccessConditions.setModifiedAccessConditions(new ModifiedAccessConditions().setIfNoneMatch("*")); return blobAsyncClient.asBlockBlobAsyncClient() .uploadWithResponse(Flux.just(UPLOAD_DATA), 0, null, metadata, null, blobAccessConditions) .flatMapMany(response -> updateOwnershipEtag(response, partitionOwnership), error -> { logger.info(CLAIM_ERROR, partitionId, error.getMessage()); return Mono.empty(); }, Mono::empty); } else { blobAccessConditions.setModifiedAccessConditions(new ModifiedAccessConditions() .setIfMatch(partitionOwnership.getETag())); return blobAsyncClient.setMetadataWithResponse(metadata, blobAccessConditions) .flatMapMany(response -> updateOwnershipEtag(response, partitionOwnership), error -> { logger.info(CLAIM_ERROR, partitionId, error.getMessage()); return Mono.empty(); }, Mono::empty); } }); }
return Flux.fromArray(requestedPartitionOwnerships).flatMap(partitionOwnership -> {
public Flux<PartitionOwnership> claimOwnership(PartitionOwnership... requestedPartitionOwnerships) { return Flux.fromArray(requestedPartitionOwnerships).flatMap(partitionOwnership -> { String partitionId = partitionOwnership.getPartitionId(); String blobName = getBlobName(partitionOwnership.getEventHubName(), partitionOwnership.getConsumerGroupName(), partitionId); if (!blobClients.containsKey(blobName)) { blobClients.put(blobName, containerAsyncClient.getBlobAsyncClient(blobName)); } BlobAsyncClient blobAsyncClient = blobClients.get(blobName); Metadata metadata = new Metadata(); metadata.put(OWNER_ID, partitionOwnership.getOwnerId()); Long offset = partitionOwnership.getOffset(); metadata.put(OFFSET, offset == null ? null : String.valueOf(offset)); Long sequenceNumber = partitionOwnership.getSequenceNumber(); metadata.put(SEQUENCE_NUMBER, sequenceNumber == null ? null : String.valueOf(sequenceNumber)); BlobAccessConditions blobAccessConditions = new BlobAccessConditions(); if (ImplUtils.isNullOrEmpty(partitionOwnership.getETag())) { blobAccessConditions.setModifiedAccessConditions(new ModifiedAccessConditions().setIfNoneMatch("*")); return blobAsyncClient.asBlockBlobAsyncClient() .uploadWithResponse(Flux.just(UPLOAD_DATA), 0, null, metadata, null, blobAccessConditions) .flatMapMany(response -> updateOwnershipETag(response, partitionOwnership), error -> { logger.info(CLAIM_ERROR, partitionId, error.getMessage()); return Mono.empty(); }, Mono::empty); } else { blobAccessConditions.setModifiedAccessConditions(new ModifiedAccessConditions() .setIfMatch(partitionOwnership.getETag())); return blobAsyncClient.setMetadataWithResponse(metadata, blobAccessConditions) .flatMapMany(response -> updateOwnershipETag(response, partitionOwnership), error -> { logger.info(CLAIM_ERROR, partitionId, error.getMessage()); return Mono.empty(); }, Mono::empty); } }); }
class BlobPartitionManager implements PartitionManager { private static final String SEQUENCE_NUMBER = "SequenceNumber"; private static final String OFFSET = "Offset"; private static final String OWNER_ID = "OwnerId"; private static final String ETAG = "eTag"; private static final String CLAIM_ERROR = "Couldn't claim ownership of partition {}, error {}"; private static final String BLOB_PATH_SEPARATOR = "/"; private static final ByteBuffer UPLOAD_DATA = ByteBuffer.wrap("".getBytes(UTF_8)); private final ContainerAsyncClient containerAsyncClient; private final ClientLogger logger = new ClientLogger(BlobPartitionManager.class); private final Map<String, BlobAsyncClient> blobClients = new ConcurrentHashMap<>(); /** * Creates an instance of BlobPartitionManager. * * @param containerAsyncClient The {@link ContainerAsyncClient} this instance will use to read and update blobs in * the storage container. */ public BlobPartitionManager(ContainerAsyncClient containerAsyncClient) { this.containerAsyncClient = containerAsyncClient; } /** * This method is called by the {@link EventProcessor} to get the list of all existing partition ownership from the * Storage Blobs. Could return empty results if there are is no existing ownership information. * * @param eventHubName The Event Hub name to get ownership information. * @param consumerGroupName The consumer group name. * @return A flux of partition ownership details of all the partitions that have/had an owner. 
*/ @Override public Flux<PartitionOwnership> listOwnership(String eventHubName, String consumerGroupName) { String prefix = getBlobPrefix(eventHubName, consumerGroupName); BlobListDetails details = new BlobListDetails().setMetadata(true); ListBlobsOptions options = new ListBlobsOptions().setPrefix(prefix).setDetails(details); return containerAsyncClient.listBlobsFlat(options) .filter(blobItem -> blobItem.getName().split(BLOB_PATH_SEPARATOR).length == 3) .map(this::convertToPartitionOwnership); } /** * This method is called by the {@link EventProcessor} to claim ownership of a list of partitions. This will return * the list of partitions that were owned successfully. * * @param requestedPartitionOwnerships Array of partition ownerships this instance is requesting to own. * @return A flux of partitions this instance successfully claimed ownership. */ @Override private Mono<PartitionOwnership> updateOwnershipEtag(Response<?> response, PartitionOwnership ownership) { return Mono.just(ownership.setETag(response.getHeaders().get(ETAG).getValue())); } /** * Updates the checkpoint in Storage Blobs for a partition. * * @param checkpoint Checkpoint information containing sequence number and offset to be stored for this partition. * @return The new ETag on successful update. */ @Override public Mono<String> updateCheckpoint(Checkpoint checkpoint) { if (checkpoint.getSequenceNumber() == null && checkpoint.getOffset() == null) { throw logger.logExceptionAsWarning(Exceptions .propagate(new IllegalStateException( "Both sequence number and offset cannot be null when updating a checkpoint"))); } String partitionId = checkpoint.getPartitionId(); String blobName = getBlobName(checkpoint.getEventHubName(), checkpoint.getConsumerGroupName(), partitionId); if (!blobClients.containsKey(blobName)) { blobClients.put(blobName, containerAsyncClient.getBlobAsyncClient(blobName)); } Metadata metadata = new Metadata(); String sequenceNumber = checkpoint.getSequenceNumber() == null ? 
null : String.valueOf(checkpoint.getSequenceNumber()); String offset = checkpoint.getOffset() == null ? null : String.valueOf(checkpoint.getOffset()); metadata.put(SEQUENCE_NUMBER, sequenceNumber); metadata.put(OFFSET, offset); metadata.put(OWNER_ID, checkpoint.getOwnerId()); BlobAsyncClient blobAsyncClient = blobClients.get(blobName); BlobAccessConditions blobAccessConditions = new BlobAccessConditions() .setModifiedAccessConditions(new ModifiedAccessConditions().setIfMatch(checkpoint.getETag())); return blobAsyncClient.setMetadataWithResponse(metadata, blobAccessConditions) .map(response -> response.getHeaders().get(ETAG).getValue()); } private String getBlobPrefix(String eventHubName, String consumerGroupName) { return eventHubName + BLOB_PATH_SEPARATOR + consumerGroupName; } private String getBlobName(String eventHubName, String consumerGroupName, String partitionId) { return eventHubName + BLOB_PATH_SEPARATOR + consumerGroupName + BLOB_PATH_SEPARATOR + partitionId; } private PartitionOwnership convertToPartitionOwnership(BlobItem blobItem) { PartitionOwnership partitionOwnership = new PartitionOwnership(); logger.info("Found blob for partition {}", blobItem.getName()); String[] names = blobItem.getName().split(BLOB_PATH_SEPARATOR); partitionOwnership.setEventHubName(names[0]); partitionOwnership.setConsumerGroupName(names[1]); partitionOwnership.setPartitionId(names[2]); if (ImplUtils.isNullOrEmpty(blobItem.getMetadata())) { logger.warning("No metadata available for blob {}", blobItem.getName()); return partitionOwnership; } blobItem.getMetadata().forEach((key, value) -> { switch (key) { case OWNER_ID: partitionOwnership.setOwnerId(value); break; case SEQUENCE_NUMBER: partitionOwnership.setSequenceNumber(Long.valueOf(value)); break; case OFFSET: partitionOwnership.setOffset(Long.valueOf(value)); break; default: break; } }); BlobProperties blobProperties = blobItem.getProperties(); 
partitionOwnership.setLastModifiedTime(blobProperties.getLastModified().toInstant().toEpochMilli()); partitionOwnership.setETag(blobProperties.getEtag()); return partitionOwnership; } }
class BlobPartitionManager implements PartitionManager { private static final String SEQUENCE_NUMBER = "SequenceNumber"; private static final String OFFSET = "Offset"; private static final String OWNER_ID = "OwnerId"; private static final String ETAG = "eTag"; private static final String CLAIM_ERROR = "Couldn't claim ownership of partition {}, error {}"; private static final String BLOB_PATH_SEPARATOR = "/"; private static final ByteBuffer UPLOAD_DATA = ByteBuffer.wrap("".getBytes(UTF_8)); private final ContainerAsyncClient containerAsyncClient; private final ClientLogger logger = new ClientLogger(BlobPartitionManager.class); private final Map<String, BlobAsyncClient> blobClients = new ConcurrentHashMap<>(); /** * Creates an instance of BlobPartitionManager. * * @param containerAsyncClient The {@link ContainerAsyncClient} this instance will use to read and update blobs in * the storage container. */ public BlobPartitionManager(ContainerAsyncClient containerAsyncClient) { this.containerAsyncClient = containerAsyncClient; } /** * This method is called by the {@link EventProcessor} to get the list of all existing partition ownership from the * Storage Blobs. Could return empty results if there are is no existing ownership information. * * @param eventHubName The Event Hub name to get ownership information. * @param consumerGroupName The consumer group name. * @return A flux of partition ownership details of all the partitions that have/had an owner. 
*/ @Override public Flux<PartitionOwnership> listOwnership(String eventHubName, String consumerGroupName) { String prefix = getBlobPrefix(eventHubName, consumerGroupName); BlobListDetails details = new BlobListDetails().setMetadata(true); ListBlobsOptions options = new ListBlobsOptions().setPrefix(prefix).setDetails(details); return containerAsyncClient.listBlobsFlat(options) .filter(blobItem -> blobItem.getName().split(BLOB_PATH_SEPARATOR).length == 3) .map(this::convertToPartitionOwnership); } /** * This method is called by the {@link EventProcessor} to claim ownership of a list of partitions. This will return * the list of partitions that were owned successfully. * * @param requestedPartitionOwnerships Array of partition ownerships this instance is requesting to own. * @return A flux of partitions this instance successfully claimed ownership. */ @Override private Mono<PartitionOwnership> updateOwnershipETag(Response<?> response, PartitionOwnership ownership) { return Mono.just(ownership.setETag(response.getHeaders().get(ETAG).getValue())); } /** * Updates the checkpoint in Storage Blobs for a partition. * * @param checkpoint Checkpoint information containing sequence number and offset to be stored for this partition. * @return The new ETag on successful update. */ @Override public Mono<String> updateCheckpoint(Checkpoint checkpoint) { if (checkpoint.getSequenceNumber() == null && checkpoint.getOffset() == null) { throw logger.logExceptionAsWarning(Exceptions .propagate(new IllegalStateException( "Both sequence number and offset cannot be null when updating a checkpoint"))); } String partitionId = checkpoint.getPartitionId(); String blobName = getBlobName(checkpoint.getEventHubName(), checkpoint.getConsumerGroupName(), partitionId); if (!blobClients.containsKey(blobName)) { blobClients.put(blobName, containerAsyncClient.getBlobAsyncClient(blobName)); } Metadata metadata = new Metadata(); String sequenceNumber = checkpoint.getSequenceNumber() == null ? 
null : String.valueOf(checkpoint.getSequenceNumber()); String offset = checkpoint.getOffset() == null ? null : String.valueOf(checkpoint.getOffset()); metadata.put(SEQUENCE_NUMBER, sequenceNumber); metadata.put(OFFSET, offset); metadata.put(OWNER_ID, checkpoint.getOwnerId()); BlobAsyncClient blobAsyncClient = blobClients.get(blobName); BlobAccessConditions blobAccessConditions = new BlobAccessConditions() .setModifiedAccessConditions(new ModifiedAccessConditions().setIfMatch(checkpoint.getETag())); return blobAsyncClient.setMetadataWithResponse(metadata, blobAccessConditions) .map(response -> response.getHeaders().get(ETAG).getValue()); } private String getBlobPrefix(String eventHubName, String consumerGroupName) { return eventHubName + BLOB_PATH_SEPARATOR + consumerGroupName; } private String getBlobName(String eventHubName, String consumerGroupName, String partitionId) { return eventHubName + BLOB_PATH_SEPARATOR + consumerGroupName + BLOB_PATH_SEPARATOR + partitionId; } private PartitionOwnership convertToPartitionOwnership(BlobItem blobItem) { PartitionOwnership partitionOwnership = new PartitionOwnership(); logger.info("Found blob for partition {}", blobItem.getName()); String[] names = blobItem.getName().split(BLOB_PATH_SEPARATOR); partitionOwnership.setEventHubName(names[0]); partitionOwnership.setConsumerGroupName(names[1]); partitionOwnership.setPartitionId(names[2]); if (ImplUtils.isNullOrEmpty(blobItem.getMetadata())) { logger.warning("No metadata available for blob {}", blobItem.getName()); return partitionOwnership; } blobItem.getMetadata().forEach((key, value) -> { switch (key) { case OWNER_ID: partitionOwnership.setOwnerId(value); break; case SEQUENCE_NUMBER: partitionOwnership.setSequenceNumber(Long.valueOf(value)); break; case OFFSET: partitionOwnership.setOffset(Long.valueOf(value)); break; default: break; } }); BlobProperties blobProperties = blobItem.getProperties(); 
partitionOwnership.setLastModifiedTime(blobProperties.getLastModified().toInstant().toEpochMilli()); partitionOwnership.setETag(blobProperties.getEtag()); return partitionOwnership; } }
I agree passing `this` is always terrifying (especially coming from doing a lot of JS development), but the builder API allows passing a BlobClient, and this is the simplest way to go about that.
public PageBlobAsyncClient asPageBlobAsyncClient() { return new SpecializedBlobClientBuilder() .blobAsyncClient(this) .buildPageBlobAsyncClient(); }
.blobAsyncClient(this)
public PageBlobAsyncClient asPageBlobAsyncClient() { return prepareBuilder().buildPageBlobAsyncClient(); }
class BlobAsyncClient extends BlobAsyncClientBase { /** * Package-private constructor for use by {@link BlobClientBuilder}. * * @param azureBlobStorage the API client for blob storage */ BlobAsyncClient(AzureBlobStorageImpl azureBlobStorage, String snapshot, CpkInfo cpk) { super(azureBlobStorage, snapshot, cpk); } /** * Creates a new {@link BlobAsyncClient} linked to the {@code snapshot} of this blob resource. * * @param snapshot the identifier for a specific snapshot of this blob * @return a {@link BlobAsyncClient} used to interact with the specific snapshot. */ @Override public BlobAsyncClient getSnapshotClient(String snapshot) { return new BlobAsyncClient(new AzureBlobStorageBuilder() .url(getBlobUrl().toString()) .pipeline(azureBlobStorage.getHttpPipeline()) .build(), snapshot, cpk); } /** * Creates a new {@link AppendBlobAsyncClient} associated to this blob. * * @return a {@link AppendBlobAsyncClient} associated to this blob. */ public AppendBlobAsyncClient asAppendBlobAsyncClient() { return new SpecializedBlobClientBuilder() .blobAsyncClient(this) .buildAppendBlobAsyncClient(); } /** * Creates a new {@link BlockBlobAsyncClient} associated to this blob. * * @return a {@link BlockBlobAsyncClient} associated to this blob. */ public BlockBlobAsyncClient asBlockBlobAsyncClient() { return new SpecializedBlobClientBuilder() .blobAsyncClient(this) .buildBlockBlobAsyncClient(); } /** * Creates a new {@link PageBlobAsyncClient} associated to this blob. * * @return a {@link PageBlobAsyncClient} associated to this blob. */ }
class BlobAsyncClient extends BlobAsyncClientBase { /** * Package-private constructor for use by {@link BlobClientBuilder}. * * @param azureBlobStorage the API client for blob storage */ BlobAsyncClient(AzureBlobStorageImpl azureBlobStorage, String snapshot, CpkInfo cpk) { super(azureBlobStorage, snapshot, cpk); } /** * Creates a new {@link BlobAsyncClient} linked to the {@code snapshot} of this blob resource. * * @param snapshot the identifier for a specific snapshot of this blob * @return a {@link BlobAsyncClient} used to interact with the specific snapshot. */ @Override public BlobAsyncClient getSnapshotClient(String snapshot) { return new BlobAsyncClient(new AzureBlobStorageBuilder() .url(getBlobUrl().toString()) .pipeline(azureBlobStorage.getHttpPipeline()) .build(), getSnapshotId(), getCustomerProvidedKey()); } /** * Creates a new {@link AppendBlobAsyncClient} associated to this blob. * * @return a {@link AppendBlobAsyncClient} associated to this blob. */ public AppendBlobAsyncClient asAppendBlobAsyncClient() { return prepareBuilder().buildAppendBlobAsyncClient(); } /** * Creates a new {@link BlockBlobAsyncClient} associated to this blob. * * @return a {@link BlockBlobAsyncClient} associated to this blob. */ public BlockBlobAsyncClient asBlockBlobAsyncClient() { return prepareBuilder().buildBlockBlobAsyncClient(); } /** * Creates a new {@link PageBlobAsyncClient} associated to this blob. * * @return a {@link PageBlobAsyncClient} associated to this blob. */ private SpecializedBlobClientBuilder prepareBuilder() { SpecializedBlobClientBuilder builder = new SpecializedBlobClientBuilder() .pipeline(getHttpPipeline()) .endpoint(getBlobUrl().toString()) .snapshot(getSnapshotId()); CpkInfo cpk = getCustomerProvidedKey(); if (cpk != null) { builder.customerProvidedKey(new CustomerProvidedKey(cpk.getEncryptionKey())); } return builder; } }
Simplest may not be the best design choice even if the builder supports it, because we control the builder API.
public PageBlobAsyncClient asPageBlobAsyncClient() { return new SpecializedBlobClientBuilder() .blobAsyncClient(this) .buildPageBlobAsyncClient(); }
.blobAsyncClient(this)
public PageBlobAsyncClient asPageBlobAsyncClient() { return prepareBuilder().buildPageBlobAsyncClient(); }
class BlobAsyncClient extends BlobAsyncClientBase { /** * Package-private constructor for use by {@link BlobClientBuilder}. * * @param azureBlobStorage the API client for blob storage */ BlobAsyncClient(AzureBlobStorageImpl azureBlobStorage, String snapshot, CpkInfo cpk) { super(azureBlobStorage, snapshot, cpk); } /** * Creates a new {@link BlobAsyncClient} linked to the {@code snapshot} of this blob resource. * * @param snapshot the identifier for a specific snapshot of this blob * @return a {@link BlobAsyncClient} used to interact with the specific snapshot. */ @Override public BlobAsyncClient getSnapshotClient(String snapshot) { return new BlobAsyncClient(new AzureBlobStorageBuilder() .url(getBlobUrl().toString()) .pipeline(azureBlobStorage.getHttpPipeline()) .build(), snapshot, cpk); } /** * Creates a new {@link AppendBlobAsyncClient} associated to this blob. * * @return a {@link AppendBlobAsyncClient} associated to this blob. */ public AppendBlobAsyncClient asAppendBlobAsyncClient() { return new SpecializedBlobClientBuilder() .blobAsyncClient(this) .buildAppendBlobAsyncClient(); } /** * Creates a new {@link BlockBlobAsyncClient} associated to this blob. * * @return a {@link BlockBlobAsyncClient} associated to this blob. */ public BlockBlobAsyncClient asBlockBlobAsyncClient() { return new SpecializedBlobClientBuilder() .blobAsyncClient(this) .buildBlockBlobAsyncClient(); } /** * Creates a new {@link PageBlobAsyncClient} associated to this blob. * * @return a {@link PageBlobAsyncClient} associated to this blob. */ }
class BlobAsyncClient extends BlobAsyncClientBase { /** * Package-private constructor for use by {@link BlobClientBuilder}. * * @param azureBlobStorage the API client for blob storage */ BlobAsyncClient(AzureBlobStorageImpl azureBlobStorage, String snapshot, CpkInfo cpk) { super(azureBlobStorage, snapshot, cpk); } /** * Creates a new {@link BlobAsyncClient} linked to the {@code snapshot} of this blob resource. * * @param snapshot the identifier for a specific snapshot of this blob * @return a {@link BlobAsyncClient} used to interact with the specific snapshot. */ @Override public BlobAsyncClient getSnapshotClient(String snapshot) { return new BlobAsyncClient(new AzureBlobStorageBuilder() .url(getBlobUrl().toString()) .pipeline(azureBlobStorage.getHttpPipeline()) .build(), getSnapshotId(), getCustomerProvidedKey()); } /** * Creates a new {@link AppendBlobAsyncClient} associated to this blob. * * @return a {@link AppendBlobAsyncClient} associated to this blob. */ public AppendBlobAsyncClient asAppendBlobAsyncClient() { return prepareBuilder().buildAppendBlobAsyncClient(); } /** * Creates a new {@link BlockBlobAsyncClient} associated to this blob. * * @return a {@link BlockBlobAsyncClient} associated to this blob. */ public BlockBlobAsyncClient asBlockBlobAsyncClient() { return prepareBuilder().buildBlockBlobAsyncClient(); } /** * Creates a new {@link PageBlobAsyncClient} associated to this blob. * * @return a {@link PageBlobAsyncClient} associated to this blob. */ private SpecializedBlobClientBuilder prepareBuilder() { SpecializedBlobClientBuilder builder = new SpecializedBlobClientBuilder() .pipeline(getHttpPipeline()) .endpoint(getBlobUrl().toString()) .snapshot(getSnapshotId()); CpkInfo cpk = getCustomerProvidedKey(); if (cpk != null) { builder.customerProvidedKey(new CustomerProvidedKey(cpk.getEncryptionKey())); } return builder; } }
Changed the builder to allow the individual pieces to be passed.
public PageBlobAsyncClient asPageBlobAsyncClient() { return new SpecializedBlobClientBuilder() .blobAsyncClient(this) .buildPageBlobAsyncClient(); }
.blobAsyncClient(this)
public PageBlobAsyncClient asPageBlobAsyncClient() { return prepareBuilder().buildPageBlobAsyncClient(); }
class BlobAsyncClient extends BlobAsyncClientBase { /** * Package-private constructor for use by {@link BlobClientBuilder}. * * @param azureBlobStorage the API client for blob storage */ BlobAsyncClient(AzureBlobStorageImpl azureBlobStorage, String snapshot, CpkInfo cpk) { super(azureBlobStorage, snapshot, cpk); } /** * Creates a new {@link BlobAsyncClient} linked to the {@code snapshot} of this blob resource. * * @param snapshot the identifier for a specific snapshot of this blob * @return a {@link BlobAsyncClient} used to interact with the specific snapshot. */ @Override public BlobAsyncClient getSnapshotClient(String snapshot) { return new BlobAsyncClient(new AzureBlobStorageBuilder() .url(getBlobUrl().toString()) .pipeline(azureBlobStorage.getHttpPipeline()) .build(), snapshot, cpk); } /** * Creates a new {@link AppendBlobAsyncClient} associated to this blob. * * @return a {@link AppendBlobAsyncClient} associated to this blob. */ public AppendBlobAsyncClient asAppendBlobAsyncClient() { return new SpecializedBlobClientBuilder() .blobAsyncClient(this) .buildAppendBlobAsyncClient(); } /** * Creates a new {@link BlockBlobAsyncClient} associated to this blob. * * @return a {@link BlockBlobAsyncClient} associated to this blob. */ public BlockBlobAsyncClient asBlockBlobAsyncClient() { return new SpecializedBlobClientBuilder() .blobAsyncClient(this) .buildBlockBlobAsyncClient(); } /** * Creates a new {@link PageBlobAsyncClient} associated to this blob. * * @return a {@link PageBlobAsyncClient} associated to this blob. */ }
class BlobAsyncClient extends BlobAsyncClientBase { /** * Package-private constructor for use by {@link BlobClientBuilder}. * * @param azureBlobStorage the API client for blob storage */ BlobAsyncClient(AzureBlobStorageImpl azureBlobStorage, String snapshot, CpkInfo cpk) { super(azureBlobStorage, snapshot, cpk); } /** * Creates a new {@link BlobAsyncClient} linked to the {@code snapshot} of this blob resource. * * @param snapshot the identifier for a specific snapshot of this blob * @return a {@link BlobAsyncClient} used to interact with the specific snapshot. */ @Override public BlobAsyncClient getSnapshotClient(String snapshot) { return new BlobAsyncClient(new AzureBlobStorageBuilder() .url(getBlobUrl().toString()) .pipeline(azureBlobStorage.getHttpPipeline()) .build(), getSnapshotId(), getCustomerProvidedKey()); } /** * Creates a new {@link AppendBlobAsyncClient} associated to this blob. * * @return a {@link AppendBlobAsyncClient} associated to this blob. */ public AppendBlobAsyncClient asAppendBlobAsyncClient() { return prepareBuilder().buildAppendBlobAsyncClient(); } /** * Creates a new {@link BlockBlobAsyncClient} associated to this blob. * * @return a {@link BlockBlobAsyncClient} associated to this blob. */ public BlockBlobAsyncClient asBlockBlobAsyncClient() { return prepareBuilder().buildBlockBlobAsyncClient(); } /** * Creates a new {@link PageBlobAsyncClient} associated to this blob. * * @return a {@link PageBlobAsyncClient} associated to this blob. */ private SpecializedBlobClientBuilder prepareBuilder() { SpecializedBlobClientBuilder builder = new SpecializedBlobClientBuilder() .pipeline(getHttpPipeline()) .endpoint(getBlobUrl().toString()) .snapshot(getSnapshotId()); CpkInfo cpk = getCustomerProvidedKey(); if (cpk != null) { builder.customerProvidedKey(new CustomerProvidedKey(cpk.getEncryptionKey())); } return builder; } }
Add a message. Without it, you just get a NullPointerException with no message, i.e. "'containerName' cannot be null." Same with other usages of this.
/**
 * Creates a {@link BlobAsyncClient} based on options set in the builder.
 *
 * @return a {@link BlobAsyncClient} created from the configurations in this builder.
 * @throws NullPointerException If {@code containerName} or {@code blobName} is {@code null}.
 */
public BlobAsyncClient buildBlobAsyncClient() {
    // Include messages so a missing required option reports which one failed,
    // instead of surfacing a bare NullPointerException.
    Objects.requireNonNull(containerName, "'containerName' cannot be null.");
    Objects.requireNonNull(blobName, "'blobName' cannot be null.");

    HttpPipeline pipeline = super.getPipeline();
    if (pipeline == null) {
        pipeline = super.buildPipeline();
    }

    return new BlobAsyncClient(new AzureBlobStorageBuilder()
        .url(String.format("%s/%s/%s", endpoint, containerName, blobName))
        .pipeline(pipeline)
        .build(), snapshot, cpk); // cpk: the customer-provided encryption key (CPK) info
}
Objects.requireNonNull(containerName);
public BlobAsyncClient buildBlobAsyncClient() { Objects.requireNonNull(containerName, "'containerName' cannot be null."); Objects.requireNonNull(blobName, "'blobName' cannot be null."); HttpPipeline pipeline = super.getPipeline(); if (pipeline == null) { pipeline = super.buildPipeline(); } return new BlobAsyncClient(new AzureBlobStorageBuilder() .url(String.format("%s/%s/%s", endpoint, containerName, blobName)) .pipeline(pipeline) .build(), snapshot, customerProvidedKey); }
class BlobClientBuilder extends BaseBlobClientBuilder<BlobClientBuilder> { private final ClientLogger logger = new ClientLogger(BlobClientBuilder.class); private String containerName; private String blobName; private String snapshot; /** * Creates a builder instance that is able to configure and construct {@link BlobClient BlobClients} and * {@link BlobAsyncClient BlobAsyncClients}. */ public BlobClientBuilder() { } /** * Creates a {@link BlobClient} based on options set in the Builder. BlobClients are used to perform generic blob * methods such as {@link BlobClient * properties}, use this when the blob type is unknown. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.Builder.buildBlobClient} * * @return a {@link BlobClient} created from the configurations in this builder. * @throws NullPointerException If {@code endpoint}, {@code containerName}, or {@code blobName} is {@code null}. */ public BlobClient buildBlobClient() { return new BlobClient(buildBlobAsyncClient()); } /** * Creates a {@link BlobAsyncClient} based on options set in the Builder. BlobAsyncClients are used to perform * generic blob methods such as {@link BlobAsyncClient * BlobAsyncClient * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.Builder.buildBlobAsyncClient} * * @return a {@link BlobAsyncClient} created from the configurations in this builder. * @throws NullPointerException If {@code endpoint}, {@code containerName}, or {@code blobName} is {@code null}. */ /** * Sets the service endpoint, additionally parses it for information (SAS token, container name, blob name) * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.Builder.endpoint * * @param endpoint URL of the service * @return the updated BlobClientBuilder object * @throws IllegalArgumentException If {@code endpoint} is {@code null} or is a malformed URL. 
*/ @Override public BlobClientBuilder endpoint(String endpoint) { try { URL url = new URL(endpoint); BlobURLParts parts = BlobURLParts.parse(url); this.endpoint = parts.getScheme() + ": this.containerName = parts.getContainerName(); this.blobName = parts.getBlobName(); this.snapshot = parts.getSnapshot(); SASTokenCredential sasTokenCredential = SASTokenCredential.fromSASTokenString(parts.getSasQueryParameters().encode()); if (sasTokenCredential != null) { super.credential(sasTokenCredential); } } catch (MalformedURLException ex) { throw logger.logExceptionAsError( new IllegalArgumentException("The Azure Storage Blob endpoint url is malformed.")); } return this; } /** * Sets the name of the container this client is connecting to. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.Builder.containerName * * @param containerName the name of the container * @return the updated BlobClientBuilder object * @throws NullPointerException If {@code containerName} is {@code null} */ public BlobClientBuilder containerName(String containerName) { this.containerName = Objects.requireNonNull(containerName); return this; } /** * Sets the name of the blob this client is connecting to. * * @param blobName the name of the blob * @return the updated BlobClientBuilder object * @throws NullPointerException If {@code blobName} is {@code null} */ public BlobClientBuilder blobName(String blobName) { this.blobName = Objects.requireNonNull(blobName); return this; } /** * Sets the snapshot of the blob this client is connecting to. * * @param snapshot the snapshot identifier for the blob * @return the updated BlobClientBuilder object */ public BlobClientBuilder snapshot(String snapshot) { this.snapshot = snapshot; return this; } @Override protected Class<BlobClientBuilder> getClazz() { return BlobClientBuilder.class; } }
class BlobClientBuilder extends BaseBlobClientBuilder<BlobClientBuilder> { private final ClientLogger logger = new ClientLogger(BlobClientBuilder.class); private String containerName; private String blobName; private String snapshot; /** * Creates a builder instance that is able to configure and construct {@link BlobClient BlobClients} and * {@link BlobAsyncClient BlobAsyncClients}. */ public BlobClientBuilder() { } /** * Creates a {@link BlobClient} based on options set in the Builder. BlobClients are used to perform generic blob * methods such as {@link BlobClient * properties}, use this when the blob type is unknown. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.Builder.buildBlobClient} * * @return a {@link BlobClient} created from the configurations in this builder. * @throws NullPointerException If {@code endpoint}, {@code containerName}, or {@code blobName} is {@code null}. */ public BlobClient buildBlobClient() { return new BlobClient(buildBlobAsyncClient()); } /** * Creates a {@link BlobAsyncClient} based on options set in the Builder. BlobAsyncClients are used to perform * generic blob methods such as {@link BlobAsyncClient * BlobAsyncClient * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.Builder.buildBlobAsyncClient} * * @return a {@link BlobAsyncClient} created from the configurations in this builder. * @throws NullPointerException If {@code endpoint}, {@code containerName}, or {@code blobName} is {@code null}. */ /** * Sets the service endpoint, additionally parses it for information (SAS token, container name, blob name) * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.Builder.endpoint * * @param endpoint URL of the service * @return the updated BlobClientBuilder object * @throws IllegalArgumentException If {@code endpoint} is {@code null} or is a malformed URL. 
*/ @Override public BlobClientBuilder endpoint(String endpoint) { try { URL url = new URL(endpoint); BlobURLParts parts = BlobURLParts.parse(url); this.endpoint = parts.getScheme() + ": this.containerName = parts.getContainerName(); this.blobName = parts.getBlobName(); this.snapshot = parts.getSnapshot(); SASTokenCredential sasTokenCredential = SASTokenCredential.fromSASTokenString(parts.getSasQueryParameters().encode()); if (sasTokenCredential != null) { super.credential(sasTokenCredential); } } catch (MalformedURLException ex) { throw logger.logExceptionAsError( new IllegalArgumentException("The Azure Storage Blob endpoint url is malformed.")); } return this; } /** * Sets the name of the container this client is connecting to. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.Builder.containerName * * @param containerName the name of the container * @return the updated BlobClientBuilder object * @throws NullPointerException If {@code containerName} is {@code null} */ public BlobClientBuilder containerName(String containerName) { this.containerName = Objects.requireNonNull(containerName, "'containerName' cannot be null."); return this; } /** * Sets the name of the blob this client is connecting to. * * @param blobName the name of the blob * @return the updated BlobClientBuilder object * @throws NullPointerException If {@code blobName} is {@code null} */ public BlobClientBuilder blobName(String blobName) { this.blobName = Objects.requireNonNull(blobName, "'blobName' cannot be null."); return this; } /** * Sets the snapshot of the blob this client is connecting to. * * @param snapshot the snapshot identifier for the blob * @return the updated BlobClientBuilder object */ public BlobClientBuilder snapshot(String snapshot) { this.snapshot = snapshot; return this; } @Override protected Class<BlobClientBuilder> getClazz() { return BlobClientBuilder.class; } }
What is `cpk`? This isn't a very intuitive variable name.
public BlobAsyncClient buildBlobAsyncClient() { Objects.requireNonNull(containerName); Objects.requireNonNull(blobName); HttpPipeline pipeline = super.getPipeline(); if (pipeline == null) { pipeline = super.buildPipeline(); } return new BlobAsyncClient(new AzureBlobStorageBuilder() .url(String.format("%s/%s/%s", endpoint, containerName, blobName)) .pipeline(pipeline) .build(), snapshot, cpk); }
.build(), snapshot, cpk);
public BlobAsyncClient buildBlobAsyncClient() { Objects.requireNonNull(containerName, "'containerName' cannot be null."); Objects.requireNonNull(blobName, "'blobName' cannot be null."); HttpPipeline pipeline = super.getPipeline(); if (pipeline == null) { pipeline = super.buildPipeline(); } return new BlobAsyncClient(new AzureBlobStorageBuilder() .url(String.format("%s/%s/%s", endpoint, containerName, blobName)) .pipeline(pipeline) .build(), snapshot, customerProvidedKey); }
class BlobClientBuilder extends BaseBlobClientBuilder<BlobClientBuilder> { private final ClientLogger logger = new ClientLogger(BlobClientBuilder.class); private String containerName; private String blobName; private String snapshot; /** * Creates a builder instance that is able to configure and construct {@link BlobClient BlobClients} and * {@link BlobAsyncClient BlobAsyncClients}. */ public BlobClientBuilder() { } /** * Creates a {@link BlobClient} based on options set in the Builder. BlobClients are used to perform generic blob * methods such as {@link BlobClient * properties}, use this when the blob type is unknown. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.Builder.buildBlobClient} * * @return a {@link BlobClient} created from the configurations in this builder. * @throws NullPointerException If {@code endpoint}, {@code containerName}, or {@code blobName} is {@code null}. */ public BlobClient buildBlobClient() { return new BlobClient(buildBlobAsyncClient()); } /** * Creates a {@link BlobAsyncClient} based on options set in the Builder. BlobAsyncClients are used to perform * generic blob methods such as {@link BlobAsyncClient * BlobAsyncClient * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.Builder.buildBlobAsyncClient} * * @return a {@link BlobAsyncClient} created from the configurations in this builder. * @throws NullPointerException If {@code endpoint}, {@code containerName}, or {@code blobName} is {@code null}. */ /** * Sets the service endpoint, additionally parses it for information (SAS token, container name, blob name) * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.Builder.endpoint * * @param endpoint URL of the service * @return the updated BlobClientBuilder object * @throws IllegalArgumentException If {@code endpoint} is {@code null} or is a malformed URL. 
*/ @Override public BlobClientBuilder endpoint(String endpoint) { try { URL url = new URL(endpoint); BlobURLParts parts = BlobURLParts.parse(url); this.endpoint = parts.getScheme() + ": this.containerName = parts.getContainerName(); this.blobName = parts.getBlobName(); this.snapshot = parts.getSnapshot(); SASTokenCredential sasTokenCredential = SASTokenCredential.fromSASTokenString(parts.getSasQueryParameters().encode()); if (sasTokenCredential != null) { super.credential(sasTokenCredential); } } catch (MalformedURLException ex) { throw logger.logExceptionAsError( new IllegalArgumentException("The Azure Storage Blob endpoint url is malformed.")); } return this; } /** * Sets the name of the container this client is connecting to. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.Builder.containerName * * @param containerName the name of the container * @return the updated BlobClientBuilder object * @throws NullPointerException If {@code containerName} is {@code null} */ public BlobClientBuilder containerName(String containerName) { this.containerName = Objects.requireNonNull(containerName); return this; } /** * Sets the name of the blob this client is connecting to. * * @param blobName the name of the blob * @return the updated BlobClientBuilder object * @throws NullPointerException If {@code blobName} is {@code null} */ public BlobClientBuilder blobName(String blobName) { this.blobName = Objects.requireNonNull(blobName); return this; } /** * Sets the snapshot of the blob this client is connecting to. * * @param snapshot the snapshot identifier for the blob * @return the updated BlobClientBuilder object */ public BlobClientBuilder snapshot(String snapshot) { this.snapshot = snapshot; return this; } @Override protected Class<BlobClientBuilder> getClazz() { return BlobClientBuilder.class; } }
class BlobClientBuilder extends BaseBlobClientBuilder<BlobClientBuilder> { private final ClientLogger logger = new ClientLogger(BlobClientBuilder.class); private String containerName; private String blobName; private String snapshot; /** * Creates a builder instance that is able to configure and construct {@link BlobClient BlobClients} and * {@link BlobAsyncClient BlobAsyncClients}. */ public BlobClientBuilder() { } /** * Creates a {@link BlobClient} based on options set in the Builder. BlobClients are used to perform generic blob * methods such as {@link BlobClient * properties}, use this when the blob type is unknown. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.Builder.buildBlobClient} * * @return a {@link BlobClient} created from the configurations in this builder. * @throws NullPointerException If {@code endpoint}, {@code containerName}, or {@code blobName} is {@code null}. */ public BlobClient buildBlobClient() { return new BlobClient(buildBlobAsyncClient()); } /** * Creates a {@link BlobAsyncClient} based on options set in the Builder. BlobAsyncClients are used to perform * generic blob methods such as {@link BlobAsyncClient * BlobAsyncClient * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.Builder.buildBlobAsyncClient} * * @return a {@link BlobAsyncClient} created from the configurations in this builder. * @throws NullPointerException If {@code endpoint}, {@code containerName}, or {@code blobName} is {@code null}. */ /** * Sets the service endpoint, additionally parses it for information (SAS token, container name, blob name) * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.Builder.endpoint * * @param endpoint URL of the service * @return the updated BlobClientBuilder object * @throws IllegalArgumentException If {@code endpoint} is {@code null} or is a malformed URL. 
*/ @Override public BlobClientBuilder endpoint(String endpoint) { try { URL url = new URL(endpoint); BlobURLParts parts = BlobURLParts.parse(url); this.endpoint = parts.getScheme() + ": this.containerName = parts.getContainerName(); this.blobName = parts.getBlobName(); this.snapshot = parts.getSnapshot(); SASTokenCredential sasTokenCredential = SASTokenCredential.fromSASTokenString(parts.getSasQueryParameters().encode()); if (sasTokenCredential != null) { super.credential(sasTokenCredential); } } catch (MalformedURLException ex) { throw logger.logExceptionAsError( new IllegalArgumentException("The Azure Storage Blob endpoint url is malformed.")); } return this; } /** * Sets the name of the container this client is connecting to. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.Builder.containerName * * @param containerName the name of the container * @return the updated BlobClientBuilder object * @throws NullPointerException If {@code containerName} is {@code null} */ public BlobClientBuilder containerName(String containerName) { this.containerName = Objects.requireNonNull(containerName, "'containerName' cannot be null."); return this; } /** * Sets the name of the blob this client is connecting to. * * @param blobName the name of the blob * @return the updated BlobClientBuilder object * @throws NullPointerException If {@code blobName} is {@code null} */ public BlobClientBuilder blobName(String blobName) { this.blobName = Objects.requireNonNull(blobName, "'blobName' cannot be null."); return this; } /** * Sets the snapshot of the blob this client is connecting to. * * @param snapshot the snapshot identifier for the blob * @return the updated BlobClientBuilder object */ public BlobClientBuilder snapshot(String snapshot) { this.snapshot = snapshot; return this; } @Override protected Class<BlobClientBuilder> getClazz() { return BlobClientBuilder.class; } }
We need a reference to this feature name in some sense, since we need to easily differentiate it from client-side encryption — the previous feature, in which customers managed their own encryption keys before the service was able to do that work for them.
public BlobAsyncClient buildBlobAsyncClient() { Objects.requireNonNull(containerName); Objects.requireNonNull(blobName); HttpPipeline pipeline = super.getPipeline(); if (pipeline == null) { pipeline = super.buildPipeline(); } return new BlobAsyncClient(new AzureBlobStorageBuilder() .url(String.format("%s/%s/%s", endpoint, containerName, blobName)) .pipeline(pipeline) .build(), snapshot, cpk); }
.build(), snapshot, cpk);
public BlobAsyncClient buildBlobAsyncClient() { Objects.requireNonNull(containerName, "'containerName' cannot be null."); Objects.requireNonNull(blobName, "'blobName' cannot be null."); HttpPipeline pipeline = super.getPipeline(); if (pipeline == null) { pipeline = super.buildPipeline(); } return new BlobAsyncClient(new AzureBlobStorageBuilder() .url(String.format("%s/%s/%s", endpoint, containerName, blobName)) .pipeline(pipeline) .build(), snapshot, customerProvidedKey); }
class BlobClientBuilder extends BaseBlobClientBuilder<BlobClientBuilder> { private final ClientLogger logger = new ClientLogger(BlobClientBuilder.class); private String containerName; private String blobName; private String snapshot; /** * Creates a builder instance that is able to configure and construct {@link BlobClient BlobClients} and * {@link BlobAsyncClient BlobAsyncClients}. */ public BlobClientBuilder() { } /** * Creates a {@link BlobClient} based on options set in the Builder. BlobClients are used to perform generic blob * methods such as {@link BlobClient * properties}, use this when the blob type is unknown. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.Builder.buildBlobClient} * * @return a {@link BlobClient} created from the configurations in this builder. * @throws NullPointerException If {@code endpoint}, {@code containerName}, or {@code blobName} is {@code null}. */ public BlobClient buildBlobClient() { return new BlobClient(buildBlobAsyncClient()); } /** * Creates a {@link BlobAsyncClient} based on options set in the Builder. BlobAsyncClients are used to perform * generic blob methods such as {@link BlobAsyncClient * BlobAsyncClient * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.Builder.buildBlobAsyncClient} * * @return a {@link BlobAsyncClient} created from the configurations in this builder. * @throws NullPointerException If {@code endpoint}, {@code containerName}, or {@code blobName} is {@code null}. */ /** * Sets the service endpoint, additionally parses it for information (SAS token, container name, blob name) * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.Builder.endpoint * * @param endpoint URL of the service * @return the updated BlobClientBuilder object * @throws IllegalArgumentException If {@code endpoint} is {@code null} or is a malformed URL. 
*/ @Override public BlobClientBuilder endpoint(String endpoint) { try { URL url = new URL(endpoint); BlobURLParts parts = BlobURLParts.parse(url); this.endpoint = parts.getScheme() + ": this.containerName = parts.getContainerName(); this.blobName = parts.getBlobName(); this.snapshot = parts.getSnapshot(); SASTokenCredential sasTokenCredential = SASTokenCredential.fromSASTokenString(parts.getSasQueryParameters().encode()); if (sasTokenCredential != null) { super.credential(sasTokenCredential); } } catch (MalformedURLException ex) { throw logger.logExceptionAsError( new IllegalArgumentException("The Azure Storage Blob endpoint url is malformed.")); } return this; } /** * Sets the name of the container this client is connecting to. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.Builder.containerName * * @param containerName the name of the container * @return the updated BlobClientBuilder object * @throws NullPointerException If {@code containerName} is {@code null} */ public BlobClientBuilder containerName(String containerName) { this.containerName = Objects.requireNonNull(containerName); return this; } /** * Sets the name of the blob this client is connecting to. * * @param blobName the name of the blob * @return the updated BlobClientBuilder object * @throws NullPointerException If {@code blobName} is {@code null} */ public BlobClientBuilder blobName(String blobName) { this.blobName = Objects.requireNonNull(blobName); return this; } /** * Sets the snapshot of the blob this client is connecting to. * * @param snapshot the snapshot identifier for the blob * @return the updated BlobClientBuilder object */ public BlobClientBuilder snapshot(String snapshot) { this.snapshot = snapshot; return this; } @Override protected Class<BlobClientBuilder> getClazz() { return BlobClientBuilder.class; } }
class BlobClientBuilder extends BaseBlobClientBuilder<BlobClientBuilder> { private final ClientLogger logger = new ClientLogger(BlobClientBuilder.class); private String containerName; private String blobName; private String snapshot; /** * Creates a builder instance that is able to configure and construct {@link BlobClient BlobClients} and * {@link BlobAsyncClient BlobAsyncClients}. */ public BlobClientBuilder() { } /** * Creates a {@link BlobClient} based on options set in the Builder. BlobClients are used to perform generic blob * methods such as {@link BlobClient * properties}, use this when the blob type is unknown. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.Builder.buildBlobClient} * * @return a {@link BlobClient} created from the configurations in this builder. * @throws NullPointerException If {@code endpoint}, {@code containerName}, or {@code blobName} is {@code null}. */ public BlobClient buildBlobClient() { return new BlobClient(buildBlobAsyncClient()); } /** * Creates a {@link BlobAsyncClient} based on options set in the Builder. BlobAsyncClients are used to perform * generic blob methods such as {@link BlobAsyncClient * BlobAsyncClient * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.Builder.buildBlobAsyncClient} * * @return a {@link BlobAsyncClient} created from the configurations in this builder. * @throws NullPointerException If {@code endpoint}, {@code containerName}, or {@code blobName} is {@code null}. */ /** * Sets the service endpoint, additionally parses it for information (SAS token, container name, blob name) * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.Builder.endpoint * * @param endpoint URL of the service * @return the updated BlobClientBuilder object * @throws IllegalArgumentException If {@code endpoint} is {@code null} or is a malformed URL. 
*/ @Override public BlobClientBuilder endpoint(String endpoint) { try { URL url = new URL(endpoint); BlobURLParts parts = BlobURLParts.parse(url); this.endpoint = parts.getScheme() + ": this.containerName = parts.getContainerName(); this.blobName = parts.getBlobName(); this.snapshot = parts.getSnapshot(); SASTokenCredential sasTokenCredential = SASTokenCredential.fromSASTokenString(parts.getSasQueryParameters().encode()); if (sasTokenCredential != null) { super.credential(sasTokenCredential); } } catch (MalformedURLException ex) { throw logger.logExceptionAsError( new IllegalArgumentException("The Azure Storage Blob endpoint url is malformed.")); } return this; } /** * Sets the name of the container this client is connecting to. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.Builder.containerName * * @param containerName the name of the container * @return the updated BlobClientBuilder object * @throws NullPointerException If {@code containerName} is {@code null} */ public BlobClientBuilder containerName(String containerName) { this.containerName = Objects.requireNonNull(containerName, "'containerName' cannot be null."); return this; } /** * Sets the name of the blob this client is connecting to. * * @param blobName the name of the blob * @return the updated BlobClientBuilder object * @throws NullPointerException If {@code blobName} is {@code null} */ public BlobClientBuilder blobName(String blobName) { this.blobName = Objects.requireNonNull(blobName, "'blobName' cannot be null."); return this; } /** * Sets the snapshot of the blob this client is connecting to. * * @param snapshot the snapshot identifier for the blob * @return the updated BlobClientBuilder object */ public BlobClientBuilder snapshot(String snapshot) { this.snapshot = snapshot; return this; } @Override protected Class<BlobClientBuilder> getClazz() { return BlobClientBuilder.class; } }
Changed field and getter to customerProvidedKey and getCustomerProvidedKey.
public BlobAsyncClient buildBlobAsyncClient() { Objects.requireNonNull(containerName); Objects.requireNonNull(blobName); HttpPipeline pipeline = super.getPipeline(); if (pipeline == null) { pipeline = super.buildPipeline(); } return new BlobAsyncClient(new AzureBlobStorageBuilder() .url(String.format("%s/%s/%s", endpoint, containerName, blobName)) .pipeline(pipeline) .build(), snapshot, cpk); }
.build(), snapshot, cpk);
public BlobAsyncClient buildBlobAsyncClient() { Objects.requireNonNull(containerName, "'containerName' cannot be null."); Objects.requireNonNull(blobName, "'blobName' cannot be null."); HttpPipeline pipeline = super.getPipeline(); if (pipeline == null) { pipeline = super.buildPipeline(); } return new BlobAsyncClient(new AzureBlobStorageBuilder() .url(String.format("%s/%s/%s", endpoint, containerName, blobName)) .pipeline(pipeline) .build(), snapshot, customerProvidedKey); }
class BlobClientBuilder extends BaseBlobClientBuilder<BlobClientBuilder> { private final ClientLogger logger = new ClientLogger(BlobClientBuilder.class); private String containerName; private String blobName; private String snapshot; /** * Creates a builder instance that is able to configure and construct {@link BlobClient BlobClients} and * {@link BlobAsyncClient BlobAsyncClients}. */ public BlobClientBuilder() { } /** * Creates a {@link BlobClient} based on options set in the Builder. BlobClients are used to perform generic blob * methods such as {@link BlobClient * properties}, use this when the blob type is unknown. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.Builder.buildBlobClient} * * @return a {@link BlobClient} created from the configurations in this builder. * @throws NullPointerException If {@code endpoint}, {@code containerName}, or {@code blobName} is {@code null}. */ public BlobClient buildBlobClient() { return new BlobClient(buildBlobAsyncClient()); } /** * Creates a {@link BlobAsyncClient} based on options set in the Builder. BlobAsyncClients are used to perform * generic blob methods such as {@link BlobAsyncClient * BlobAsyncClient * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.Builder.buildBlobAsyncClient} * * @return a {@link BlobAsyncClient} created from the configurations in this builder. * @throws NullPointerException If {@code endpoint}, {@code containerName}, or {@code blobName} is {@code null}. */ /** * Sets the service endpoint, additionally parses it for information (SAS token, container name, blob name) * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.Builder.endpoint * * @param endpoint URL of the service * @return the updated BlobClientBuilder object * @throws IllegalArgumentException If {@code endpoint} is {@code null} or is a malformed URL. 
*/ @Override public BlobClientBuilder endpoint(String endpoint) { try { URL url = new URL(endpoint); BlobURLParts parts = BlobURLParts.parse(url); this.endpoint = parts.getScheme() + ": this.containerName = parts.getContainerName(); this.blobName = parts.getBlobName(); this.snapshot = parts.getSnapshot(); SASTokenCredential sasTokenCredential = SASTokenCredential.fromSASTokenString(parts.getSasQueryParameters().encode()); if (sasTokenCredential != null) { super.credential(sasTokenCredential); } } catch (MalformedURLException ex) { throw logger.logExceptionAsError( new IllegalArgumentException("The Azure Storage Blob endpoint url is malformed.")); } return this; } /** * Sets the name of the container this client is connecting to. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.Builder.containerName * * @param containerName the name of the container * @return the updated BlobClientBuilder object * @throws NullPointerException If {@code containerName} is {@code null} */ public BlobClientBuilder containerName(String containerName) { this.containerName = Objects.requireNonNull(containerName); return this; } /** * Sets the name of the blob this client is connecting to. * * @param blobName the name of the blob * @return the updated BlobClientBuilder object * @throws NullPointerException If {@code blobName} is {@code null} */ public BlobClientBuilder blobName(String blobName) { this.blobName = Objects.requireNonNull(blobName); return this; } /** * Sets the snapshot of the blob this client is connecting to. * * @param snapshot the snapshot identifier for the blob * @return the updated BlobClientBuilder object */ public BlobClientBuilder snapshot(String snapshot) { this.snapshot = snapshot; return this; } @Override protected Class<BlobClientBuilder> getClazz() { return BlobClientBuilder.class; } }
class BlobClientBuilder extends BaseBlobClientBuilder<BlobClientBuilder> { private final ClientLogger logger = new ClientLogger(BlobClientBuilder.class); private String containerName; private String blobName; private String snapshot; /** * Creates a builder instance that is able to configure and construct {@link BlobClient BlobClients} and * {@link BlobAsyncClient BlobAsyncClients}. */ public BlobClientBuilder() { } /** * Creates a {@link BlobClient} based on options set in the Builder. BlobClients are used to perform generic blob * methods such as {@link BlobClient * properties}, use this when the blob type is unknown. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.Builder.buildBlobClient} * * @return a {@link BlobClient} created from the configurations in this builder. * @throws NullPointerException If {@code endpoint}, {@code containerName}, or {@code blobName} is {@code null}. */ public BlobClient buildBlobClient() { return new BlobClient(buildBlobAsyncClient()); } /** * Creates a {@link BlobAsyncClient} based on options set in the Builder. BlobAsyncClients are used to perform * generic blob methods such as {@link BlobAsyncClient * BlobAsyncClient * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.Builder.buildBlobAsyncClient} * * @return a {@link BlobAsyncClient} created from the configurations in this builder. * @throws NullPointerException If {@code endpoint}, {@code containerName}, or {@code blobName} is {@code null}. */ /** * Sets the service endpoint, additionally parses it for information (SAS token, container name, blob name) * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.Builder.endpoint * * @param endpoint URL of the service * @return the updated BlobClientBuilder object * @throws IllegalArgumentException If {@code endpoint} is {@code null} or is a malformed URL. 
*/ @Override public BlobClientBuilder endpoint(String endpoint) { try { URL url = new URL(endpoint); BlobURLParts parts = BlobURLParts.parse(url); this.endpoint = parts.getScheme() + ": this.containerName = parts.getContainerName(); this.blobName = parts.getBlobName(); this.snapshot = parts.getSnapshot(); SASTokenCredential sasTokenCredential = SASTokenCredential.fromSASTokenString(parts.getSasQueryParameters().encode()); if (sasTokenCredential != null) { super.credential(sasTokenCredential); } } catch (MalformedURLException ex) { throw logger.logExceptionAsError( new IllegalArgumentException("The Azure Storage Blob endpoint url is malformed.")); } return this; } /** * Sets the name of the container this client is connecting to. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.Builder.containerName * * @param containerName the name of the container * @return the updated BlobClientBuilder object * @throws NullPointerException If {@code containerName} is {@code null} */ public BlobClientBuilder containerName(String containerName) { this.containerName = Objects.requireNonNull(containerName, "'containerName' cannot be null."); return this; } /** * Sets the name of the blob this client is connecting to. * * @param blobName the name of the blob * @return the updated BlobClientBuilder object * @throws NullPointerException If {@code blobName} is {@code null} */ public BlobClientBuilder blobName(String blobName) { this.blobName = Objects.requireNonNull(blobName, "'blobName' cannot be null."); return this; } /** * Sets the snapshot of the blob this client is connecting to. * * @param snapshot the snapshot identifier for the blob * @return the updated BlobClientBuilder object */ public BlobClientBuilder snapshot(String snapshot) { this.snapshot = snapshot; return this; } @Override protected Class<BlobClientBuilder> getClazz() { return BlobClientBuilder.class; } }
Added message
public BlobAsyncClient buildBlobAsyncClient() { Objects.requireNonNull(containerName); Objects.requireNonNull(blobName); HttpPipeline pipeline = super.getPipeline(); if (pipeline == null) { pipeline = super.buildPipeline(); } return new BlobAsyncClient(new AzureBlobStorageBuilder() .url(String.format("%s/%s/%s", endpoint, containerName, blobName)) .pipeline(pipeline) .build(), snapshot, cpk); }
Objects.requireNonNull(containerName);
public BlobAsyncClient buildBlobAsyncClient() { Objects.requireNonNull(containerName, "'containerName' cannot be null."); Objects.requireNonNull(blobName, "'blobName' cannot be null."); HttpPipeline pipeline = super.getPipeline(); if (pipeline == null) { pipeline = super.buildPipeline(); } return new BlobAsyncClient(new AzureBlobStorageBuilder() .url(String.format("%s/%s/%s", endpoint, containerName, blobName)) .pipeline(pipeline) .build(), snapshot, customerProvidedKey); }
class BlobClientBuilder extends BaseBlobClientBuilder<BlobClientBuilder> { private final ClientLogger logger = new ClientLogger(BlobClientBuilder.class); private String containerName; private String blobName; private String snapshot; /** * Creates a builder instance that is able to configure and construct {@link BlobClient BlobClients} and * {@link BlobAsyncClient BlobAsyncClients}. */ public BlobClientBuilder() { } /** * Creates a {@link BlobClient} based on options set in the Builder. BlobClients are used to perform generic blob * methods such as {@link BlobClient * properties}, use this when the blob type is unknown. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.Builder.buildBlobClient} * * @return a {@link BlobClient} created from the configurations in this builder. * @throws NullPointerException If {@code endpoint}, {@code containerName}, or {@code blobName} is {@code null}. */ public BlobClient buildBlobClient() { return new BlobClient(buildBlobAsyncClient()); } /** * Creates a {@link BlobAsyncClient} based on options set in the Builder. BlobAsyncClients are used to perform * generic blob methods such as {@link BlobAsyncClient * BlobAsyncClient * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.Builder.buildBlobAsyncClient} * * @return a {@link BlobAsyncClient} created from the configurations in this builder. * @throws NullPointerException If {@code endpoint}, {@code containerName}, or {@code blobName} is {@code null}. */ /** * Sets the service endpoint, additionally parses it for information (SAS token, container name, blob name) * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.Builder.endpoint * * @param endpoint URL of the service * @return the updated BlobClientBuilder object * @throws IllegalArgumentException If {@code endpoint} is {@code null} or is a malformed URL. 
*/ @Override public BlobClientBuilder endpoint(String endpoint) { try { URL url = new URL(endpoint); BlobURLParts parts = BlobURLParts.parse(url); this.endpoint = parts.getScheme() + ": this.containerName = parts.getContainerName(); this.blobName = parts.getBlobName(); this.snapshot = parts.getSnapshot(); SASTokenCredential sasTokenCredential = SASTokenCredential.fromSASTokenString(parts.getSasQueryParameters().encode()); if (sasTokenCredential != null) { super.credential(sasTokenCredential); } } catch (MalformedURLException ex) { throw logger.logExceptionAsError( new IllegalArgumentException("The Azure Storage Blob endpoint url is malformed.")); } return this; } /** * Sets the name of the container this client is connecting to. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.Builder.containerName * * @param containerName the name of the container * @return the updated BlobClientBuilder object * @throws NullPointerException If {@code containerName} is {@code null} */ public BlobClientBuilder containerName(String containerName) { this.containerName = Objects.requireNonNull(containerName); return this; } /** * Sets the name of the blob this client is connecting to. * * @param blobName the name of the blob * @return the updated BlobClientBuilder object * @throws NullPointerException If {@code blobName} is {@code null} */ public BlobClientBuilder blobName(String blobName) { this.blobName = Objects.requireNonNull(blobName); return this; } /** * Sets the snapshot of the blob this client is connecting to. * * @param snapshot the snapshot identifier for the blob * @return the updated BlobClientBuilder object */ public BlobClientBuilder snapshot(String snapshot) { this.snapshot = snapshot; return this; } @Override protected Class<BlobClientBuilder> getClazz() { return BlobClientBuilder.class; } }
class BlobClientBuilder extends BaseBlobClientBuilder<BlobClientBuilder> { private final ClientLogger logger = new ClientLogger(BlobClientBuilder.class); private String containerName; private String blobName; private String snapshot; /** * Creates a builder instance that is able to configure and construct {@link BlobClient BlobClients} and * {@link BlobAsyncClient BlobAsyncClients}. */ public BlobClientBuilder() { } /** * Creates a {@link BlobClient} based on options set in the Builder. BlobClients are used to perform generic blob * methods such as {@link BlobClient * properties}, use this when the blob type is unknown. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.Builder.buildBlobClient} * * @return a {@link BlobClient} created from the configurations in this builder. * @throws NullPointerException If {@code endpoint}, {@code containerName}, or {@code blobName} is {@code null}. */ public BlobClient buildBlobClient() { return new BlobClient(buildBlobAsyncClient()); } /** * Creates a {@link BlobAsyncClient} based on options set in the Builder. BlobAsyncClients are used to perform * generic blob methods such as {@link BlobAsyncClient * BlobAsyncClient * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.Builder.buildBlobAsyncClient} * * @return a {@link BlobAsyncClient} created from the configurations in this builder. * @throws NullPointerException If {@code endpoint}, {@code containerName}, or {@code blobName} is {@code null}. */ /** * Sets the service endpoint, additionally parses it for information (SAS token, container name, blob name) * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.Builder.endpoint * * @param endpoint URL of the service * @return the updated BlobClientBuilder object * @throws IllegalArgumentException If {@code endpoint} is {@code null} or is a malformed URL. 
*/ @Override public BlobClientBuilder endpoint(String endpoint) { try { URL url = new URL(endpoint); BlobURLParts parts = BlobURLParts.parse(url); this.endpoint = parts.getScheme() + ": this.containerName = parts.getContainerName(); this.blobName = parts.getBlobName(); this.snapshot = parts.getSnapshot(); SASTokenCredential sasTokenCredential = SASTokenCredential.fromSASTokenString(parts.getSasQueryParameters().encode()); if (sasTokenCredential != null) { super.credential(sasTokenCredential); } } catch (MalformedURLException ex) { throw logger.logExceptionAsError( new IllegalArgumentException("The Azure Storage Blob endpoint url is malformed.")); } return this; } /** * Sets the name of the container this client is connecting to. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.Builder.containerName * * @param containerName the name of the container * @return the updated BlobClientBuilder object * @throws NullPointerException If {@code containerName} is {@code null} */ public BlobClientBuilder containerName(String containerName) { this.containerName = Objects.requireNonNull(containerName, "'containerName' cannot be null."); return this; } /** * Sets the name of the blob this client is connecting to. * * @param blobName the name of the blob * @return the updated BlobClientBuilder object * @throws NullPointerException If {@code blobName} is {@code null} */ public BlobClientBuilder blobName(String blobName) { this.blobName = Objects.requireNonNull(blobName, "'blobName' cannot be null."); return this; } /** * Sets the snapshot of the blob this client is connecting to. * * @param snapshot the snapshot identifier for the blob * @return the updated BlobClientBuilder object */ public BlobClientBuilder snapshot(String snapshot) { this.snapshot = snapshot; return this; } @Override protected Class<BlobClientBuilder> getClazz() { return BlobClientBuilder.class; } }
These two return two different results. Original one is : 2 ^ (tryCount-1) Now: (tryCount -1) ^ 2 What if tryCount = 1, does delay expect to be negative?
long calculateDelayInMs(int tryCount) { long delay; switch (this.retryPolicyType) { case EXPONENTIAL: delay = ((tryCount - 1) * (tryCount - 1) - 1L) * this.retryDelayInMs; break; case FIXED: delay = this.retryDelayInMs; break; default: throw logger.logExceptionAsError(new IllegalArgumentException("Invalid retry policy type.")); } return Math.min(delay, this.maxRetryDelayInMs); }
delay = ((tryCount - 1) * (tryCount - 1) - 1L) * this.retryDelayInMs;
long calculateDelayInMs(int tryCount) { long delay; switch (this.retryPolicyType) { case EXPONENTIAL: delay = ((tryCount - 1) * (tryCount - 1) - 1L) * this.retryDelayInMs; break; case FIXED: delay = this.retryDelayInMs; break; default: throw logger.logExceptionAsError(new IllegalArgumentException("Invalid retry policy type.")); } return Math.min(delay, this.maxRetryDelayInMs); }
class RequestRetryOptions { private final ClientLogger logger = new ClientLogger(RequestRetryOptions.class); private final int maxTries; private final int tryTimeout; private final long retryDelayInMs; private final long maxRetryDelayInMs; private final RetryPolicyType retryPolicyType; private final String secondaryHost; /** * Configures how the {@link HttpPipeline} should retry requests. */ public RequestRetryOptions() { this(RetryPolicyType.EXPONENTIAL, null, null, null, null, null); } /** * Configures how the {@link HttpPipeline} should retry requests. * * @param retryPolicyType Optional. A {@link RetryPolicyType} specifying the type of retry pattern to use, default * value is {@link RetryPolicyType * @param maxTries Optional. Maximum number of attempts an operation will be retried, default is {@code 4}. * @param tryTimeout Optional. Specified the maximum time allowed before a request is cancelled and assumed failed, * default is {@link Integer * * <p>This value should be based on the bandwidth available to the host machine and proximity to the Storage * service, a good starting point may be 60 seconds per MB of anticipated payload size.</p> * @param retryDelayInMs Optional. Specifies the amount of delay to use before retrying an operation, default value * is {@code 4ms} when {@code retryPolicyType} is {@link RetryPolicyType * when {@code retryPolicyType} is {@link RetryPolicyType * @param maxRetryDelayInMs Optional. Specifies the maximum delay allowed before retrying an operation, default * value is {@code 120ms}. * @param secondaryHost Optional. Specified a secondary Storage account to retry requests against, default is none. 
* * <p>Before setting this understand the issues around reading stale and potentially-inconsistent data, view these * <a href=https: * for more information.</p> * @throws IllegalArgumentException If {@code retryDelayInMs} and {@code maxRetryDelayInMs} are not both null or * non-null or {@code retryPolicyType} isn't {@link RetryPolicyType */ public RequestRetryOptions(RetryPolicyType retryPolicyType, Integer maxTries, Integer tryTimeout, Long retryDelayInMs, Long maxRetryDelayInMs, String secondaryHost) { this.retryPolicyType = retryPolicyType == null ? RetryPolicyType.EXPONENTIAL : retryPolicyType; if (maxTries != null) { Utility.assertInBounds("maxRetries", maxTries, 1, Integer.MAX_VALUE); this.maxTries = maxTries; } else { this.maxTries = 4; } if (tryTimeout != null) { Utility.assertInBounds("tryTimeout", tryTimeout, 1, Integer.MAX_VALUE); this.tryTimeout = tryTimeout; } else { this.tryTimeout = Integer.MAX_VALUE; } if ((retryDelayInMs == null && maxRetryDelayInMs != null) || (retryDelayInMs != null && maxRetryDelayInMs == null)) { throw logger.logExceptionAsError( new IllegalArgumentException("Both retryDelay and maxRetryDelay must be null or neither can be null")); } if (retryDelayInMs != null) { Utility.assertInBounds("maxRetryDelayInMs", maxRetryDelayInMs, 1, Long.MAX_VALUE); Utility.assertInBounds("retryDelayInMs", retryDelayInMs, 1, maxRetryDelayInMs); this.maxRetryDelayInMs = maxRetryDelayInMs; this.retryDelayInMs = retryDelayInMs; } else { switch (this.retryPolicyType) { case EXPONENTIAL: this.retryDelayInMs = TimeUnit.SECONDS.toMillis(4); break; case FIXED: this.retryDelayInMs = TimeUnit.SECONDS.toMillis(30); break; default: throw logger.logExceptionAsError(new IllegalArgumentException("Invalid 'RetryPolicyType'.")); } this.maxRetryDelayInMs = TimeUnit.SECONDS.toMillis(120); } this.secondaryHost = secondaryHost; } /** * @return the maximum number of retries that will be attempted. 
*/ public int maxTries() { return this.maxTries; } /** * @return the maximum time, in seconds, allowed for a request until it is considered timed out. */ public int tryTimeout() { return this.tryTimeout; } /** * @return the URI of the secondary host where retries are attempted. If this is null then there is no secondary * host and all retries are attempted against the original host. */ public String secondaryHost() { return this.secondaryHost; } /** * @return the delay in milliseconds between each retry attempt. */ public long retryDelayInMs() { return retryDelayInMs; } /** * @return the maximum delay in milliseconds allowed between each retry. */ public long maxRetryDelayInMs() { return maxRetryDelayInMs; } /** * Calculates how long to delay before sending the next request. * * @param tryCount An {@code int} indicating which try we are on. * @return A {@code long} value of how many milliseconds to delay. */ }
class RequestRetryOptions { private final ClientLogger logger = new ClientLogger(RequestRetryOptions.class); private final int maxTries; private final int tryTimeout; private final long retryDelayInMs; private final long maxRetryDelayInMs; private final RetryPolicyType retryPolicyType; private final String secondaryHost; /** * Configures how the {@link HttpPipeline} should retry requests. */ public RequestRetryOptions() { this(RetryPolicyType.EXPONENTIAL, null, null, null, null, null); } /** * Configures how the {@link HttpPipeline} should retry requests. * * @param retryPolicyType Optional. A {@link RetryPolicyType} specifying the type of retry pattern to use, default * value is {@link RetryPolicyType * @param maxTries Optional. Maximum number of attempts an operation will be retried, default is {@code 4}. * @param tryTimeout Optional. Specified the maximum time allowed before a request is cancelled and assumed failed, * default is {@link Integer * * <p>This value should be based on the bandwidth available to the host machine and proximity to the Storage * service, a good starting point may be 60 seconds per MB of anticipated payload size.</p> * @param retryDelayInMs Optional. Specifies the amount of delay to use before retrying an operation, default value * is {@code 4ms} when {@code retryPolicyType} is {@link RetryPolicyType * when {@code retryPolicyType} is {@link RetryPolicyType * @param maxRetryDelayInMs Optional. Specifies the maximum delay allowed before retrying an operation, default * value is {@code 120ms}. * @param secondaryHost Optional. Specified a secondary Storage account to retry requests against, default is none. 
* * <p>Before setting this understand the issues around reading stale and potentially-inconsistent data, view these * <a href=https: * for more information.</p> * @throws IllegalArgumentException If {@code retryDelayInMs} and {@code maxRetryDelayInMs} are not both null or * non-null or {@code retryPolicyType} isn't {@link RetryPolicyType */ public RequestRetryOptions(RetryPolicyType retryPolicyType, Integer maxTries, Integer tryTimeout, Long retryDelayInMs, Long maxRetryDelayInMs, String secondaryHost) { this.retryPolicyType = retryPolicyType == null ? RetryPolicyType.EXPONENTIAL : retryPolicyType; if (maxTries != null) { Utility.assertInBounds("maxRetries", maxTries, 1, Integer.MAX_VALUE); this.maxTries = maxTries; } else { this.maxTries = 4; } if (tryTimeout != null) { Utility.assertInBounds("tryTimeout", tryTimeout, 1, Integer.MAX_VALUE); this.tryTimeout = tryTimeout; } else { this.tryTimeout = Integer.MAX_VALUE; } if ((retryDelayInMs == null && maxRetryDelayInMs != null) || (retryDelayInMs != null && maxRetryDelayInMs == null)) { throw logger.logExceptionAsError( new IllegalArgumentException("Both retryDelay and maxRetryDelay must be null or neither can be null")); } if (retryDelayInMs != null) { Utility.assertInBounds("maxRetryDelayInMs", maxRetryDelayInMs, 1, Long.MAX_VALUE); Utility.assertInBounds("retryDelayInMs", retryDelayInMs, 1, maxRetryDelayInMs); this.maxRetryDelayInMs = maxRetryDelayInMs; this.retryDelayInMs = retryDelayInMs; } else { switch (this.retryPolicyType) { case EXPONENTIAL: this.retryDelayInMs = TimeUnit.SECONDS.toMillis(4); break; case FIXED: this.retryDelayInMs = TimeUnit.SECONDS.toMillis(30); break; default: throw logger.logExceptionAsError(new IllegalArgumentException("Invalid 'RetryPolicyType'.")); } this.maxRetryDelayInMs = TimeUnit.SECONDS.toMillis(120); } this.secondaryHost = secondaryHost; } /** * @return the maximum number of retries that will be attempted. 
*/ public int maxTries() { return this.maxTries; } /** * @return the maximum time, in seconds, allowed for a request until it is considered timed out. */ public int tryTimeout() { return this.tryTimeout; } /** * @return the URI of the secondary host where retries are attempted. If this is null then there is no secondary * host and all retries are attempted against the original host. */ public String secondaryHost() { return this.secondaryHost; } /** * @return the delay in milliseconds between each retry attempt. */ public long retryDelayInMs() { return retryDelayInMs; } /** * @return the maximum delay in milliseconds allowed between each retry. */ public long maxRetryDelayInMs() { return maxRetryDelayInMs; } /** * Calculates how long to delay before sending the next request. * * @param tryCount An {@code int} indicating which try we are on. * @return A {@code long} value of how many milliseconds to delay. */ }
What's the reason of the switching 500 to 1000? Same as below. Any justification on this?
private OffsetDateTime calcUpperBound(OffsetDateTime start, int primaryTryNumber, boolean tryingPrimary) { if (tryingPrimary) { return start.plus(calcPrimaryDelay(primaryTryNumber) * 1000 + 1000, ChronoUnit.MILLIS); } else { return start.plus(1500, ChronoUnit.MILLIS); } }
return start.plus(calcPrimaryDelay(primaryTryNumber) * 1000 + 1000, ChronoUnit.MILLIS);
private OffsetDateTime calcUpperBound(OffsetDateTime start, int primaryTryNumber, boolean tryingPrimary) { if (tryingPrimary) { return start.plus(calcPrimaryDelay(primaryTryNumber) * 1000 + 1000, ChronoUnit.MILLIS); } else { return start.plus(1500, ChronoUnit.MILLIS); } }
/*
 * Mock HttpClient backing RequestRetryTestFactory. Each send() advances the factory's try counter,
 * asserts that the retry policy handed it a fresh copy of the request (expected host alternation,
 * no leftover test header/query param, original body), then mutates the request as downstream
 * policies would, and returns whatever response the active retry-test scenario dictates.
 * testDelayBounds/testMaxDelayBounds additionally verify that the observed delay between tries
 * stays within the jitter bounds derived from the retry options.
 */
class RetryTestClient implements HttpClient { private RequestRetryTestFactory factory; RetryTestClient(RequestRetryTestFactory parent) { this.factory = parent; } @Override public Mono<HttpResponse> send(HttpRequest request) { this.factory.tryNumber++; if (this.factory.tryNumber > this.factory.options.maxTries()) { throw new IllegalArgumentException("Try number has exceeded max tries"); } String expectedHost = RETRY_TEST_PRIMARY_HOST; if (this.factory.tryNumber % 2 == 0) { /* Special cases: retry until success scenario fails on the 4th try with a 404 on the secondary, so we never expect it to check the secondary after that. All other tests should continue to check the secondary. Exponential timing only tests secondary backoff once but uses the rest of the retries to hit the max delay. */ if (!((this.factory.retryTestScenario == RequestRetryTestFactory.RETRY_TEST_SCENARIO_RETRY_UNTIL_SUCCESS && this.factory.tryNumber > 4) || (this.factory.retryTestScenario == RequestRetryTestFactory.RETRY_TEST_SCENARIO_EXPONENTIAL_TIMING && this.factory.tryNumber > 2))) { expectedHost = RETRY_TEST_SECONDARY_HOST; } } if (!request.getUrl().getHost().equals(expectedHost)) { throw new IllegalArgumentException("The host does not match the expected host"); } /* This policy will add test headers and query parameters. Ensure they are removed/reset for each retry. The retry policy should be starting with a fresh copy of the request for every try. 
*/ if (request.getHeaders().value(RETRY_TEST_HEADER) != null) { throw new IllegalArgumentException("Headers not reset."); } if ((request.getUrl().getQuery() != null && request.getUrl().getQuery().contains(RETRY_TEST_QUERY_PARAM))) { throw new IllegalArgumentException("Query params not reset."); } ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); Disposable disposable = request.getBody().subscribe(data -> { try { outputStream.write(data.array()); } catch (IOException ex) { throw Exceptions.propagate(ex); } }); while (!disposable.isDisposed()) { System.out.println("Waiting for Flux to finish to prevent blocking on another thread exception"); } if (RETRY_TEST_DEFAULT_DATA.compareTo(ByteBuffer.wrap(outputStream.toByteArray())) != 0) { throw new IllegalArgumentException(("Body not reset.")); } /* Modify the request as policies downstream of the retry policy are likely to do. These must be reset on each try. */ request.getHeaders().put(RETRY_TEST_HEADER, "testheader"); UrlBuilder builder = UrlBuilder.parse(request.getUrl()); builder.setQueryParameter(RETRY_TEST_QUERY_PARAM, "testquery"); try { request.setUrl(builder.toURL()); } catch (MalformedURLException e) { throw new IllegalArgumentException("The URL has been mangled"); } switch (this.factory.retryTestScenario) { case RETRY_TEST_SCENARIO_RETRY_UNTIL_SUCCESS: switch (this.factory.tryNumber) { case 1: /* The timer is set with a timeout on the Mono used to make the request. If the Mono doesn't return success fast enough, it will throw a TimeoutException. We can short circuit the waiting by simply returning an error. We will validate the time parameter later. Here, we just test that a timeout is retried. 
*/ return Mono.error(new TimeoutException()); case 2: return RETRY_TEST_TEMPORARY_ERROR_RESPONSE; case 3: return RETRY_TEST_TIMEOUT_ERROR_RESPONSE; case 4: /* By returning 404 when we should be testing against the secondary, we exercise the logic that should prevent further tries to secondary when the secondary evidently doesn't have the data. */ return RETRY_TEST_NOT_FOUND_RESPONSE; case 5: return RETRY_TEST_TEMPORARY_ERROR_RESPONSE; case 6: return RETRY_TEST_OK_RESPONSE; default: throw new IllegalArgumentException("Continued trying after success."); } case RETRY_TEST_SCENARIO_RETRY_UNTIL_MAX_RETRIES: return RETRY_TEST_TEMPORARY_ERROR_RESPONSE; case RETRY_TEST_SCENARIO_NON_RETRYABLE: if (this.factory.tryNumber == 1) { return RETRY_TEST_NON_RETRYABLE_ERROR; } else { throw new IllegalArgumentException("Continued trying after non retryable error."); } case RETRY_TEST_SCENARIO_NON_RETRYABLE_SECONDARY: switch (this.factory.tryNumber) { case 1: return RETRY_TEST_TEMPORARY_ERROR_RESPONSE; case 2: return RETRY_TEST_NON_RETRYABLE_ERROR; default: throw new IllegalArgumentException("Continued trying after non retryable error."); } case RETRY_TEST_SCENARIO_NETWORK_ERROR: switch (this.factory.tryNumber) { case 1: case 2: return Mono.error(new IOException()); case 3: return RETRY_TEST_OK_RESPONSE; default: throw new IllegalArgumentException("Continued retrying after success."); } case RETRY_TEST_SCENARIO_TRY_TIMEOUT: switch (this.factory.tryNumber) { case 1: case 2: return RETRY_TEST_OK_RESPONSE.delaySubscription(Duration.ofSeconds(options.tryTimeout() + 1)); case 3: return RETRY_TEST_OK_RESPONSE.delaySubscription(Duration.ofSeconds(options.tryTimeout() - 1)); default: throw new IllegalArgumentException("Continued retrying after success"); } case RETRY_TEST_SCENARIO_EXPONENTIAL_TIMING: switch (this.factory.tryNumber) { case 1: this.factory.time = OffsetDateTime.now(); return RETRY_TEST_TEMPORARY_ERROR_RESPONSE; case 2: /* Calculation for secondary is always the same, so we 
don't need to keep testing it. Not trying the secondary any more will also speed up the test. */ return testDelayBounds(1, false, RETRY_TEST_NOT_FOUND_RESPONSE); case 3: return testDelayBounds(2, true, RETRY_TEST_TEMPORARY_ERROR_RESPONSE); case 4: return testDelayBounds(3, true, RETRY_TEST_TEMPORARY_ERROR_RESPONSE); case 5: /* With the current configuration in RetryTest, the maxRetryDelay should be reached upon the fourth try to the primary. */ return testMaxDelayBounds(RETRY_TEST_TEMPORARY_ERROR_RESPONSE); case 6: return testMaxDelayBounds(RETRY_TEST_OK_RESPONSE); default: throw new IllegalArgumentException("Max retries exceeded/continued retrying after success"); } case RETRY_TEST_SCENARIO_FIXED_TIMING: switch (this.factory.tryNumber) { case 1: this.factory.time = OffsetDateTime.now(); return RETRY_TEST_TEMPORARY_ERROR_RESPONSE; case 2: return testDelayBounds(1, false, RETRY_TEST_TEMPORARY_ERROR_RESPONSE); case 3: return testDelayBounds(2, true, RETRY_TEST_TEMPORARY_ERROR_RESPONSE); case 4: /* Fixed backoff means it's always the same and we never hit the max, no need to keep testing. */ return RETRY_TEST_OK_RESPONSE; default: throw new IllegalArgumentException("Retries continued after success."); } case RETRY_TEST_SCENARIO_NON_REPLAYABLE_FLOWABLE: switch (this.factory.tryNumber) { case 1: return RETRY_TEST_TEMPORARY_ERROR_RESPONSE; case 2: return Mono.error(new UnexpectedLengthException("Unexpected length", 5, 6)); default: throw new IllegalArgumentException("Retries continued on non retryable error."); } default: throw new IllegalArgumentException("Invalid retry test scenario."); } } /* Calculate the delay in seconds. Round up to ensure we include the maximum value and some offset for the code executing between the original calculation in the retry policy and this check. 
*/ private long calcPrimaryDelay(int tryNumber) { switch (this.factory.retryTestScenario) { case RETRY_TEST_SCENARIO_EXPONENTIAL_TIMING: return (long) Math.ceil( ((pow(2L, tryNumber - 1) - 1L) * this.factory.options.retryDelayInMs()) / 1000); case RETRY_TEST_SCENARIO_FIXED_TIMING: return (long) Math.ceil(this.factory.options.retryDelayInMs() / 1000); default: throw new IllegalArgumentException("Invalid test scenario"); } } private OffsetDateTime calcLowerBound(OffsetDateTime start, int primaryTryNumber, boolean tryingPrimary) { if (tryingPrimary) { return start.plus(calcPrimaryDelay(primaryTryNumber) * 1000 - 1000, ChronoUnit.MILLIS); } else { return start.plus(500, ChronoUnit.MILLIS); } } private Mono<HttpResponse> testDelayBounds(int primaryTryNumber, boolean tryingPrimary, Mono<HttpResponse> response) { /* We have to return a new Mono so that the calculation for time is performed at the correct time, i.e. when the Mono is actually subscribed to. This mocks an HttpClient because the requests are made only when the Mono is subscribed to, not when all the infrastructure around it is put in place, and we care about the delay before the request itself. 
*/ return Mono.defer(() -> Mono.fromCallable(() -> { OffsetDateTime now = OffsetDateTime.now(); if (now.isAfter(calcUpperBound(factory.time, primaryTryNumber, tryingPrimary)) || now.isBefore(calcLowerBound(factory.time, primaryTryNumber, tryingPrimary))) { throw new IllegalArgumentException("Delay was not within jitter bounds"); } factory.time = now; return response.block(); })); } private Mono<HttpResponse> testMaxDelayBounds(Mono<HttpResponse> response) { return Mono.defer(() -> Mono.fromCallable(() -> { OffsetDateTime now = OffsetDateTime.now(); if (now.isAfter(factory.time.plusSeconds((long) Math.ceil((factory.options.maxRetryDelayInMs() / 1000) + 1)))) { throw new IllegalArgumentException("Max retry delay exceeded"); } else if (now.isBefore(factory.time.plusSeconds((long) Math.ceil((factory.options.maxRetryDelayInMs() / 1000) - 1)))) { throw new IllegalArgumentException("Retry did not delay long enough"); } factory.time = now; return response.block(); })); } }
/*
 * Mock HttpClient backing RequestRetryTestFactory. Each send() advances the factory's try counter,
 * asserts that the retry policy handed it a fresh copy of the request (expected host alternation,
 * no leftover test header/query param, original body), then mutates the request as downstream
 * policies would, and returns whatever response the active retry-test scenario dictates.
 * testDelayBounds/testMaxDelayBounds additionally verify that the observed delay between tries
 * stays within the jitter bounds derived from the retry options.
 */
class RetryTestClient implements HttpClient { private RequestRetryTestFactory factory; RetryTestClient(RequestRetryTestFactory parent) { this.factory = parent; } @Override public Mono<HttpResponse> send(HttpRequest request) { this.factory.tryNumber++; if (this.factory.tryNumber > this.factory.options.maxTries()) { throw new IllegalArgumentException("Try number has exceeded max tries"); } String expectedHost = RETRY_TEST_PRIMARY_HOST; if (this.factory.tryNumber % 2 == 0) { /* Special cases: retry until success scenario fails on the 4th try with a 404 on the secondary, so we never expect it to check the secondary after that. All other tests should continue to check the secondary. Exponential timing only tests secondary backoff once but uses the rest of the retries to hit the max delay. */ if (!((this.factory.retryTestScenario == RequestRetryTestFactory.RETRY_TEST_SCENARIO_RETRY_UNTIL_SUCCESS && this.factory.tryNumber > 4) || (this.factory.retryTestScenario == RequestRetryTestFactory.RETRY_TEST_SCENARIO_EXPONENTIAL_TIMING && this.factory.tryNumber > 2))) { expectedHost = RETRY_TEST_SECONDARY_HOST; } } if (!request.getUrl().getHost().equals(expectedHost)) { throw new IllegalArgumentException("The host does not match the expected host"); } /* This policy will add test headers and query parameters. Ensure they are removed/reset for each retry. The retry policy should be starting with a fresh copy of the request for every try. 
*/ if (request.getHeaders().value(RETRY_TEST_HEADER) != null) { throw new IllegalArgumentException("Headers not reset."); } if ((request.getUrl().getQuery() != null && request.getUrl().getQuery().contains(RETRY_TEST_QUERY_PARAM))) { throw new IllegalArgumentException("Query params not reset."); } ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); Disposable disposable = request.getBody().subscribe(data -> { try { outputStream.write(data.array()); } catch (IOException ex) { throw Exceptions.propagate(ex); } }); while (!disposable.isDisposed()) { System.out.println("Waiting for Flux to finish to prevent blocking on another thread exception"); } if (RETRY_TEST_DEFAULT_DATA.compareTo(ByteBuffer.wrap(outputStream.toByteArray())) != 0) { throw new IllegalArgumentException(("Body not reset.")); } /* Modify the request as policies downstream of the retry policy are likely to do. These must be reset on each try. */ request.getHeaders().put(RETRY_TEST_HEADER, "testheader"); UrlBuilder builder = UrlBuilder.parse(request.getUrl()); builder.setQueryParameter(RETRY_TEST_QUERY_PARAM, "testquery"); try { request.setUrl(builder.toURL()); } catch (MalformedURLException e) { throw new IllegalArgumentException("The URL has been mangled"); } switch (this.factory.retryTestScenario) { case RETRY_TEST_SCENARIO_RETRY_UNTIL_SUCCESS: switch (this.factory.tryNumber) { case 1: /* The timer is set with a timeout on the Mono used to make the request. If the Mono doesn't return success fast enough, it will throw a TimeoutException. We can short circuit the waiting by simply returning an error. We will validate the time parameter later. Here, we just test that a timeout is retried. 
*/ return Mono.error(new TimeoutException()); case 2: return RETRY_TEST_TEMPORARY_ERROR_RESPONSE; case 3: return RETRY_TEST_TIMEOUT_ERROR_RESPONSE; case 4: /* By returning 404 when we should be testing against the secondary, we exercise the logic that should prevent further tries to secondary when the secondary evidently doesn't have the data. */ return RETRY_TEST_NOT_FOUND_RESPONSE; case 5: return RETRY_TEST_TEMPORARY_ERROR_RESPONSE; case 6: return RETRY_TEST_OK_RESPONSE; default: throw new IllegalArgumentException("Continued trying after success."); } case RETRY_TEST_SCENARIO_RETRY_UNTIL_MAX_RETRIES: return RETRY_TEST_TEMPORARY_ERROR_RESPONSE; case RETRY_TEST_SCENARIO_NON_RETRYABLE: if (this.factory.tryNumber == 1) { return RETRY_TEST_NON_RETRYABLE_ERROR; } else { throw new IllegalArgumentException("Continued trying after non retryable error."); } case RETRY_TEST_SCENARIO_NON_RETRYABLE_SECONDARY: switch (this.factory.tryNumber) { case 1: return RETRY_TEST_TEMPORARY_ERROR_RESPONSE; case 2: return RETRY_TEST_NON_RETRYABLE_ERROR; default: throw new IllegalArgumentException("Continued trying after non retryable error."); } case RETRY_TEST_SCENARIO_NETWORK_ERROR: switch (this.factory.tryNumber) { case 1: case 2: return Mono.error(new IOException()); case 3: return RETRY_TEST_OK_RESPONSE; default: throw new IllegalArgumentException("Continued retrying after success."); } case RETRY_TEST_SCENARIO_TRY_TIMEOUT: switch (this.factory.tryNumber) { case 1: case 2: return RETRY_TEST_OK_RESPONSE.delaySubscription(Duration.ofSeconds(options.tryTimeout() + 1)); case 3: return RETRY_TEST_OK_RESPONSE.delaySubscription(Duration.ofSeconds(options.tryTimeout() - 1)); default: throw new IllegalArgumentException("Continued retrying after success"); } case RETRY_TEST_SCENARIO_EXPONENTIAL_TIMING: switch (this.factory.tryNumber) { case 1: this.factory.time = OffsetDateTime.now(); return RETRY_TEST_TEMPORARY_ERROR_RESPONSE; case 2: /* Calculation for secondary is always the same, so we 
don't need to keep testing it. Not trying the secondary any more will also speed up the test. */ return testDelayBounds(1, false, RETRY_TEST_NOT_FOUND_RESPONSE); case 3: return testDelayBounds(2, true, RETRY_TEST_TEMPORARY_ERROR_RESPONSE); case 4: return testDelayBounds(3, true, RETRY_TEST_TEMPORARY_ERROR_RESPONSE); case 5: /* With the current configuration in RetryTest, the maxRetryDelay should be reached upon the fourth try to the primary. */ return testMaxDelayBounds(RETRY_TEST_TEMPORARY_ERROR_RESPONSE); case 6: return testMaxDelayBounds(RETRY_TEST_OK_RESPONSE); default: throw new IllegalArgumentException("Max retries exceeded/continued retrying after success"); } case RETRY_TEST_SCENARIO_FIXED_TIMING: switch (this.factory.tryNumber) { case 1: this.factory.time = OffsetDateTime.now(); return RETRY_TEST_TEMPORARY_ERROR_RESPONSE; case 2: return testDelayBounds(1, false, RETRY_TEST_TEMPORARY_ERROR_RESPONSE); case 3: return testDelayBounds(2, true, RETRY_TEST_TEMPORARY_ERROR_RESPONSE); case 4: /* Fixed backoff means it's always the same and we never hit the max, no need to keep testing. */ return RETRY_TEST_OK_RESPONSE; default: throw new IllegalArgumentException("Retries continued after success."); } case RETRY_TEST_SCENARIO_NON_REPLAYABLE_FLOWABLE: switch (this.factory.tryNumber) { case 1: return RETRY_TEST_TEMPORARY_ERROR_RESPONSE; case 2: return Mono.error(new UnexpectedLengthException("Unexpected length", 5, 6)); default: throw new IllegalArgumentException("Retries continued on non retryable error."); } default: throw new IllegalArgumentException("Invalid retry test scenario."); } } /* Calculate the delay in seconds. Round up to ensure we include the maximum value and some offset for the code executing between the original calculation in the retry policy and this check. 
*/ private long calcPrimaryDelay(int tryNumber) { switch (this.factory.retryTestScenario) { case RETRY_TEST_SCENARIO_EXPONENTIAL_TIMING: return (long) Math.ceil( ((pow(2L, tryNumber - 1) - 1L) * this.factory.options.retryDelayInMs()) / 1000); case RETRY_TEST_SCENARIO_FIXED_TIMING: return (long) Math.ceil(this.factory.options.retryDelayInMs() / 1000); default: throw new IllegalArgumentException("Invalid test scenario"); } } private OffsetDateTime calcLowerBound(OffsetDateTime start, int primaryTryNumber, boolean tryingPrimary) { if (tryingPrimary) { return start.plus(calcPrimaryDelay(primaryTryNumber) * 1000 - 1000, ChronoUnit.MILLIS); } else { return start.plus(500, ChronoUnit.MILLIS); } } private Mono<HttpResponse> testDelayBounds(int primaryTryNumber, boolean tryingPrimary, Mono<HttpResponse> response) { /* We have to return a new Mono so that the calculation for time is performed at the correct time, i.e. when the Mono is actually subscribed to. This mocks an HttpClient because the requests are made only when the Mono is subscribed to, not when all the infrastructure around it is put in place, and we care about the delay before the request itself. 
*/ return Mono.defer(() -> Mono.fromCallable(() -> { OffsetDateTime now = OffsetDateTime.now(); if (now.isAfter(calcUpperBound(factory.time, primaryTryNumber, tryingPrimary)) || now.isBefore(calcLowerBound(factory.time, primaryTryNumber, tryingPrimary))) { throw new IllegalArgumentException("Delay was not within jitter bounds"); } factory.time = now; return response.block(); })); } private Mono<HttpResponse> testMaxDelayBounds(Mono<HttpResponse> response) { return Mono.defer(() -> Mono.fromCallable(() -> { OffsetDateTime now = OffsetDateTime.now(); if (now.isAfter(factory.time.plusSeconds((long) Math.ceil((factory.options.maxRetryDelayInMs() / 1000) + 1)))) { throw new IllegalArgumentException("Max retry delay exceeded"); } else if (now.isBefore(factory.time.plusSeconds((long) Math.ceil((factory.options.maxRetryDelayInMs() / 1000) - 1)))) { throw new IllegalArgumentException("Retry did not delay long enough"); } factory.time = now; return response.block(); })); } }
This is being fixed in PR #5469.
/**
 * Calculates how long to delay before sending the next request.
 *
 * <p>For {@link RetryPolicyType#EXPONENTIAL} the delay is {@code (2^(tryCount - 1) - 1) * retryDelayInMs},
 * the classic exponential backoff sequence (0, 1, 3, 7, ... times the base delay). For
 * {@link RetryPolicyType#FIXED} the base delay is used every time. In both cases the result is capped at
 * {@code maxRetryDelayInMs}.</p>
 *
 * @param tryCount An {@code int} indicating which try we are on (1-based).
 * @return A {@code long} value of how many milliseconds to delay.
 */
long calculateDelayInMs(int tryCount) {
    long delay;
    switch (this.retryPolicyType) {
        case EXPONENTIAL:
            // Bug fix: the previous expression ((tryCount - 1) * (tryCount - 1) - 1L) is quadratic,
            // not exponential, and yields a negative delay (-retryDelayInMs) on the first try.
            // 2^(tryCount - 1) - 1 matches the expectation in RequestRetryTestFactory's
            // calcPrimaryDelay (pow(2, tryNumber - 1) - 1).
            delay = ((1L << (tryCount - 1)) - 1L) * this.retryDelayInMs;
            break;
        case FIXED:
            delay = this.retryDelayInMs;
            break;
        default:
            throw logger.logExceptionAsError(new IllegalArgumentException("Invalid retry policy type."));
    }
    // Never exceed the configured maximum backoff.
    return Math.min(delay, this.maxRetryDelayInMs);
}
delay = ((tryCount - 1) * (tryCount - 1) - 1L) * this.retryDelayInMs;
/**
 * Calculates how long to delay before sending the next request.
 *
 * <p>For {@link RetryPolicyType#EXPONENTIAL} the delay is {@code (2^(tryCount - 1) - 1) * retryDelayInMs};
 * for {@link RetryPolicyType#FIXED} it is the base delay. The result is capped at
 * {@code maxRetryDelayInMs}.</p>
 *
 * @param tryCount An {@code int} indicating which try we are on (1-based).
 * @return A {@code long} value of how many milliseconds to delay.
 */
long calculateDelayInMs(int tryCount) {
    long delay;
    switch (this.retryPolicyType) {
        case EXPONENTIAL:
            // Bug fix: ((tryCount - 1) * (tryCount - 1) - 1L) is quadratic, not exponential, and
            // produces a negative delay on try 1. Use 2^(tryCount - 1) - 1 as the multiplier,
            // matching RequestRetryTestFactory's calcPrimaryDelay.
            delay = ((1L << (tryCount - 1)) - 1L) * this.retryDelayInMs;
            break;
        case FIXED:
            delay = this.retryDelayInMs;
            break;
        default:
            throw logger.logExceptionAsError(new IllegalArgumentException("Invalid retry policy type."));
    }
    return Math.min(delay, this.maxRetryDelayInMs);
}
/*
 * Immutable configuration describing how the HttpPipeline retries Storage requests: policy type
 * (exponential or fixed), maximum tries, per-try timeout, base/maximum retry delays in milliseconds,
 * and an optional secondary host to alternate retries against.
 */
class RequestRetryOptions { private final ClientLogger logger = new ClientLogger(RequestRetryOptions.class); private final int maxTries; private final int tryTimeout; private final long retryDelayInMs; private final long maxRetryDelayInMs; private final RetryPolicyType retryPolicyType; private final String secondaryHost; /** * Configures how the {@link HttpPipeline} should retry requests. */ public RequestRetryOptions() { this(RetryPolicyType.EXPONENTIAL, null, null, null, null, null); } /** * Configures how the {@link HttpPipeline} should retry requests. * * @param retryPolicyType Optional. A {@link RetryPolicyType} specifying the type of retry pattern to use, default * value is {@link RetryPolicyType * @param maxTries Optional. Maximum number of attempts an operation will be retried, default is {@code 4}. * @param tryTimeout Optional. Specified the maximum time allowed before a request is cancelled and assumed failed, * default is {@link Integer * * <p>This value should be based on the bandwidth available to the host machine and proximity to the Storage * service, a good starting point may be 60 seconds per MB of anticipated payload size.</p> * @param retryDelayInMs Optional. Specifies the amount of delay to use before retrying an operation, default value * is {@code 4000ms} (4 seconds) when {@code retryPolicyType} is {@link RetryPolicyType * when {@code retryPolicyType} is {@link RetryPolicyType * @param maxRetryDelayInMs Optional. Specifies the maximum delay allowed before retrying an operation, default * value is {@code 120000ms} (120 seconds). * @param secondaryHost Optional. Specified a secondary Storage account to retry requests against, default is none. 
* * <p>Before setting this understand the issues around reading stale and potentially-inconsistent data, view these * <a href=https: * for more information.</p> * @throws IllegalArgumentException If {@code retryDelayInMs} and {@code maxRetryDelayInMs} are not both null or * non-null or {@code retryPolicyType} isn't {@link RetryPolicyType */ public RequestRetryOptions(RetryPolicyType retryPolicyType, Integer maxTries, Integer tryTimeout, Long retryDelayInMs, Long maxRetryDelayInMs, String secondaryHost) { this.retryPolicyType = retryPolicyType == null ? RetryPolicyType.EXPONENTIAL : retryPolicyType; if (maxTries != null) { Utility.assertInBounds("maxRetries", maxTries, 1, Integer.MAX_VALUE); this.maxTries = maxTries; } else { this.maxTries = 4; } if (tryTimeout != null) { Utility.assertInBounds("tryTimeout", tryTimeout, 1, Integer.MAX_VALUE); this.tryTimeout = tryTimeout; } else { this.tryTimeout = Integer.MAX_VALUE; } if ((retryDelayInMs == null && maxRetryDelayInMs != null) || (retryDelayInMs != null && maxRetryDelayInMs == null)) { throw logger.logExceptionAsError( new IllegalArgumentException("Both retryDelay and maxRetryDelay must be null or neither can be null")); } if (retryDelayInMs != null) { Utility.assertInBounds("maxRetryDelayInMs", maxRetryDelayInMs, 1, Long.MAX_VALUE); Utility.assertInBounds("retryDelayInMs", retryDelayInMs, 1, maxRetryDelayInMs); this.maxRetryDelayInMs = maxRetryDelayInMs; this.retryDelayInMs = retryDelayInMs; } else { switch (this.retryPolicyType) { case EXPONENTIAL: this.retryDelayInMs = TimeUnit.SECONDS.toMillis(4); break; case FIXED: this.retryDelayInMs = TimeUnit.SECONDS.toMillis(30); break; default: throw logger.logExceptionAsError(new IllegalArgumentException("Invalid 'RetryPolicyType'.")); } this.maxRetryDelayInMs = TimeUnit.SECONDS.toMillis(120); } this.secondaryHost = secondaryHost; } /** * @return the maximum number of retries that will be attempted. 
*/ public int maxTries() { return this.maxTries; } /** * @return the maximum time, in seconds, allowed for a request until it is considered timed out. */ public int tryTimeout() { return this.tryTimeout; } /** * @return the URI of the secondary host where retries are attempted. If this is null then there is no secondary * host and all retries are attempted against the original host. */ public String secondaryHost() { return this.secondaryHost; } /** * @return the delay in milliseconds between each retry attempt. */ public long retryDelayInMs() { return retryDelayInMs; } /** * @return the maximum delay in milliseconds allowed between each retry. */ public long maxRetryDelayInMs() { return maxRetryDelayInMs; } /** * Calculates how long to delay before sending the next request. * * @param tryCount An {@code int} indicating which try we are on. * @return A {@code long} value of how many milliseconds to delay. */ }
/*
 * Immutable configuration describing how the HttpPipeline retries Storage requests: policy type
 * (exponential or fixed), maximum tries, per-try timeout, base/maximum retry delays in milliseconds,
 * and an optional secondary host to alternate retries against.
 */
class RequestRetryOptions { private final ClientLogger logger = new ClientLogger(RequestRetryOptions.class); private final int maxTries; private final int tryTimeout; private final long retryDelayInMs; private final long maxRetryDelayInMs; private final RetryPolicyType retryPolicyType; private final String secondaryHost; /** * Configures how the {@link HttpPipeline} should retry requests. */ public RequestRetryOptions() { this(RetryPolicyType.EXPONENTIAL, null, null, null, null, null); } /** * Configures how the {@link HttpPipeline} should retry requests. * * @param retryPolicyType Optional. A {@link RetryPolicyType} specifying the type of retry pattern to use, default * value is {@link RetryPolicyType * @param maxTries Optional. Maximum number of attempts an operation will be retried, default is {@code 4}. * @param tryTimeout Optional. Specified the maximum time allowed before a request is cancelled and assumed failed, * default is {@link Integer * * <p>This value should be based on the bandwidth available to the host machine and proximity to the Storage * service, a good starting point may be 60 seconds per MB of anticipated payload size.</p> * @param retryDelayInMs Optional. Specifies the amount of delay to use before retrying an operation, default value * is {@code 4000ms} (4 seconds) when {@code retryPolicyType} is {@link RetryPolicyType * when {@code retryPolicyType} is {@link RetryPolicyType * @param maxRetryDelayInMs Optional. Specifies the maximum delay allowed before retrying an operation, default * value is {@code 120000ms} (120 seconds). * @param secondaryHost Optional. Specified a secondary Storage account to retry requests against, default is none. 
* * <p>Before setting this understand the issues around reading stale and potentially-inconsistent data, view these * <a href=https: * for more information.</p> * @throws IllegalArgumentException If {@code retryDelayInMs} and {@code maxRetryDelayInMs} are not both null or * non-null or {@code retryPolicyType} isn't {@link RetryPolicyType */ public RequestRetryOptions(RetryPolicyType retryPolicyType, Integer maxTries, Integer tryTimeout, Long retryDelayInMs, Long maxRetryDelayInMs, String secondaryHost) { this.retryPolicyType = retryPolicyType == null ? RetryPolicyType.EXPONENTIAL : retryPolicyType; if (maxTries != null) { Utility.assertInBounds("maxRetries", maxTries, 1, Integer.MAX_VALUE); this.maxTries = maxTries; } else { this.maxTries = 4; } if (tryTimeout != null) { Utility.assertInBounds("tryTimeout", tryTimeout, 1, Integer.MAX_VALUE); this.tryTimeout = tryTimeout; } else { this.tryTimeout = Integer.MAX_VALUE; } if ((retryDelayInMs == null && maxRetryDelayInMs != null) || (retryDelayInMs != null && maxRetryDelayInMs == null)) { throw logger.logExceptionAsError( new IllegalArgumentException("Both retryDelay and maxRetryDelay must be null or neither can be null")); } if (retryDelayInMs != null) { Utility.assertInBounds("maxRetryDelayInMs", maxRetryDelayInMs, 1, Long.MAX_VALUE); Utility.assertInBounds("retryDelayInMs", retryDelayInMs, 1, maxRetryDelayInMs); this.maxRetryDelayInMs = maxRetryDelayInMs; this.retryDelayInMs = retryDelayInMs; } else { switch (this.retryPolicyType) { case EXPONENTIAL: this.retryDelayInMs = TimeUnit.SECONDS.toMillis(4); break; case FIXED: this.retryDelayInMs = TimeUnit.SECONDS.toMillis(30); break; default: throw logger.logExceptionAsError(new IllegalArgumentException("Invalid 'RetryPolicyType'.")); } this.maxRetryDelayInMs = TimeUnit.SECONDS.toMillis(120); } this.secondaryHost = secondaryHost; } /** * @return the maximum number of retries that will be attempted. 
*/ public int maxTries() { return this.maxTries; } /** * @return the maximum time, in seconds, allowed for a request until it is considered timed out. */ public int tryTimeout() { return this.tryTimeout; } /** * @return the URI of the secondary host where retries are attempted. If this is null then there is no secondary * host and all retries are attempted against the original host. */ public String secondaryHost() { return this.secondaryHost; } /** * @return the delay in milliseconds between each retry attempt. */ public long retryDelayInMs() { return retryDelayInMs; } /** * @return the maximum delay in milliseconds allowed between each retry. */ public long maxRetryDelayInMs() { return maxRetryDelayInMs; } /** * Calculates how long to delay before sending the next request. * * @param tryCount An {@code int} indicating which try we are on. * @return A {@code long} value of how many milliseconds to delay. */ }
Use the test's logger instead of System.out.println so the output goes through the standard logging pipeline.
/**
 * Verifies that two consecutive timed receive() calls each drain exactly the batch published before them:
 * 15 events within a 3-second window, then 3 more events within a 5-second window on partition "1".
 *
 * @throws IOException If the shared consumer created in test setup cannot be closed.
 */
public void receiveUntilTimeoutMultipleTimes() throws IOException {
    // Close the consumer created in test setup; this test creates its own consumer for partition "1".
    this.consumer.close();
    this.consumer = null;
    final int numberOfEvents = 15;
    final int numberOfEvents2 = 3;
    final String partitionId = "1";
    final List<EventData> events = getEventsAsList(numberOfEvents, TestUtils.MESSAGE_TRACKING_ID);
    final List<EventData> events2 = getEventsAsList(numberOfEvents2, TestUtils.MESSAGE_TRACKING_ID);
    final EventHubConsumer consumer = client.createConsumer(DEFAULT_CONSUMER_GROUP_NAME, partitionId,
        EventPosition.fromEnqueuedTime(Instant.now()));
    final EventHubProducer producer = client.createProducer(new EventHubProducerOptions().setPartitionId(partitionId));
    try {
        producer.send(events);
        final IterableStream<EventData> receive = consumer.receive(100, Duration.ofSeconds(3));
        // Use the test logger rather than System.out so output flows through the logging pipeline.
        logger.info("Sending second batch.");
        producer.send(events2);
        logger.info("Receiving second batch.");
        final IterableStream<EventData> receive2 = consumer.receive(100, Duration.ofSeconds(5));
        final List<EventData> asList = receive.stream().collect(Collectors.toList());
        Assert.assertEquals(numberOfEvents, asList.size());
        final List<EventData> asList2 = receive2.stream().collect(Collectors.toList());
        Assert.assertEquals(numberOfEvents2, asList2.size());
    } finally {
        dispose(consumer, producer);
    }
}
System.out.println("Receiving second batch.");
/**
 * Verifies that two consecutive timed receive() calls each drain exactly the batch published before them:
 * 15 events within a 3-second window, then 3 more events within a 5-second window on partition "1".
 * Uses the test logger (not System.out) for progress output.
 */
public void receiveUntilTimeoutMultipleTimes() { final int numberOfEvents = 15; final int numberOfEvents2 = 3; final String partitionId = "1"; final List<EventData> events = getEventsAsList(numberOfEvents); final List<EventData> events2 = getEventsAsList(numberOfEvents2); final EventHubConsumer consumer = client.createConsumer(DEFAULT_CONSUMER_GROUP_NAME, partitionId, EventPosition.fromEnqueuedTime(Instant.now())); final EventHubProducer producer = client.createProducer(new EventHubProducerOptions().setPartitionId(partitionId)); try { producer.send(events); final IterableStream<EventData> receive = consumer.receive(100, Duration.ofSeconds(3)); logger.info("Sending second batch."); producer.send(events2); logger.info("Receiving second batch."); final IterableStream<EventData> receive2 = consumer.receive(100, Duration.ofSeconds(5)); final List<EventData> asList = receive.stream().collect(Collectors.toList()); Assert.assertEquals(numberOfEvents, asList.size()); final List<EventData> asList2 = receive2.stream().collect(Collectors.toList()); Assert.assertEquals(numberOfEvents2, asList2.size()); } finally { dispose(consumer, producer); } }
class EventHubConsumerIntegrationTest extends IntegrationTestBase { private static final String PARTITION_ID = "0"; private static final int NUMBER_OF_EVENTS = 10; private static final AtomicBoolean HAS_PUSHED_EVENTS = new AtomicBoolean(); private static volatile IntegrationTestEventData testData = null; private EventHubClient client; private EventHubConsumer consumer; public EventHubConsumerIntegrationTest() { super(new ClientLogger(EventHubConsumerIntegrationTest.class)); } @Rule public TestName testName = new TestName(); @Override protected String getTestName() { return testName.getMethodName(); } @Override protected void beforeTest() { super.beforeTest(); client = new EventHubClientBuilder() .connectionString(getConnectionString()) .retry(RETRY_OPTIONS) .buildClient(); if (HAS_PUSHED_EVENTS.getAndSet(true)) { logger.info("Already pushed events to partition. Skipping."); } else { final EventHubProducerOptions options = new EventHubProducerOptions().setPartitionId(PARTITION_ID); testData = setupEventTestData(client, NUMBER_OF_EVENTS, options); } consumer = client.createConsumer(DEFAULT_CONSUMER_GROUP_NAME, PARTITION_ID, EventPosition.fromEnqueuedTime(testData.getEnqueuedTime())); } @Override protected void afterTest() { dispose(consumer, client); } /** * Verifies that we can receive events a single time that is up to the batch size. */ @Test public void receiveEvents() { final int numberOfEvents = 5; final IterableStream<EventData> actual = consumer.receive(numberOfEvents, Duration.ofSeconds(10)); final List<EventData> asList = actual.stream().collect(Collectors.toList()); Assert.assertEquals(numberOfEvents, asList.size()); } /** * Verifies that we can receive multiple times. 
*/ @Test public void receiveEventsMultipleTimes() { final int numberOfEvents = 5; final int secondNumberOfEvents = 2; final Duration waitTime = Duration.ofSeconds(10); final IterableStream<EventData> actual = consumer.receive(numberOfEvents, waitTime); final IterableStream<EventData> actual2 = consumer.receive(secondNumberOfEvents, waitTime); final Map<Long, EventData> asList = actual.stream() .collect(Collectors.toMap(EventData::getSequenceNumber, Function.identity())); Assert.assertEquals(numberOfEvents, asList.size()); final Map<Long, EventData> asList2 = actual2.stream() .collect(Collectors.toMap(EventData::getSequenceNumber, Function.identity())); Assert.assertEquals(secondNumberOfEvents, asList2.size()); final Long maximumSequence = Collections.max(asList.keySet()); final Long minimumSequence = Collections.min(asList2.keySet()); Assert.assertTrue("The minimum in second receive should be less than first receive.", maximumSequence < minimumSequence); } /** * Verify that we can receive until the timeout. */ @Test public void receiveUntilTimeout() { final int numberOfEvents = 15; final String partitionId = "1"; final List<EventData> events = getEventsAsList(numberOfEvents, TestUtils.MESSAGE_TRACKING_ID); final EventPosition position = EventPosition.fromEnqueuedTime(Instant.now()); final EventHubConsumer consumer = client.createConsumer(DEFAULT_CONSUMER_GROUP_NAME, partitionId, position); final EventHubProducer producer = client.createProducer(new EventHubProducerOptions().setPartitionId(partitionId)); try { producer.send(events); final IterableStream<EventData> receive = consumer.receive(100, Duration.ofSeconds(5)); final List<EventData> asList = receive.stream().collect(Collectors.toList()); Assert.assertEquals(numberOfEvents, asList.size()); } finally { dispose(producer, consumer); } } /** * Verify that we don't continue to fetch more events when there are no listeners. 
*/ @Test public void doesNotContinueToReceiveEvents() { final int numberOfEvents = 15; final int secondSetOfEvents = 25; final int receiveNumber = 10; final String partitionId = "1"; final List<EventData> events = getEventsAsList(numberOfEvents, TestUtils.MESSAGE_TRACKING_ID); final List<EventData> events2 = getEventsAsList(secondSetOfEvents, TestUtils.MESSAGE_TRACKING_ID); final EventHubConsumer consumer = client.createConsumer(DEFAULT_CONSUMER_GROUP_NAME, partitionId, EventPosition.fromEnqueuedTime(Instant.now())); final EventHubProducer producer = client.createProducer(new EventHubProducerOptions().setPartitionId(partitionId)); try { producer.send(events); final IterableStream<EventData> receive = consumer.receive(receiveNumber, Duration.ofSeconds(5)); final List<EventData> asList = receive.stream().collect(Collectors.toList()); Assert.assertEquals(receiveNumber, asList.size()); producer.send(events2); } finally { dispose(consumer, producer); } } /** * Verify that we don't continue to fetch more events when there are no listeners. 
*/ @Test public void multipleConsumers() { final int numberOfEvents = 15; final int receiveNumber = 10; final String partitionId = "1"; final List<EventData> events = getEventsAsList(numberOfEvents, TestUtils.MESSAGE_TRACKING_ID); final EventPosition position = EventPosition.fromEnqueuedTime(Instant.now()); final EventHubConsumer consumer = client.createConsumer(DEFAULT_CONSUMER_GROUP_NAME, partitionId, position); final EventHubConsumer consumer2 = client.createConsumer(DEFAULT_CONSUMER_GROUP_NAME, partitionId, position); final EventHubProducer producer = client.createProducer(new EventHubProducerOptions().setPartitionId(partitionId)); try { producer.send(events); final IterableStream<EventData> receive = consumer.receive(receiveNumber, Duration.ofSeconds(5)); final IterableStream<EventData> receive2 = consumer2.receive(receiveNumber, Duration.ofSeconds(5)); final List<Long> asList = receive.stream().map(EventData::getSequenceNumber).collect(Collectors.toList()); final List<Long> asList2 = receive2.stream().map(EventData::getSequenceNumber).collect(Collectors.toList()); Assert.assertEquals(receiveNumber, asList.size()); Assert.assertEquals(receiveNumber, asList2.size()); Collections.sort(asList); Collections.sort(asList2); final Long[] first = asList.toArray(new Long[0]); final Long[] second = asList2.toArray(new Long[0]); Assert.assertArrayEquals(first, second); } finally { dispose(consumer, producer); } } /** * Verify that we can receive until the timeout multiple times. */ @Test private static List<EventData> getEventsAsList(int numberOfEvents, String messageId) { return TestUtils.getEvents(numberOfEvents, messageId).collectList().block(); } }
class EventHubConsumerIntegrationTest extends IntegrationTestBase { private static final String PARTITION_ID = "0"; private static final int NUMBER_OF_EVENTS = 10; private static final AtomicBoolean HAS_PUSHED_EVENTS = new AtomicBoolean(); private static volatile IntegrationTestEventData testData = null; private EventHubClient client; private EventHubConsumer consumer; public EventHubConsumerIntegrationTest() { super(new ClientLogger(EventHubConsumerIntegrationTest.class)); } @Rule public TestName testName = new TestName(); @Override protected String getTestName() { return testName.getMethodName(); } @Override protected void beforeTest() { super.beforeTest(); client = new EventHubClientBuilder() .connectionString(getConnectionString()) .scheduler(Schedulers.single()) .retry(RETRY_OPTIONS) .buildClient(); if (HAS_PUSHED_EVENTS.getAndSet(true)) { logger.info("Already pushed events to partition. Skipping."); } else { final EventHubProducerOptions options = new EventHubProducerOptions().setPartitionId(PARTITION_ID); testData = setupEventTestData(client, NUMBER_OF_EVENTS, options); } consumer = client.createConsumer(DEFAULT_CONSUMER_GROUP_NAME, PARTITION_ID, EventPosition.fromEnqueuedTime(testData.getEnqueuedTime())); } @Override protected void afterTest() { dispose(consumer, client); } /** * Verifies that we can receive events a single time that is up to the batch size. */ @Test public void receiveEvents() { final int numberOfEvents = 5; final IterableStream<EventData> actual = consumer.receive(numberOfEvents, Duration.ofSeconds(10)); final List<EventData> asList = actual.stream().collect(Collectors.toList()); Assert.assertEquals(numberOfEvents, asList.size()); } /** * Verifies that we can receive multiple times. 
*/ @Test public void receiveEventsMultipleTimes() { final int numberOfEvents = 5; final int secondNumberOfEvents = 2; final Duration waitTime = Duration.ofSeconds(10); final IterableStream<EventData> actual = consumer.receive(numberOfEvents, waitTime); final IterableStream<EventData> actual2 = consumer.receive(secondNumberOfEvents, waitTime); final Map<Long, EventData> asList = actual.stream() .collect(Collectors.toMap(EventData::getSequenceNumber, Function.identity())); Assert.assertEquals(numberOfEvents, asList.size()); final Map<Long, EventData> asList2 = actual2.stream() .collect(Collectors.toMap(EventData::getSequenceNumber, Function.identity())); Assert.assertEquals(secondNumberOfEvents, asList2.size()); final Long maximumSequence = Collections.max(asList.keySet()); final Long minimumSequence = Collections.min(asList2.keySet()); Assert.assertTrue("The minimum in second receive should be less than first receive.", maximumSequence < minimumSequence); } /** * Verify that we can receive until the timeout. */ @Test public void receiveUntilTimeout() { final int numberOfEvents = 15; final String partitionId = "1"; final List<EventData> events = getEventsAsList(numberOfEvents); final EventPosition position = EventPosition.fromEnqueuedTime(Instant.now()); final EventHubConsumer consumer = client.createConsumer(DEFAULT_CONSUMER_GROUP_NAME, partitionId, position); final EventHubProducer producer = client.createProducer(new EventHubProducerOptions().setPartitionId(partitionId)); try { producer.send(events); final IterableStream<EventData> receive = consumer.receive(100, Duration.ofSeconds(5)); final List<EventData> asList = receive.stream().collect(Collectors.toList()); Assert.assertEquals(numberOfEvents, asList.size()); } finally { dispose(producer, consumer); } } /** * Verify that we don't continue to fetch more events when there are no listeners. 
*/ @Test public void doesNotContinueToReceiveEvents() { final int numberOfEvents = 15; final int secondSetOfEvents = 25; final int receiveNumber = 10; final String partitionId = "1"; final List<EventData> events = getEventsAsList(numberOfEvents); final List<EventData> events2 = getEventsAsList(secondSetOfEvents); final EventHubConsumer consumer = client.createConsumer(DEFAULT_CONSUMER_GROUP_NAME, partitionId, EventPosition.fromEnqueuedTime(Instant.now())); final EventHubProducer producer = client.createProducer(new EventHubProducerOptions().setPartitionId(partitionId)); try { producer.send(events); final IterableStream<EventData> receive = consumer.receive(receiveNumber, Duration.ofSeconds(5)); final List<EventData> asList = receive.stream().collect(Collectors.toList()); Assert.assertEquals(receiveNumber, asList.size()); producer.send(events2); } finally { dispose(consumer, producer); } } /** * Verify that we don't continue to fetch more events when there are no listeners. */ @Test public void multipleConsumers() { final int numberOfEvents = 15; final int receiveNumber = 10; final String partitionId = "1"; final List<EventData> events = getEventsAsList(numberOfEvents); final EventPosition position = EventPosition.fromEnqueuedTime(Instant.now()); final EventHubConsumer consumer = client.createConsumer(DEFAULT_CONSUMER_GROUP_NAME, partitionId, position); final EventHubConsumer consumer2 = client.createConsumer(DEFAULT_CONSUMER_GROUP_NAME, partitionId, position); final EventHubProducer producer = client.createProducer(new EventHubProducerOptions().setPartitionId(partitionId)); try { producer.send(events); final IterableStream<EventData> receive = consumer.receive(receiveNumber, Duration.ofSeconds(5)); final IterableStream<EventData> receive2 = consumer2.receive(receiveNumber, Duration.ofSeconds(5)); final List<Long> asList = receive.stream().map(EventData::getSequenceNumber).collect(Collectors.toList()); final List<Long> asList2 = 
receive2.stream().map(EventData::getSequenceNumber).collect(Collectors.toList()); Assert.assertEquals(receiveNumber, asList.size()); Assert.assertEquals(receiveNumber, asList2.size()); Collections.sort(asList); Collections.sort(asList2); final Long[] first = asList.toArray(new Long[0]); final Long[] second = asList2.toArray(new Long[0]); Assert.assertArrayEquals(first, second); } finally { dispose(consumer, producer); } } /** * Verify that we can receive until the timeout multiple times. */ @Test private static List<EventData> getEventsAsList(int numberOfEvents) { return TestUtils.getEvents(numberOfEvents, TestUtils.MESSAGE_TRACKING_ID).collectList().block(); } }
Not sure why this is required but if you intended to close out the consumer that was created from previous tests, you can instead use this: ``` @After public void cleanUp() { this.consumer.close(); this.consumer = null; } ```
public void receiveUntilTimeoutMultipleTimes() throws IOException { this.consumer.close(); this.consumer = null; final int numberOfEvents = 15; final int numberOfEvents2 = 3; final String partitionId = "1"; final List<EventData> events = getEventsAsList(numberOfEvents, TestUtils.MESSAGE_TRACKING_ID); final List<EventData> events2 = getEventsAsList(numberOfEvents2, TestUtils.MESSAGE_TRACKING_ID); final EventHubConsumer consumer = client.createConsumer(DEFAULT_CONSUMER_GROUP_NAME, partitionId, EventPosition.fromEnqueuedTime(Instant.now())); final EventHubProducer producer = client.createProducer(new EventHubProducerOptions().setPartitionId(partitionId)); try { producer.send(events); final IterableStream<EventData> receive = consumer.receive(100, Duration.ofSeconds(3)); System.out.println("Sending second batch."); producer.send(events2); System.out.println("Receiving second batch."); final IterableStream<EventData> receive2 = consumer.receive(100, Duration.ofSeconds(5)); final List<EventData> asList = receive.stream().collect(Collectors.toList()); Assert.assertEquals(numberOfEvents, asList.size()); final List<EventData> asList2 = receive2.stream().collect(Collectors.toList()); Assert.assertEquals(numberOfEvents2, asList2.size()); } finally { dispose(consumer, producer); } }
this.consumer.close();
public void receiveUntilTimeoutMultipleTimes() { final int numberOfEvents = 15; final int numberOfEvents2 = 3; final String partitionId = "1"; final List<EventData> events = getEventsAsList(numberOfEvents); final List<EventData> events2 = getEventsAsList(numberOfEvents2); final EventHubConsumer consumer = client.createConsumer(DEFAULT_CONSUMER_GROUP_NAME, partitionId, EventPosition.fromEnqueuedTime(Instant.now())); final EventHubProducer producer = client.createProducer(new EventHubProducerOptions().setPartitionId(partitionId)); try { producer.send(events); final IterableStream<EventData> receive = consumer.receive(100, Duration.ofSeconds(3)); logger.info("Sending second batch."); producer.send(events2); logger.info("Receiving second batch."); final IterableStream<EventData> receive2 = consumer.receive(100, Duration.ofSeconds(5)); final List<EventData> asList = receive.stream().collect(Collectors.toList()); Assert.assertEquals(numberOfEvents, asList.size()); final List<EventData> asList2 = receive2.stream().collect(Collectors.toList()); Assert.assertEquals(numberOfEvents2, asList2.size()); } finally { dispose(consumer, producer); } }
class EventHubConsumerIntegrationTest extends IntegrationTestBase { private static final String PARTITION_ID = "0"; private static final int NUMBER_OF_EVENTS = 10; private static final AtomicBoolean HAS_PUSHED_EVENTS = new AtomicBoolean(); private static volatile IntegrationTestEventData testData = null; private EventHubClient client; private EventHubConsumer consumer; public EventHubConsumerIntegrationTest() { super(new ClientLogger(EventHubConsumerIntegrationTest.class)); } @Rule public TestName testName = new TestName(); @Override protected String getTestName() { return testName.getMethodName(); } @Override protected void beforeTest() { super.beforeTest(); client = new EventHubClientBuilder() .connectionString(getConnectionString()) .retry(RETRY_OPTIONS) .buildClient(); if (HAS_PUSHED_EVENTS.getAndSet(true)) { logger.info("Already pushed events to partition. Skipping."); } else { final EventHubProducerOptions options = new EventHubProducerOptions().setPartitionId(PARTITION_ID); testData = setupEventTestData(client, NUMBER_OF_EVENTS, options); } consumer = client.createConsumer(DEFAULT_CONSUMER_GROUP_NAME, PARTITION_ID, EventPosition.fromEnqueuedTime(testData.getEnqueuedTime())); } @Override protected void afterTest() { dispose(consumer, client); } /** * Verifies that we can receive events a single time that is up to the batch size. */ @Test public void receiveEvents() { final int numberOfEvents = 5; final IterableStream<EventData> actual = consumer.receive(numberOfEvents, Duration.ofSeconds(10)); final List<EventData> asList = actual.stream().collect(Collectors.toList()); Assert.assertEquals(numberOfEvents, asList.size()); } /** * Verifies that we can receive multiple times. 
*/ @Test public void receiveEventsMultipleTimes() { final int numberOfEvents = 5; final int secondNumberOfEvents = 2; final Duration waitTime = Duration.ofSeconds(10); final IterableStream<EventData> actual = consumer.receive(numberOfEvents, waitTime); final IterableStream<EventData> actual2 = consumer.receive(secondNumberOfEvents, waitTime); final Map<Long, EventData> asList = actual.stream() .collect(Collectors.toMap(EventData::getSequenceNumber, Function.identity())); Assert.assertEquals(numberOfEvents, asList.size()); final Map<Long, EventData> asList2 = actual2.stream() .collect(Collectors.toMap(EventData::getSequenceNumber, Function.identity())); Assert.assertEquals(secondNumberOfEvents, asList2.size()); final Long maximumSequence = Collections.max(asList.keySet()); final Long minimumSequence = Collections.min(asList2.keySet()); Assert.assertTrue("The minimum in second receive should be less than first receive.", maximumSequence < minimumSequence); } /** * Verify that we can receive until the timeout. */ @Test public void receiveUntilTimeout() { final int numberOfEvents = 15; final String partitionId = "1"; final List<EventData> events = getEventsAsList(numberOfEvents, TestUtils.MESSAGE_TRACKING_ID); final EventPosition position = EventPosition.fromEnqueuedTime(Instant.now()); final EventHubConsumer consumer = client.createConsumer(DEFAULT_CONSUMER_GROUP_NAME, partitionId, position); final EventHubProducer producer = client.createProducer(new EventHubProducerOptions().setPartitionId(partitionId)); try { producer.send(events); final IterableStream<EventData> receive = consumer.receive(100, Duration.ofSeconds(5)); final List<EventData> asList = receive.stream().collect(Collectors.toList()); Assert.assertEquals(numberOfEvents, asList.size()); } finally { dispose(producer, consumer); } } /** * Verify that we don't continue to fetch more events when there are no listeners. 
*/ @Test public void doesNotContinueToReceiveEvents() { final int numberOfEvents = 15; final int secondSetOfEvents = 25; final int receiveNumber = 10; final String partitionId = "1"; final List<EventData> events = getEventsAsList(numberOfEvents, TestUtils.MESSAGE_TRACKING_ID); final List<EventData> events2 = getEventsAsList(secondSetOfEvents, TestUtils.MESSAGE_TRACKING_ID); final EventHubConsumer consumer = client.createConsumer(DEFAULT_CONSUMER_GROUP_NAME, partitionId, EventPosition.fromEnqueuedTime(Instant.now())); final EventHubProducer producer = client.createProducer(new EventHubProducerOptions().setPartitionId(partitionId)); try { producer.send(events); final IterableStream<EventData> receive = consumer.receive(receiveNumber, Duration.ofSeconds(5)); final List<EventData> asList = receive.stream().collect(Collectors.toList()); Assert.assertEquals(receiveNumber, asList.size()); producer.send(events2); } finally { dispose(consumer, producer); } } /** * Verify that we don't continue to fetch more events when there are no listeners. 
*/ @Test public void multipleConsumers() { final int numberOfEvents = 15; final int receiveNumber = 10; final String partitionId = "1"; final List<EventData> events = getEventsAsList(numberOfEvents, TestUtils.MESSAGE_TRACKING_ID); final EventPosition position = EventPosition.fromEnqueuedTime(Instant.now()); final EventHubConsumer consumer = client.createConsumer(DEFAULT_CONSUMER_GROUP_NAME, partitionId, position); final EventHubConsumer consumer2 = client.createConsumer(DEFAULT_CONSUMER_GROUP_NAME, partitionId, position); final EventHubProducer producer = client.createProducer(new EventHubProducerOptions().setPartitionId(partitionId)); try { producer.send(events); final IterableStream<EventData> receive = consumer.receive(receiveNumber, Duration.ofSeconds(5)); final IterableStream<EventData> receive2 = consumer2.receive(receiveNumber, Duration.ofSeconds(5)); final List<Long> asList = receive.stream().map(EventData::getSequenceNumber).collect(Collectors.toList()); final List<Long> asList2 = receive2.stream().map(EventData::getSequenceNumber).collect(Collectors.toList()); Assert.assertEquals(receiveNumber, asList.size()); Assert.assertEquals(receiveNumber, asList2.size()); Collections.sort(asList); Collections.sort(asList2); final Long[] first = asList.toArray(new Long[0]); final Long[] second = asList2.toArray(new Long[0]); Assert.assertArrayEquals(first, second); } finally { dispose(consumer, producer); } } /** * Verify that we can receive until the timeout multiple times. */ @Test private static List<EventData> getEventsAsList(int numberOfEvents, String messageId) { return TestUtils.getEvents(numberOfEvents, messageId).collectList().block(); } }
class EventHubConsumerIntegrationTest extends IntegrationTestBase { private static final String PARTITION_ID = "0"; private static final int NUMBER_OF_EVENTS = 10; private static final AtomicBoolean HAS_PUSHED_EVENTS = new AtomicBoolean(); private static volatile IntegrationTestEventData testData = null; private EventHubClient client; private EventHubConsumer consumer; public EventHubConsumerIntegrationTest() { super(new ClientLogger(EventHubConsumerIntegrationTest.class)); } @Rule public TestName testName = new TestName(); @Override protected String getTestName() { return testName.getMethodName(); } @Override protected void beforeTest() { super.beforeTest(); client = new EventHubClientBuilder() .connectionString(getConnectionString()) .scheduler(Schedulers.single()) .retry(RETRY_OPTIONS) .buildClient(); if (HAS_PUSHED_EVENTS.getAndSet(true)) { logger.info("Already pushed events to partition. Skipping."); } else { final EventHubProducerOptions options = new EventHubProducerOptions().setPartitionId(PARTITION_ID); testData = setupEventTestData(client, NUMBER_OF_EVENTS, options); } consumer = client.createConsumer(DEFAULT_CONSUMER_GROUP_NAME, PARTITION_ID, EventPosition.fromEnqueuedTime(testData.getEnqueuedTime())); } @Override protected void afterTest() { dispose(consumer, client); } /** * Verifies that we can receive events a single time that is up to the batch size. */ @Test public void receiveEvents() { final int numberOfEvents = 5; final IterableStream<EventData> actual = consumer.receive(numberOfEvents, Duration.ofSeconds(10)); final List<EventData> asList = actual.stream().collect(Collectors.toList()); Assert.assertEquals(numberOfEvents, asList.size()); } /** * Verifies that we can receive multiple times. 
*/ @Test public void receiveEventsMultipleTimes() { final int numberOfEvents = 5; final int secondNumberOfEvents = 2; final Duration waitTime = Duration.ofSeconds(10); final IterableStream<EventData> actual = consumer.receive(numberOfEvents, waitTime); final IterableStream<EventData> actual2 = consumer.receive(secondNumberOfEvents, waitTime); final Map<Long, EventData> asList = actual.stream() .collect(Collectors.toMap(EventData::getSequenceNumber, Function.identity())); Assert.assertEquals(numberOfEvents, asList.size()); final Map<Long, EventData> asList2 = actual2.stream() .collect(Collectors.toMap(EventData::getSequenceNumber, Function.identity())); Assert.assertEquals(secondNumberOfEvents, asList2.size()); final Long maximumSequence = Collections.max(asList.keySet()); final Long minimumSequence = Collections.min(asList2.keySet()); Assert.assertTrue("The minimum in second receive should be less than first receive.", maximumSequence < minimumSequence); } /** * Verify that we can receive until the timeout. */ @Test public void receiveUntilTimeout() { final int numberOfEvents = 15; final String partitionId = "1"; final List<EventData> events = getEventsAsList(numberOfEvents); final EventPosition position = EventPosition.fromEnqueuedTime(Instant.now()); final EventHubConsumer consumer = client.createConsumer(DEFAULT_CONSUMER_GROUP_NAME, partitionId, position); final EventHubProducer producer = client.createProducer(new EventHubProducerOptions().setPartitionId(partitionId)); try { producer.send(events); final IterableStream<EventData> receive = consumer.receive(100, Duration.ofSeconds(5)); final List<EventData> asList = receive.stream().collect(Collectors.toList()); Assert.assertEquals(numberOfEvents, asList.size()); } finally { dispose(producer, consumer); } } /** * Verify that we don't continue to fetch more events when there are no listeners. 
*/ @Test public void doesNotContinueToReceiveEvents() { final int numberOfEvents = 15; final int secondSetOfEvents = 25; final int receiveNumber = 10; final String partitionId = "1"; final List<EventData> events = getEventsAsList(numberOfEvents); final List<EventData> events2 = getEventsAsList(secondSetOfEvents); final EventHubConsumer consumer = client.createConsumer(DEFAULT_CONSUMER_GROUP_NAME, partitionId, EventPosition.fromEnqueuedTime(Instant.now())); final EventHubProducer producer = client.createProducer(new EventHubProducerOptions().setPartitionId(partitionId)); try { producer.send(events); final IterableStream<EventData> receive = consumer.receive(receiveNumber, Duration.ofSeconds(5)); final List<EventData> asList = receive.stream().collect(Collectors.toList()); Assert.assertEquals(receiveNumber, asList.size()); producer.send(events2); } finally { dispose(consumer, producer); } } /** * Verify that we don't continue to fetch more events when there are no listeners. */ @Test public void multipleConsumers() { final int numberOfEvents = 15; final int receiveNumber = 10; final String partitionId = "1"; final List<EventData> events = getEventsAsList(numberOfEvents); final EventPosition position = EventPosition.fromEnqueuedTime(Instant.now()); final EventHubConsumer consumer = client.createConsumer(DEFAULT_CONSUMER_GROUP_NAME, partitionId, position); final EventHubConsumer consumer2 = client.createConsumer(DEFAULT_CONSUMER_GROUP_NAME, partitionId, position); final EventHubProducer producer = client.createProducer(new EventHubProducerOptions().setPartitionId(partitionId)); try { producer.send(events); final IterableStream<EventData> receive = consumer.receive(receiveNumber, Duration.ofSeconds(5)); final IterableStream<EventData> receive2 = consumer2.receive(receiveNumber, Duration.ofSeconds(5)); final List<Long> asList = receive.stream().map(EventData::getSequenceNumber).collect(Collectors.toList()); final List<Long> asList2 = 
receive2.stream().map(EventData::getSequenceNumber).collect(Collectors.toList()); Assert.assertEquals(receiveNumber, asList.size()); Assert.assertEquals(receiveNumber, asList2.size()); Collections.sort(asList); Collections.sort(asList2); final Long[] first = asList.toArray(new Long[0]); final Long[] second = asList2.toArray(new Long[0]); Assert.assertArrayEquals(first, second); } finally { dispose(consumer, producer); } } /** * Verify that we can receive until the timeout multiple times. */ @Test private static List<EventData> getEventsAsList(int numberOfEvents) { return TestUtils.getEvents(numberOfEvents, TestUtils.MESSAGE_TRACKING_ID).collectList().block(); } }
Oh, that was because I don't use the consumer created in beforeTest() and create my own. I'll remove it. It's not necessary.
public void receiveUntilTimeoutMultipleTimes() throws IOException { this.consumer.close(); this.consumer = null; final int numberOfEvents = 15; final int numberOfEvents2 = 3; final String partitionId = "1"; final List<EventData> events = getEventsAsList(numberOfEvents, TestUtils.MESSAGE_TRACKING_ID); final List<EventData> events2 = getEventsAsList(numberOfEvents2, TestUtils.MESSAGE_TRACKING_ID); final EventHubConsumer consumer = client.createConsumer(DEFAULT_CONSUMER_GROUP_NAME, partitionId, EventPosition.fromEnqueuedTime(Instant.now())); final EventHubProducer producer = client.createProducer(new EventHubProducerOptions().setPartitionId(partitionId)); try { producer.send(events); final IterableStream<EventData> receive = consumer.receive(100, Duration.ofSeconds(3)); System.out.println("Sending second batch."); producer.send(events2); System.out.println("Receiving second batch."); final IterableStream<EventData> receive2 = consumer.receive(100, Duration.ofSeconds(5)); final List<EventData> asList = receive.stream().collect(Collectors.toList()); Assert.assertEquals(numberOfEvents, asList.size()); final List<EventData> asList2 = receive2.stream().collect(Collectors.toList()); Assert.assertEquals(numberOfEvents2, asList2.size()); } finally { dispose(consumer, producer); } }
this.consumer.close();
public void receiveUntilTimeoutMultipleTimes() { final int numberOfEvents = 15; final int numberOfEvents2 = 3; final String partitionId = "1"; final List<EventData> events = getEventsAsList(numberOfEvents); final List<EventData> events2 = getEventsAsList(numberOfEvents2); final EventHubConsumer consumer = client.createConsumer(DEFAULT_CONSUMER_GROUP_NAME, partitionId, EventPosition.fromEnqueuedTime(Instant.now())); final EventHubProducer producer = client.createProducer(new EventHubProducerOptions().setPartitionId(partitionId)); try { producer.send(events); final IterableStream<EventData> receive = consumer.receive(100, Duration.ofSeconds(3)); logger.info("Sending second batch."); producer.send(events2); logger.info("Receiving second batch."); final IterableStream<EventData> receive2 = consumer.receive(100, Duration.ofSeconds(5)); final List<EventData> asList = receive.stream().collect(Collectors.toList()); Assert.assertEquals(numberOfEvents, asList.size()); final List<EventData> asList2 = receive2.stream().collect(Collectors.toList()); Assert.assertEquals(numberOfEvents2, asList2.size()); } finally { dispose(consumer, producer); } }
class EventHubConsumerIntegrationTest extends IntegrationTestBase { private static final String PARTITION_ID = "0"; private static final int NUMBER_OF_EVENTS = 10; private static final AtomicBoolean HAS_PUSHED_EVENTS = new AtomicBoolean(); private static volatile IntegrationTestEventData testData = null; private EventHubClient client; private EventHubConsumer consumer; public EventHubConsumerIntegrationTest() { super(new ClientLogger(EventHubConsumerIntegrationTest.class)); } @Rule public TestName testName = new TestName(); @Override protected String getTestName() { return testName.getMethodName(); } @Override protected void beforeTest() { super.beforeTest(); client = new EventHubClientBuilder() .connectionString(getConnectionString()) .retry(RETRY_OPTIONS) .buildClient(); if (HAS_PUSHED_EVENTS.getAndSet(true)) { logger.info("Already pushed events to partition. Skipping."); } else { final EventHubProducerOptions options = new EventHubProducerOptions().setPartitionId(PARTITION_ID); testData = setupEventTestData(client, NUMBER_OF_EVENTS, options); } consumer = client.createConsumer(DEFAULT_CONSUMER_GROUP_NAME, PARTITION_ID, EventPosition.fromEnqueuedTime(testData.getEnqueuedTime())); } @Override protected void afterTest() { dispose(consumer, client); } /** * Verifies that we can receive events a single time that is up to the batch size. */ @Test public void receiveEvents() { final int numberOfEvents = 5; final IterableStream<EventData> actual = consumer.receive(numberOfEvents, Duration.ofSeconds(10)); final List<EventData> asList = actual.stream().collect(Collectors.toList()); Assert.assertEquals(numberOfEvents, asList.size()); } /** * Verifies that we can receive multiple times. 
*/ @Test public void receiveEventsMultipleTimes() { final int numberOfEvents = 5; final int secondNumberOfEvents = 2; final Duration waitTime = Duration.ofSeconds(10); final IterableStream<EventData> actual = consumer.receive(numberOfEvents, waitTime); final IterableStream<EventData> actual2 = consumer.receive(secondNumberOfEvents, waitTime); final Map<Long, EventData> asList = actual.stream() .collect(Collectors.toMap(EventData::getSequenceNumber, Function.identity())); Assert.assertEquals(numberOfEvents, asList.size()); final Map<Long, EventData> asList2 = actual2.stream() .collect(Collectors.toMap(EventData::getSequenceNumber, Function.identity())); Assert.assertEquals(secondNumberOfEvents, asList2.size()); final Long maximumSequence = Collections.max(asList.keySet()); final Long minimumSequence = Collections.min(asList2.keySet()); Assert.assertTrue("The minimum in second receive should be less than first receive.", maximumSequence < minimumSequence); } /** * Verify that we can receive until the timeout. */ @Test public void receiveUntilTimeout() { final int numberOfEvents = 15; final String partitionId = "1"; final List<EventData> events = getEventsAsList(numberOfEvents, TestUtils.MESSAGE_TRACKING_ID); final EventPosition position = EventPosition.fromEnqueuedTime(Instant.now()); final EventHubConsumer consumer = client.createConsumer(DEFAULT_CONSUMER_GROUP_NAME, partitionId, position); final EventHubProducer producer = client.createProducer(new EventHubProducerOptions().setPartitionId(partitionId)); try { producer.send(events); final IterableStream<EventData> receive = consumer.receive(100, Duration.ofSeconds(5)); final List<EventData> asList = receive.stream().collect(Collectors.toList()); Assert.assertEquals(numberOfEvents, asList.size()); } finally { dispose(producer, consumer); } } /** * Verify that we don't continue to fetch more events when there are no listeners. 
*/ @Test public void doesNotContinueToReceiveEvents() { final int numberOfEvents = 15; final int secondSetOfEvents = 25; final int receiveNumber = 10; final String partitionId = "1"; final List<EventData> events = getEventsAsList(numberOfEvents, TestUtils.MESSAGE_TRACKING_ID); final List<EventData> events2 = getEventsAsList(secondSetOfEvents, TestUtils.MESSAGE_TRACKING_ID); final EventHubConsumer consumer = client.createConsumer(DEFAULT_CONSUMER_GROUP_NAME, partitionId, EventPosition.fromEnqueuedTime(Instant.now())); final EventHubProducer producer = client.createProducer(new EventHubProducerOptions().setPartitionId(partitionId)); try { producer.send(events); final IterableStream<EventData> receive = consumer.receive(receiveNumber, Duration.ofSeconds(5)); final List<EventData> asList = receive.stream().collect(Collectors.toList()); Assert.assertEquals(receiveNumber, asList.size()); producer.send(events2); } finally { dispose(consumer, producer); } } /** * Verify that we don't continue to fetch more events when there are no listeners. 
*/ @Test public void multipleConsumers() { final int numberOfEvents = 15; final int receiveNumber = 10; final String partitionId = "1"; final List<EventData> events = getEventsAsList(numberOfEvents, TestUtils.MESSAGE_TRACKING_ID); final EventPosition position = EventPosition.fromEnqueuedTime(Instant.now()); final EventHubConsumer consumer = client.createConsumer(DEFAULT_CONSUMER_GROUP_NAME, partitionId, position); final EventHubConsumer consumer2 = client.createConsumer(DEFAULT_CONSUMER_GROUP_NAME, partitionId, position); final EventHubProducer producer = client.createProducer(new EventHubProducerOptions().setPartitionId(partitionId)); try { producer.send(events); final IterableStream<EventData> receive = consumer.receive(receiveNumber, Duration.ofSeconds(5)); final IterableStream<EventData> receive2 = consumer2.receive(receiveNumber, Duration.ofSeconds(5)); final List<Long> asList = receive.stream().map(EventData::getSequenceNumber).collect(Collectors.toList()); final List<Long> asList2 = receive2.stream().map(EventData::getSequenceNumber).collect(Collectors.toList()); Assert.assertEquals(receiveNumber, asList.size()); Assert.assertEquals(receiveNumber, asList2.size()); Collections.sort(asList); Collections.sort(asList2); final Long[] first = asList.toArray(new Long[0]); final Long[] second = asList2.toArray(new Long[0]); Assert.assertArrayEquals(first, second); } finally { dispose(consumer, producer); } } /** * Verify that we can receive until the timeout multiple times. */ @Test private static List<EventData> getEventsAsList(int numberOfEvents, String messageId) { return TestUtils.getEvents(numberOfEvents, messageId).collectList().block(); } }
class EventHubConsumerIntegrationTest extends IntegrationTestBase { private static final String PARTITION_ID = "0"; private static final int NUMBER_OF_EVENTS = 10; private static final AtomicBoolean HAS_PUSHED_EVENTS = new AtomicBoolean(); private static volatile IntegrationTestEventData testData = null; private EventHubClient client; private EventHubConsumer consumer; public EventHubConsumerIntegrationTest() { super(new ClientLogger(EventHubConsumerIntegrationTest.class)); } @Rule public TestName testName = new TestName(); @Override protected String getTestName() { return testName.getMethodName(); } @Override protected void beforeTest() { super.beforeTest(); client = new EventHubClientBuilder() .connectionString(getConnectionString()) .scheduler(Schedulers.single()) .retry(RETRY_OPTIONS) .buildClient(); if (HAS_PUSHED_EVENTS.getAndSet(true)) { logger.info("Already pushed events to partition. Skipping."); } else { final EventHubProducerOptions options = new EventHubProducerOptions().setPartitionId(PARTITION_ID); testData = setupEventTestData(client, NUMBER_OF_EVENTS, options); } consumer = client.createConsumer(DEFAULT_CONSUMER_GROUP_NAME, PARTITION_ID, EventPosition.fromEnqueuedTime(testData.getEnqueuedTime())); } @Override protected void afterTest() { dispose(consumer, client); } /** * Verifies that we can receive events a single time that is up to the batch size. */ @Test public void receiveEvents() { final int numberOfEvents = 5; final IterableStream<EventData> actual = consumer.receive(numberOfEvents, Duration.ofSeconds(10)); final List<EventData> asList = actual.stream().collect(Collectors.toList()); Assert.assertEquals(numberOfEvents, asList.size()); } /** * Verifies that we can receive multiple times. 
*/ @Test public void receiveEventsMultipleTimes() { final int numberOfEvents = 5; final int secondNumberOfEvents = 2; final Duration waitTime = Duration.ofSeconds(10); final IterableStream<EventData> actual = consumer.receive(numberOfEvents, waitTime); final IterableStream<EventData> actual2 = consumer.receive(secondNumberOfEvents, waitTime); final Map<Long, EventData> asList = actual.stream() .collect(Collectors.toMap(EventData::getSequenceNumber, Function.identity())); Assert.assertEquals(numberOfEvents, asList.size()); final Map<Long, EventData> asList2 = actual2.stream() .collect(Collectors.toMap(EventData::getSequenceNumber, Function.identity())); Assert.assertEquals(secondNumberOfEvents, asList2.size()); final Long maximumSequence = Collections.max(asList.keySet()); final Long minimumSequence = Collections.min(asList2.keySet()); Assert.assertTrue("The minimum in second receive should be less than first receive.", maximumSequence < minimumSequence); } /** * Verify that we can receive until the timeout. */ @Test public void receiveUntilTimeout() { final int numberOfEvents = 15; final String partitionId = "1"; final List<EventData> events = getEventsAsList(numberOfEvents); final EventPosition position = EventPosition.fromEnqueuedTime(Instant.now()); final EventHubConsumer consumer = client.createConsumer(DEFAULT_CONSUMER_GROUP_NAME, partitionId, position); final EventHubProducer producer = client.createProducer(new EventHubProducerOptions().setPartitionId(partitionId)); try { producer.send(events); final IterableStream<EventData> receive = consumer.receive(100, Duration.ofSeconds(5)); final List<EventData> asList = receive.stream().collect(Collectors.toList()); Assert.assertEquals(numberOfEvents, asList.size()); } finally { dispose(producer, consumer); } } /** * Verify that we don't continue to fetch more events when there are no listeners. 
*/ @Test public void doesNotContinueToReceiveEvents() { final int numberOfEvents = 15; final int secondSetOfEvents = 25; final int receiveNumber = 10; final String partitionId = "1"; final List<EventData> events = getEventsAsList(numberOfEvents); final List<EventData> events2 = getEventsAsList(secondSetOfEvents); final EventHubConsumer consumer = client.createConsumer(DEFAULT_CONSUMER_GROUP_NAME, partitionId, EventPosition.fromEnqueuedTime(Instant.now())); final EventHubProducer producer = client.createProducer(new EventHubProducerOptions().setPartitionId(partitionId)); try { producer.send(events); final IterableStream<EventData> receive = consumer.receive(receiveNumber, Duration.ofSeconds(5)); final List<EventData> asList = receive.stream().collect(Collectors.toList()); Assert.assertEquals(receiveNumber, asList.size()); producer.send(events2); } finally { dispose(consumer, producer); } } /** * Verify that we don't continue to fetch more events when there are no listeners. */ @Test public void multipleConsumers() { final int numberOfEvents = 15; final int receiveNumber = 10; final String partitionId = "1"; final List<EventData> events = getEventsAsList(numberOfEvents); final EventPosition position = EventPosition.fromEnqueuedTime(Instant.now()); final EventHubConsumer consumer = client.createConsumer(DEFAULT_CONSUMER_GROUP_NAME, partitionId, position); final EventHubConsumer consumer2 = client.createConsumer(DEFAULT_CONSUMER_GROUP_NAME, partitionId, position); final EventHubProducer producer = client.createProducer(new EventHubProducerOptions().setPartitionId(partitionId)); try { producer.send(events); final IterableStream<EventData> receive = consumer.receive(receiveNumber, Duration.ofSeconds(5)); final IterableStream<EventData> receive2 = consumer2.receive(receiveNumber, Duration.ofSeconds(5)); final List<Long> asList = receive.stream().map(EventData::getSequenceNumber).collect(Collectors.toList()); final List<Long> asList2 = 
receive2.stream().map(EventData::getSequenceNumber).collect(Collectors.toList()); Assert.assertEquals(receiveNumber, asList.size()); Assert.assertEquals(receiveNumber, asList2.size()); Collections.sort(asList); Collections.sort(asList2); final Long[] first = asList.toArray(new Long[0]); final Long[] second = asList2.toArray(new Long[0]); Assert.assertArrayEquals(first, second); } finally { dispose(consumer, producer); } } /** * Verify that we can receive until the timeout multiple times. */ @Test private static List<EventData> getEventsAsList(int numberOfEvents) { return TestUtils.getEvents(numberOfEvents, TestUtils.MESSAGE_TRACKING_ID).collectList().block(); } }
Do we split out the share and directory path in the URL? If not, we don't need this `String.format`.
/**
 * Get the url of the storage directory client.
 *
 * @return the URL of the storage directory client
 * @throws RuntimeException If the directory is using a malformed URL.
 */
public URL getDirectoryUrl() {
    String directoryURLString = String.format("%s/%s/%s", azureFileStorageClient.getUrl(),
        shareName, directoryPath);
    if (snapshot != null) {
        directoryURLString = String.format("%s?snapshot=%s", directoryURLString, snapshot);
    }
    try {
        return new URL(directoryURLString);
    } catch (MalformedURLException e) {
        // FIX: the format string previously concatenated the class name onto "Invalid URL on
        // %s: %s" while supplying only one argument, which throws
        // MissingFormatArgumentException instead of producing the intended message.
        throw logger.logExceptionAsError(new RuntimeException(
            String.format("Invalid URL on %s: %s", getClass().getSimpleName(), directoryURLString), e));
    }
}
String directoryURLString = String.format("%s/%s/%s", azureFileStorageClient.getUrl(),
/**
 * Get the url of the storage directory client.
 *
 * @return the URL of the storage directory client
 * @throws RuntimeException If the directory is using a malformed URL.
 */
public URL getDirectoryUrl() {
    StringBuilder directoryURLString = new StringBuilder(azureFileStorageClient.getUrl()).append("/")
        .append(shareName).append("/").append(directoryPath);
    if (snapshot != null) {
        directoryURLString.append("?snapshot=").append(snapshot);
    }
    try {
        return new URL(directoryURLString.toString());
    } catch (MalformedURLException e) {
        // FIX: the format string previously concatenated the class name onto "Invalid URL on
        // %s: %s" while supplying only one argument, which throws
        // MissingFormatArgumentException instead of producing the intended message.
        throw logger.logExceptionAsError(new RuntimeException(
            String.format("Invalid URL on %s: %s", getClass().getSimpleName(), directoryURLString), e));
    }
}
class DirectoryAsyncClient { private final ClientLogger logger = new ClientLogger(DirectoryAsyncClient.class); private final AzureFileStorageImpl azureFileStorageClient; private final String shareName; private final String directoryPath; private final String snapshot; /** * Creates a DirectoryAsyncClient that sends requests to the storage directory at {@link * AzureFileStorageImpl * {@code client}. * * @param azureFileStorageClient Client that interacts with the service interfaces * @param shareName Name of the share * @param directoryPath Name of the directory * @param snapshot The snapshot of the share */ DirectoryAsyncClient(AzureFileStorageImpl azureFileStorageClient, String shareName, String directoryPath, String snapshot) { Objects.requireNonNull(shareName); Objects.requireNonNull(directoryPath); this.shareName = shareName; this.directoryPath = directoryPath; this.snapshot = snapshot; this.azureFileStorageClient = azureFileStorageClient; } /** * Get the url of the storage directory client. * * @return the URL of the storage directory client * @throws RuntimeException If the directory is using a malformed URL. */ /** * Constructs a FileAsyncClient that interacts with the specified file. * * <p>If the file doesn't exist in this directory {@link FileAsyncClient
class DirectoryAsyncClient { private final ClientLogger logger = new ClientLogger(DirectoryAsyncClient.class); private final AzureFileStorageImpl azureFileStorageClient; private final String shareName; private final String directoryPath; private final String snapshot; /** * Creates a DirectoryAsyncClient that sends requests to the storage directory at {@link * AzureFileStorageImpl * {@code client}. * * @param azureFileStorageClient Client that interacts with the service interfaces * @param shareName Name of the share * @param directoryPath Name of the directory * @param snapshot The snapshot of the share */ DirectoryAsyncClient(AzureFileStorageImpl azureFileStorageClient, String shareName, String directoryPath, String snapshot) { Objects.requireNonNull(shareName); Objects.requireNonNull(directoryPath); this.shareName = shareName; this.directoryPath = directoryPath; this.snapshot = snapshot; this.azureFileStorageClient = azureFileStorageClient; } /** * Get the url of the storage directory client. * * @return the URL of the storage directory client * @throws RuntimeException If the directory is using a malformed URL. */ /** * Constructs a FileAsyncClient that interacts with the specified file. * * <p>If the file doesn't exist in this directory {@link FileAsyncClient
Same question as before: do we split out the share and file path in the URL? If not, this `String.format` is unnecessary.
/**
 * Get the url of the storage file client.
 *
 * @return the URL of the storage file client
 * @throws RuntimeException If the file is using a malformed URL.
 */
public URL getFileUrl() {
    String fileURLString = String.format("%s/%s/%s", azureFileStorageClient.getUrl(), shareName, filePath);
    if (snapshot != null) {
        fileURLString = String.format("%s?snapshot=%s", fileURLString, snapshot);
    }
    try {
        return new URL(fileURLString);
    } catch (MalformedURLException e) {
        // FIX: the format string previously concatenated the class name onto "Invalid URL on
        // %s: %s" while supplying only one argument, which throws
        // MissingFormatArgumentException instead of producing the intended message.
        throw logger.logExceptionAsError(new RuntimeException(
            String.format("Invalid URL on %s: %s", getClass().getSimpleName(), fileURLString), e));
    }
}
String fileURLString = String.format("%s/%s/%s", azureFileStorageClient.getUrl(), shareName, filePath);
/**
 * Get the url of the storage file client.
 *
 * @return the URL of the storage file client
 * @throws RuntimeException If the file is using a malformed URL.
 */
public URL getFileUrl() {
    StringBuilder fileURLString = new StringBuilder(azureFileStorageClient.getUrl()).append("/")
        .append(shareName).append("/").append(filePath);
    if (snapshot != null) {
        fileURLString.append("?snapshot=").append(snapshot);
    }
    try {
        return new URL(fileURLString.toString());
    } catch (MalformedURLException e) {
        // FIX: the format string previously concatenated the class name onto "Invalid URL on
        // %s: %s" while supplying only one argument, which throws
        // MissingFormatArgumentException instead of producing the intended message.
        throw logger.logExceptionAsError(new RuntimeException(
            String.format("Invalid URL on %s: %s", getClass().getSimpleName(), fileURLString), e));
    }
}
class FileAsyncClient { private final ClientLogger logger = new ClientLogger(FileAsyncClient.class); private static final long FILE_DEFAULT_BLOCK_SIZE = 4 * 1024 * 1024L; private static final long DOWNLOAD_UPLOAD_CHUNK_TIMEOUT = 300; private final AzureFileStorageImpl azureFileStorageClient; private final String shareName; private final String filePath; private final String snapshot; /** * Creates a FileAsyncClient that sends requests to the storage file at {@link AzureFileStorageImpl * endpoint}. Each service call goes through the {@link HttpPipeline pipeline} in the {@code client}. * * @param azureFileStorageClient Client that interacts with the service interfaces * @param shareName Name of the share * @param filePath Path to the file * @param snapshot The snapshot of the share */ FileAsyncClient(AzureFileStorageImpl azureFileStorageClient, String shareName, String filePath, String snapshot) { Objects.requireNonNull(shareName); Objects.requireNonNull(filePath); this.shareName = shareName; this.filePath = filePath; this.snapshot = snapshot; this.azureFileStorageClient = azureFileStorageClient; } /** * Get the url of the storage file client. * * @return the URL of the storage file client * @throws RuntimeException If the file is using a malformed URL. */ /** * Creates a file in the storage account and returns a response of {@link FileInfo} to interact with it. * * <p><strong>Code Samples</strong></p> * * <p>Create the file with size 1KB.</p> * * {@codesnippet com.azure.storage.file.fileClient.create} * * <p>For more information, see the * <a href="https: * * @param maxSize The maximum size in bytes for the file, up to 1 TiB. * @return A response containing the file info and the status of creating the file. * @throws StorageException If the file has already existed, the parent directory does not exist or fileName is an * invalid resource name. 
*/ public Mono<FileInfo> create(long maxSize) { return createWithResponse(maxSize, null, null, null, null).flatMap(FluxUtil::toMono); } /** * Creates a file in the storage account and returns a response of FileInfo to interact with it. * * <p><strong>Code Samples</strong></p> * * <p>Create the file with length of 1024 bytes, some headers, file smb properties and metadata.</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.createWithResponse * * <p>For more information, see the * <a href="https: * * @param maxSize The maximum size in bytes for the file, up to 1 TiB. * @param httpHeaders The user settable file http headers. * @param smbProperties The user settable file smb properties. * @param filePermission The file permission of the file. * @param metadata Optional name-value pairs associated with the file as metadata. * @return A response containing the {@link FileInfo file info} and the status of creating the file. * @throws StorageException If the directory has already existed, the parent directory does not exist or directory * is an invalid resource name. */ public Mono<Response<FileInfo>> createWithResponse(long maxSize, FileHTTPHeaders httpHeaders, FileSmbProperties smbProperties, String filePermission, Map<String, String> metadata) { return withContext(context -> createWithResponse(maxSize, httpHeaders, smbProperties, filePermission, metadata, context)); } Mono<Response<FileInfo>> createWithResponse(long maxSize, FileHTTPHeaders httpHeaders, FileSmbProperties smbProperties, String filePermission, Map<String, String> metadata, Context context) { smbProperties = smbProperties == null ? 
new FileSmbProperties() : smbProperties; filePermissionAndKeyHelper(filePermission, smbProperties.getFilePermissionKey()); filePermission = smbProperties.setFilePermission(filePermission, FileConstants.FILE_PERMISSION_INHERIT); String filePermissionKey = smbProperties.getFilePermissionKey(); String fileAttributes = smbProperties.setNtfsFileAttributes(FileConstants.FILE_ATTRIBUTES_NONE); String fileCreationTime = smbProperties.setFileCreationTime(FileConstants.FILE_TIME_NOW); String fileLastWriteTime = smbProperties.setFileLastWriteTime(FileConstants.FILE_TIME_NOW); return postProcessResponse(azureFileStorageClient.files() .createWithRestResponseAsync(shareName, filePath, maxSize, fileAttributes, fileCreationTime, fileLastWriteTime, null, metadata, filePermission, filePermissionKey, httpHeaders, context)) .map(this::createFileInfoResponse); } /** * Copies a blob or file to a destination file within the storage account. * * <p><strong>Code Samples</strong></p> * * <p>Copy file from source url to the {@code resourcePath} </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.startCopy * * <p>For more information, see the * <a href="https: * * @param sourceUrl Specifies the URL of the source file or blob, up to 2 KB in length. * @param metadata Optional name-value pairs associated with the file as metadata. Metadata names must adhere to the * naming rules. * @return The {@link FileCopyInfo file copy info}. * @see <a href="https: */ public Mono<FileCopyInfo> startCopy(String sourceUrl, Map<String, String> metadata) { return startCopyWithResponse(sourceUrl, metadata).flatMap(FluxUtil::toMono); } /** * Copies a blob or file to a destination file within the storage account. 
* * <p><strong>Code Samples</strong></p> * * <p>Copy file from source url to the {@code resourcePath} </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.startCopyWithResponse * * <p>For more information, see the * <a href="https: * * @param sourceUrl Specifies the URL of the source file or blob, up to 2 KB in length. * @param metadata Optional name-value pairs associated with the file as metadata. Metadata names must adhere to the * naming rules. * @return A response containing the {@link FileCopyInfo file copy info} and the status of copying the file. * @see <a href="https: */ public Mono<Response<FileCopyInfo>> startCopyWithResponse(String sourceUrl, Map<String, String> metadata) { return withContext(context -> startCopyWithResponse(sourceUrl, metadata, context)); } Mono<Response<FileCopyInfo>> startCopyWithResponse(String sourceUrl, Map<String, String> metadata, Context context) { return postProcessResponse(azureFileStorageClient.files() .startCopyWithRestResponseAsync(shareName, filePath, sourceUrl, null, metadata, context)) .map(this::startCopyResponse); } /** * Aborts a pending Copy File operation, and leaves a destination file with zero length and full metadata. * * <p><strong>Code Samples</strong></p> * * <p>Abort copy file from copy id("someCopyId") </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.abortCopy * * <p>For more information, see the * <a href="https: * * @param copyId Specifies the copy id which has copying pending status associate with it. * @return An empty response. */ public Mono<Void> abortCopy(String copyId) { return abortCopyWithResponse(copyId).flatMap(FluxUtil::toMono); } /** * Aborts a pending Copy File operation, and leaves a destination file with zero length and full metadata. 
* * <p><strong>Code Samples</strong></p> * * <p>Abort copy file from copy id("someCopyId") </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.abortCopyWithResponse * * <p>For more information, see the * <a href="https: * * @param copyId Specifies the copy id which has copying pending status associate with it. * @return A response containing the status of aborting copy the file. */ public Mono<VoidResponse> abortCopyWithResponse(String copyId) { return withContext(context -> abortCopyWithResponse(copyId, context)); } Mono<VoidResponse> abortCopyWithResponse(String copyId, Context context) { return postProcessResponse(azureFileStorageClient.files() .abortCopyWithRestResponseAsync(shareName, filePath, copyId, context)) .map(VoidResponse::new); } /** * Downloads a file from the system, including its metadata and properties into a file specified by the path. * * <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException} * will be thrown.</p> * * <p><strong>Code Samples</strong></p> * * <p>Download the file to current folder. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.downloadToFile * * <p>For more information, see the * <a href="https: * * @param downloadFilePath The path where store the downloaded file * @return An empty response. */ public Mono<Void> downloadToFile(String downloadFilePath) { return downloadToFile(downloadFilePath, null); } /** * Downloads a file from the system, including its metadata and properties into a file specified by the path. * * <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException} * will be thrown.</p> * * <p><strong>Code Samples</strong></p> * * <p>Download the file from 1024 to 2048 bytes to current folder. 
</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.downloadToFile * * <p>For more information, see the * <a href="https: * * @param downloadFilePath The path where store the downloaded file * @param range Optional byte range which returns file data only from the specified range. * @return An empty response. */ public Mono<Void> downloadToFile(String downloadFilePath, FileRange range) { return Mono.using(() -> channelSetup(downloadFilePath, StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW), channel -> sliceFileRange(range) .flatMap(chunk -> downloadWithPropertiesWithResponse(chunk, false) .map(dar -> dar.getValue().getBody()) .subscribeOn(Schedulers.elastic()) .flatMap(fbb -> FluxUtil .writeFile(fbb, channel, chunk.getStart() - (range == null ? 0 : range.getStart())) .subscribeOn(Schedulers.elastic()) .timeout(Duration.ofSeconds(DOWNLOAD_UPLOAD_CHUNK_TIMEOUT)) .retry(3, throwable -> throwable instanceof IOException || throwable instanceof TimeoutException))) .then(), this::channelCleanUp); } private AsynchronousFileChannel channelSetup(String filePath, OpenOption... options) { try { return AsynchronousFileChannel.open(Paths.get(filePath), options); } catch (IOException e) { throw logger.logExceptionAsError(new UncheckedIOException(e)); } } private void channelCleanUp(AsynchronousFileChannel channel) { try { channel.close(); } catch (IOException e) { throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(e))); } } private Flux<FileRange> sliceFileRange(FileRange fileRange) { long offset = fileRange == null ? 
0L : fileRange.getStart(); Mono<Long> end; if (fileRange != null) { end = Mono.just(fileRange.getEnd()); } else { end = Mono.empty(); } end = end.switchIfEmpty(getProperties().map(FileProperties::getContentLength)); return end .map(e -> { List<FileRange> chunks = new ArrayList<>(); for (long pos = offset; pos < e; pos += FILE_DEFAULT_BLOCK_SIZE) { long count = FILE_DEFAULT_BLOCK_SIZE; if (pos + count > e) { count = e - pos; } chunks.add(new FileRange(pos, pos + count - 1)); } return chunks; }) .flatMapMany(Flux::fromIterable); } /** * Downloads a file from the system, including its metadata and properties * * <p><strong>Code Samples</strong></p> * * <p>Download the file with its metadata and properties. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.downloadWithProperties} * * <p>For more information, see the * <a href="https: * * @return The {@link FileDownloadInfo file download Info} */ public Mono<FileDownloadInfo> downloadWithProperties() { return downloadWithPropertiesWithResponse(null, null).flatMap(FluxUtil::toMono); } /** * Downloads a file from the system, including its metadata and properties * * <p><strong>Code Samples</strong></p> * * <p>Download the file from 1024 to 2048 bytes with its metadata and properties and without the contentMD5. </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.downloadWithPropertiesWithResponse * * <p>For more information, see the * <a href="https: * * @param range Optional byte range which returns file data only from the specified range. * @param rangeGetContentMD5 Optional boolean which the service returns the MD5 hash for the range when it sets to * true, as long as the range is less than or equal to 4 MB in size. 
* @return A response containing the {@link FileDownloadInfo file download Info} with headers and response status * code */ public Mono<Response<FileDownloadInfo>> downloadWithPropertiesWithResponse(FileRange range, Boolean rangeGetContentMD5) { return withContext(context -> downloadWithPropertiesWithResponse(range, rangeGetContentMD5, context)); } Mono<Response<FileDownloadInfo>> downloadWithPropertiesWithResponse(FileRange range, Boolean rangeGetContentMD5, Context context) { String rangeString = range == null ? null : range.toString(); return postProcessResponse(azureFileStorageClient.files() .downloadWithRestResponseAsync(shareName, filePath, null, rangeString, rangeGetContentMD5, context)) .map(this::downloadWithPropertiesResponse); } /** * Deletes the file associate with the client. * * <p><strong>Code Samples</strong></p> * * <p>Delete the file</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.delete} * * <p>For more information, see the * <a href="https: * * @return An empty response * @throws StorageException If the directory doesn't exist or the file doesn't exist. */ public Mono<Void> delete() { return deleteWithResponse(null).flatMap(FluxUtil::toMono); } /** * Deletes the file associate with the client. * * <p><strong>Code Samples</strong></p> * * <p>Delete the file</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.deleteWithResponse} * * <p>For more information, see the * <a href="https: * * @return A response that only contains headers and response status code * @throws StorageException If the directory doesn't exist or the file doesn't exist. */ public Mono<VoidResponse> deleteWithResponse() { return withContext(this::deleteWithResponse); } Mono<VoidResponse> deleteWithResponse(Context context) { return postProcessResponse(azureFileStorageClient.files() .deleteWithRestResponseAsync(shareName, filePath, context)) .map(VoidResponse::new); } /** * Retrieves the properties of the storage account's file. 
The properties includes file metadata, last modified * date, is server encrypted, and eTag. * * <p><strong>Code Samples</strong></p> * * <p>Retrieve file properties</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.getProperties} * * <p>For more information, see the * <a href="https: * * @return {@link FileProperties Storage file properties} */ public Mono<FileProperties> getProperties() { return getPropertiesWithResponse().flatMap(FluxUtil::toMono); } /** * Retrieves the properties of the storage account's file. The properties includes file metadata, last modified * date, is server encrypted, and eTag. * * <p><strong>Code Samples</strong></p> * * <p>Retrieve file properties</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.getPropertiesWithResponse} * * <p>For more information, see the * <a href="https: * * @return A response containing the {@link FileProperties storage file properties} and response status code */ public Mono<Response<FileProperties>> getPropertiesWithResponse() { return withContext(this::getPropertiesWithResponse); } Mono<Response<FileProperties>> getPropertiesWithResponse(Context context) { return postProcessResponse(azureFileStorageClient.files() .getPropertiesWithRestResponseAsync(shareName, filePath, snapshot, null, context)) .map(this::getPropertiesResponse); } /** * Sets the user-defined file properties to associate to the file. * * <p>If {@code null} is passed for the fileProperties.httpHeaders it will clear the httpHeaders associated to the * file. 
* If {@code null} is passed for the fileProperties.filesmbproperties it will preserve the filesmb properties
 * associated with the file.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * <p>Set the httpHeaders of contentType of "text/plain"</p>
 *
 * {@codesnippet com.azure.storage.file.fileAsyncClient.setProperties}
 *
 * <p>Clear the metadata of the file and preserve the SMB properties</p>
 *
 * {@codesnippet com.azure.storage.file.fileAsyncClient.setProperties}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param newFileSize New file size of the file
 * @param httpHeaders The user settable file http headers.
 * @param smbProperties The user settable file smb properties.
 * @param filePermission The file permission of the file
 * @return The {@link FileInfo file info}
 * @throws IllegalArgumentException thrown if parameters fail the validation.
 */
public Mono<FileInfo> setProperties(long newFileSize, FileHTTPHeaders httpHeaders, FileSmbProperties smbProperties,
    String filePermission) {
    return setPropertiesWithResponse(newFileSize, httpHeaders, smbProperties, filePermission)
        .flatMap(FluxUtil::toMono);
}

/**
 * Sets the user-defined file properties to associate to the file.
 *
 * <p>If {@code null} is passed for the httpHeaders it will clear the httpHeaders associated to the file.
 * If {@code null} is passed for the filesmbproperties it will preserve the filesmbproperties associated with the
 * file.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * <p>Set the httpHeaders of contentType of "text/plain"</p>
 *
 * {@codesnippet com.azure.storage.file.fileAsyncClient.setPropertiesWithResponse}
 *
 * <p>Clear the metadata of the file and preserve the SMB properties</p>
 *
 * {@codesnippet com.azure.storage.file.fileAsyncClient.setPropertiesWithResponse}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param newFileSize New file size of the file.
 * @param httpHeaders The user settable file http headers.
 * @param smbProperties The user settable file smb properties.
 * @param filePermission The file permission of the file.
 * @return Response containing the {@link FileInfo file info} and response status code.
 * @throws IllegalArgumentException thrown if parameters fail the validation.
 */
public Mono<Response<FileInfo>> setPropertiesWithResponse(long newFileSize, FileHTTPHeaders httpHeaders,
    FileSmbProperties smbProperties, String filePermission) {
    return withContext(context ->
        setPropertiesWithResponse(newFileSize, httpHeaders, smbProperties, filePermission, context));
}

Mono<Response<FileInfo>> setPropertiesWithResponse(long newFileSize, FileHTTPHeaders httpHeaders,
    FileSmbProperties smbProperties, String filePermission, Context context) {
    // Default to an empty SMB properties bag so the setters below can substitute PRESERVE
    // sentinels for every unset field.
    smbProperties = smbProperties == null ? new FileSmbProperties() : smbProperties;

    // filePermission and a filePermissionKey are alternatives — presumably the helper rejects
    // supplying both; its definition is not visible here (TODO confirm).
    filePermissionAndKeyHelper(filePermission, smbProperties.getFilePermissionKey());

    // Unset SMB fields are replaced with FileConstants.PRESERVE so existing values are kept.
    filePermission = smbProperties.setFilePermission(filePermission, FileConstants.PRESERVE);
    String filePermissionKey = smbProperties.getFilePermissionKey();
    String fileAttributes = smbProperties.setNtfsFileAttributes(FileConstants.PRESERVE);
    String fileCreationTime = smbProperties.setFileCreationTime(FileConstants.PRESERVE);
    String fileLastWriteTime = smbProperties.setFileLastWriteTime(FileConstants.PRESERVE);

    return postProcessResponse(azureFileStorageClient.files()
        .setHTTPHeadersWithRestResponseAsync(shareName, filePath, fileAttributes, fileCreationTime,
            fileLastWriteTime, null, newFileSize, filePermission, filePermissionKey, httpHeaders, context))
        .map(this::setPropertiesResponse);
}

/**
 * Sets the user-defined metadata to associate to the file.
*
 * <p>If {@code null} is passed for the metadata it will clear the metadata associated to the file.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * <p>Set the metadata to "file:updatedMetadata"</p>
 *
 * {@codesnippet com.azure.storage.file.fileAsyncClient.setMetadata}
 *
 * <p>Clear the metadata of the file</p>
 *
 * {@codesnippet com.azure.storage.file.fileAsyncClient.setMetadataWithResponse}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param metadata Options.Metadata to set on the file, if null is passed the metadata for the file is cleared
 * @return {@link FileMetadataInfo file meta info}
 * @throws StorageException If the file doesn't exist or the metadata contains invalid keys
 */
public Mono<FileMetadataInfo> setMetadata(Map<String, String> metadata) {
    return setMetadataWithResponse(metadata).flatMap(FluxUtil::toMono);
}

/**
 * Sets the user-defined metadata to associate to the file.
 *
 * <p>If {@code null} is passed for the metadata it will clear the metadata associated to the file.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * <p>Set the metadata to "file:updatedMetadata"</p>
 *
 * {@codesnippet com.azure.storage.file.fileAsyncClient.setMetadataWithResponse}
 *
 * <p>Clear the metadata of the file</p>
 *
 * {@codesnippet com.azure.storage.file.fileAsyncClient.setMetadataWithResponse}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param metadata Options.Metadata to set on the file, if null is passed the metadata for the file is cleared
 * @return A response containing the {@link FileMetadataInfo file meta info} and status code
 * @throws StorageException If the file doesn't exist or the metadata contains invalid keys
 */
public Mono<Response<FileMetadataInfo>> setMetadataWithResponse(Map<String, String> metadata) {
    return withContext(context -> setMetadataWithResponse(metadata, context));
}

Mono<Response<FileMetadataInfo>> setMetadataWithResponse(Map<String, String> metadata, Context context) {
    return postProcessResponse(azureFileStorageClient.files()
        .setMetadataWithRestResponseAsync(shareName, filePath, null, metadata, context))
        .map(this::setMetadataResponse);
}

/**
 * Uploads a range of bytes to the beginning of a file in storage file service. Upload operations performs an
 * in-place write on the specified file.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * <p>Upload data "default" to the file in Storage File Service. </p>
 *
 * {@codesnippet com.azure.storage.file.fileAsyncClient.upload}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param data The data which will upload to the storage file.
 * @param length Specifies the number of bytes being transmitted in the request body.
 * @return The {@link FileUploadInfo file upload info}
 */
public Mono<FileUploadInfo> upload(Flux<ByteBuffer> data, long length) {
    return uploadWithResponse(data, length).flatMap(FluxUtil::toMono);
}

/**
 * Uploads a range of bytes to the beginning of a file in storage file service. Upload operations performs an
 * in-place write on the specified file.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * <p>Upload "default" to the file. </p>
 *
 * {@codesnippet com.azure.storage.file.fileAsyncClient.uploadWithResponse}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param data The data which will upload to the storage file.
 * @param length Specifies the number of bytes being transmitted in the request body. When the FileRangeWriteType is
 * set to clear, the value of this header must be set to zero.
* @return A response containing the {@link FileUploadInfo file upload info} with headers and response status code
 * @throws StorageException If you attempt to upload a range that is larger than 4 MB, the service returns status
 * code 413 (Request Entity Too Large)
 */
public Mono<Response<FileUploadInfo>> uploadWithResponse(Flux<ByteBuffer> data, long length) {
    return withContext(context -> uploadWithResponse(data, length, context));
}

// Uploads [0, length) as a single UPDATE range; no chunking is done here, so per the contract
// above the service rejects lengths over 4 MB with 413.
Mono<Response<FileUploadInfo>> uploadWithResponse(Flux<ByteBuffer> data, long length, Context context) {
    FileRange range = new FileRange(0, length - 1);
    return postProcessResponse(azureFileStorageClient.files()
        .uploadRangeWithRestResponseAsync(shareName, filePath, range.toString(), FileRangeWriteType.UPDATE,
            length, data, null, null, context))
        .map(this::uploadResponse);
}

/**
 * Uploads a range of bytes to specific of a file in storage file service. Upload operations performs an in-place
 * write on the specified file.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * <p>Upload data "default" starting from 1024 bytes. </p>
 *
 * {@codesnippet com.azure.storage.file.fileAsyncClient.upload}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param data The data which will upload to the storage file.
 * @param length Specifies the number of bytes being transmitted in the request body.
 * @param offset Optional starting point of the upload range. It will start from the beginning if it is
 * {@code null}
 * @return The {@link FileUploadInfo file upload info}
 * @throws StorageException If you attempt to upload a range that is larger than 4 MB, the service returns status
 * code 413 (Request Entity Too Large)
 */
public Mono<FileUploadInfo> upload(Flux<ByteBuffer> data, long length, long offset) {
    return uploadWithResponse(data, length, offset).flatMap(FluxUtil::toMono);
}

/**
 * Uploads a range of bytes to specific of a file in storage file service. Upload operations performs an in-place
 * write on the specified file.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * <p>Upload the file from 1024 to 2048 bytes with its metadata and properties and without the contentMD5. </p>
 *
 * {@codesnippet com.azure.storage.file.fileAsyncClient.uploadWithResponse}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param data The data which will upload to the storage file.
 * @param offset Optional starting point of the upload range. It will start from the beginning if it is
 * {@code null}
 * @param length Specifies the number of bytes being transmitted in the request body. When the FileRangeWriteType is
 * set to clear, the value of this header must be set to zero.
 * @return A response containing the {@link FileUploadInfo file upload info} with headers and response status code
 * @throws StorageException If you attempt to upload a range that is larger than 4 MB, the service returns status
 * code 413 (Request Entity Too Large)
 */
public Mono<Response<FileUploadInfo>> uploadWithResponse(Flux<ByteBuffer> data, long length, long offset) {
    return withContext(context -> uploadWithResponse(data, length, offset, context));
}

// Uploads [offset, offset + length) as a single UPDATE range.
Mono<Response<FileUploadInfo>> uploadWithResponse(Flux<ByteBuffer> data, long length, long offset,
    Context context) {
    FileRange range = new FileRange(offset, offset + length - 1);
    return postProcessResponse(azureFileStorageClient.files()
        .uploadRangeWithRestResponseAsync(shareName, filePath, range.toString(), FileRangeWriteType.UPDATE,
            length, data, null, null, context))
        .map(this::uploadResponse);
}

/**
 * Uploads a range of bytes from one file to another file.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * <p>Upload a number of bytes from a file at defined source and destination offsets </p>
 *
 * {@codesnippet com.azure.storage.file.fileAsyncClient.uploadRangeFromURL}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param length Specifies the number of bytes being transmitted in the request body.
* @param destinationOffset Starting point of the upload range on the destination. * @param sourceOffset Starting point of the upload range on the source. * @param sourceURI Specifies the URL of the source file. * @return The {@link FileUploadRangeFromURLInfo file upload range from url info} */ public Mono<FileUploadRangeFromURLInfo> uploadRangeFromURL(long length, long destinationOffset, long sourceOffset, URI sourceURI) { return uploadRangeFromURLWithResponse(length, destinationOffset, sourceOffset, sourceURI) .flatMap(FluxUtil::toMono); } /** * Uploads a range of bytes from one file to another file. * * <p><strong>Code Samples</strong></p> * * <p>Upload a number of bytes from a file at defined source and destination offsets </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.uploadRangeFromURLWithResponse * * <p>For more information, see the * <a href="https: * * @param length Specifies the number of bytes being transmitted in the request body. * @param destinationOffset Starting point of the upload range on the destination. * @param sourceOffset Starting point of the upload range on the source. * @param sourceURI Specifies the URL of the source file. * @return A response containing the {@link FileUploadRangeFromURLInfo file upload range from url info} with headers * and response status code. 
*/
public Mono<Response<FileUploadRangeFromURLInfo>> uploadRangeFromURLWithResponse(long length,
    long destinationOffset, long sourceOffset, URI sourceURI) {
    return withContext(context ->
        uploadRangeFromURLWithResponse(length, destinationOffset, sourceOffset, sourceURI, context));
}

// Server-side copy of [sourceOffset, sourceOffset + length) from sourceURI into
// [destinationOffset, destinationOffset + length) of this file.
Mono<Response<FileUploadRangeFromURLInfo>> uploadRangeFromURLWithResponse(long length, long destinationOffset,
    long sourceOffset, URI sourceURI, Context context) {
    FileRange destinationRange = new FileRange(destinationOffset, destinationOffset + length - 1);
    FileRange sourceRange = new FileRange(sourceOffset, sourceOffset + length - 1);

    return postProcessResponse(azureFileStorageClient.files()
        .uploadRangeFromURLWithRestResponseAsync(shareName, filePath, destinationRange.toString(),
            sourceURI.toString(), 0, null, sourceRange.toString(), null, null, context))
        .map(this::uploadRangeFromURLResponse);
}

/**
 * Clear a range of bytes to specific of a file in storage file service. Clear operations performs an in-place write
 * on the specified file.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * <p>Clears the first 1024 bytes. </p>
 *
 * {@codesnippet com.azure.storage.file.fileAsyncClient.clearRange}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param length Specifies the number of bytes being cleared.
 * @return The {@link FileUploadInfo file upload info}
 */
public Mono<FileUploadInfo> clearRange(long length) {
    return clearRangeWithResponse(length, 0).flatMap(FluxUtil::toMono);
}

/**
 * Clear a range of bytes to specific of a file in storage file service. Clear operations performs an in-place write
 * on the specified file.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * <p>Clear the range starting from 1024 with length of 1024. </p>
 *
 * {@codesnippet com.azure.storage.file.fileAsyncClient.clearRange}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param length Specifies the number of bytes being cleared in the request body.
 * @param offset Optional starting point of the range to clear. It will start from the beginning if it is
 * {@code null}
 * @return A response of {@link FileUploadInfo file upload info} that only contains headers and response status code
 */
public Mono<Response<FileUploadInfo>> clearRangeWithResponse(long length, long offset) {
    return withContext(context -> clearRangeWithResponse(length, offset, context));
}

// Clears [offset, offset + length) by issuing a CLEAR range write with a zero-length body.
Mono<Response<FileUploadInfo>> clearRangeWithResponse(long length, long offset, Context context) {
    FileRange range = new FileRange(offset, offset + length - 1);
    return postProcessResponse(azureFileStorageClient.files()
        .uploadRangeWithRestResponseAsync(shareName, filePath, range.toString(), FileRangeWriteType.CLEAR,
            0L, null, null, null, context))
        .map(this::uploadResponse);
}

/**
 * Uploads file to storage file service.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * <p> Upload the file from the source file path. </p>
 *
 * {@codesnippet com.azure.storage.file.fileAsyncClient.uploadFromFile
class FileAsyncClient { private final ClientLogger logger = new ClientLogger(FileAsyncClient.class); private static final long FILE_DEFAULT_BLOCK_SIZE = 4 * 1024 * 1024L; private static final long DOWNLOAD_UPLOAD_CHUNK_TIMEOUT = 300; private final AzureFileStorageImpl azureFileStorageClient; private final String shareName; private final String filePath; private final String snapshot; /** * Creates a FileAsyncClient that sends requests to the storage file at {@link AzureFileStorageImpl * endpoint}. Each service call goes through the {@link HttpPipeline pipeline} in the {@code client}. * * @param azureFileStorageClient Client that interacts with the service interfaces * @param shareName Name of the share * @param filePath Path to the file * @param snapshot The snapshot of the share */ FileAsyncClient(AzureFileStorageImpl azureFileStorageClient, String shareName, String filePath, String snapshot) { Objects.requireNonNull(shareName); Objects.requireNonNull(filePath); this.shareName = shareName; this.filePath = filePath; this.snapshot = snapshot; this.azureFileStorageClient = azureFileStorageClient; } /** * Get the url of the storage file client. * * @return the URL of the storage file client * @throws RuntimeException If the file is using a malformed URL. */ /** * Creates a file in the storage account and returns a response of {@link FileInfo} to interact with it. * * <p><strong>Code Samples</strong></p> * * <p>Create the file with size 1KB.</p> * * {@codesnippet com.azure.storage.file.fileClient.create} * * <p>For more information, see the * <a href="https: * * @param maxSize The maximum size in bytes for the file, up to 1 TiB. * @return A response containing the file info and the status of creating the file. * @throws StorageException If the file has already existed, the parent directory does not exist or fileName is an * invalid resource name. 
*/ public Mono<FileInfo> create(long maxSize) { return createWithResponse(maxSize, null, null, null, null).flatMap(FluxUtil::toMono); } /** * Creates a file in the storage account and returns a response of FileInfo to interact with it. * * <p><strong>Code Samples</strong></p> * * <p>Create the file with length of 1024 bytes, some headers, file smb properties and metadata.</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.createWithResponse * * <p>For more information, see the * <a href="https: * * @param maxSize The maximum size in bytes for the file, up to 1 TiB. * @param httpHeaders The user settable file http headers. * @param smbProperties The user settable file smb properties. * @param filePermission The file permission of the file. * @param metadata Optional name-value pairs associated with the file as metadata. * @return A response containing the {@link FileInfo file info} and the status of creating the file. * @throws StorageException If the directory has already existed, the parent directory does not exist or directory * is an invalid resource name. */ public Mono<Response<FileInfo>> createWithResponse(long maxSize, FileHTTPHeaders httpHeaders, FileSmbProperties smbProperties, String filePermission, Map<String, String> metadata) { return withContext(context -> createWithResponse(maxSize, httpHeaders, smbProperties, filePermission, metadata, context)); } Mono<Response<FileInfo>> createWithResponse(long maxSize, FileHTTPHeaders httpHeaders, FileSmbProperties smbProperties, String filePermission, Map<String, String> metadata, Context context) { smbProperties = smbProperties == null ? 
new FileSmbProperties() : smbProperties; filePermissionAndKeyHelper(filePermission, smbProperties.getFilePermissionKey()); filePermission = smbProperties.setFilePermission(filePermission, FileConstants.FILE_PERMISSION_INHERIT); String filePermissionKey = smbProperties.getFilePermissionKey(); String fileAttributes = smbProperties.setNtfsFileAttributes(FileConstants.FILE_ATTRIBUTES_NONE); String fileCreationTime = smbProperties.setFileCreationTime(FileConstants.FILE_TIME_NOW); String fileLastWriteTime = smbProperties.setFileLastWriteTime(FileConstants.FILE_TIME_NOW); return postProcessResponse(azureFileStorageClient.files() .createWithRestResponseAsync(shareName, filePath, maxSize, fileAttributes, fileCreationTime, fileLastWriteTime, null, metadata, filePermission, filePermissionKey, httpHeaders, context)) .map(this::createFileInfoResponse); } /** * Copies a blob or file to a destination file within the storage account. * * <p><strong>Code Samples</strong></p> * * <p>Copy file from source url to the {@code resourcePath} </p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.startCopy * * <p>For more information, see the * <a href="https: * * @param sourceUrl Specifies the URL of the source file or blob, up to 2 KB in length. * @param metadata Optional name-value pairs associated with the file as metadata. Metadata names must adhere to the * naming rules. * @return The {@link FileCopyInfo file copy info}. * @see <a href="https: */ public Mono<FileCopyInfo> startCopy(String sourceUrl, Map<String, String> metadata) { return startCopyWithResponse(sourceUrl, metadata).flatMap(FluxUtil::toMono); } /** * Copies a blob or file to a destination file within the storage account. 
*
 * <p><strong>Code Samples</strong></p>
 *
 * <p>Copy file from source url to the {@code resourcePath} </p>
 *
 * {@codesnippet com.azure.storage.file.fileAsyncClient.startCopyWithResponse}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param sourceUrl Specifies the URL of the source file or blob, up to 2 KB in length.
 * @param metadata Optional name-value pairs associated with the file as metadata. Metadata names must adhere to the
 * naming rules.
 * @return A response containing the {@link FileCopyInfo file copy info} and the status of copying the file.
 * @see <a href="https:
 */
public Mono<Response<FileCopyInfo>> startCopyWithResponse(String sourceUrl, Map<String, String> metadata) {
    return withContext(context -> startCopyWithResponse(sourceUrl, metadata, context));
}

Mono<Response<FileCopyInfo>> startCopyWithResponse(String sourceUrl, Map<String, String> metadata,
    Context context) {
    return postProcessResponse(azureFileStorageClient.files()
        .startCopyWithRestResponseAsync(shareName, filePath, sourceUrl, null, metadata, context))
        .map(this::startCopyResponse);
}

/**
 * Aborts a pending Copy File operation, and leaves a destination file with zero length and full metadata.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * <p>Abort copy file from copy id("someCopyId") </p>
 *
 * {@codesnippet com.azure.storage.file.fileAsyncClient.abortCopy}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param copyId Specifies the copy id which has copying pending status associate with it.
 * @return An empty response.
 */
public Mono<Void> abortCopy(String copyId) {
    return abortCopyWithResponse(copyId).flatMap(FluxUtil::toMono);
}

/**
 * Aborts a pending Copy File operation, and leaves a destination file with zero length and full metadata.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * <p>Abort copy file from copy id("someCopyId") </p>
 *
 * {@codesnippet com.azure.storage.file.fileAsyncClient.abortCopyWithResponse}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param copyId Specifies the copy id which has copying pending status associate with it.
 * @return A response containing the status of aborting copy the file.
 */
public Mono<Response<Void>> abortCopyWithResponse(String copyId) {
    return withContext(context -> abortCopyWithResponse(copyId, context));
}

Mono<Response<Void>> abortCopyWithResponse(String copyId, Context context) {
    return postProcessResponse(azureFileStorageClient.files()
        .abortCopyWithRestResponseAsync(shareName, filePath, copyId, context))
        .map(response -> new SimpleResponse<>(response, null));
}

/**
 * Downloads a file from the system, including its metadata and properties into a file specified by the path.
 *
 * <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
 * will be thrown.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * <p>Download the file to current folder. </p>
 *
 * {@codesnippet com.azure.storage.file.fileAsyncClient.downloadToFile}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param downloadFilePath The path where store the downloaded file
 * @return The downloaded file's {@link FileProperties properties}.
 */
public Mono<FileProperties> downloadToFile(String downloadFilePath) {
    return downloadToFileWithResponse(downloadFilePath, null).flatMap(FluxUtil::toMono);
}

/**
 * Downloads a file from the system, including its metadata and properties into a file specified by the path.
 *
 * <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
 * will be thrown.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * <p>Download the file from 1024 to 2048 bytes to current folder.
</p>
 *
 * {@codesnippet com.azure.storage.file.fileAsyncClient.downloadToFileWithResponse}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param downloadFilePath The path where store the downloaded file
 * @param range Optional byte range which returns file data only from the specified range.
 * @return A response containing the downloaded file's {@link FileProperties properties}.
 */
public Mono<Response<FileProperties>> downloadToFileWithResponse(String downloadFilePath, FileRange range) {
    return withContext(context -> downloadToFileWithResponse(downloadFilePath, range, context));
}

Mono<Response<FileProperties>> downloadToFileWithResponse(String downloadFilePath, FileRange range,
    Context context) {
    // Mono.using ties the channel's lifetime to the download pipeline; CREATE_NEW enforces the
    // documented "file must not exist" contract by failing when the target already exists.
    return Mono.using(() -> channelSetup(downloadFilePath, StandardOpenOption.WRITE,
        StandardOpenOption.CREATE_NEW),
        channel -> getPropertiesWithResponse(context).flatMap(response ->
            downloadResponseInChunk(response, channel, range, context)),
        this::channelCleanUp);
}

// Splits the requested range (whole file when range is null) into FILE_DEFAULT_BLOCK_SIZE chunks,
// downloads each chunk and writes it at its offset in the channel, then completes with the
// original properties response.
private Mono<Response<FileProperties>> downloadResponseInChunk(Response<FileProperties> response,
    AsynchronousFileChannel channel, FileRange range, Context context) {
    // NOTE(review): the null-range fallback uses getContentLength() as the end and the loop treats
    // it as exclusive, whereas explicit FileRanges elsewhere use inclusive ends — confirm intended.
    return Mono.justOrEmpty(range).switchIfEmpty(Mono.just(new FileRange(0, response.getValue()
        .getContentLength())))
        .map(currentRange -> {
            List<FileRange> chunks = new ArrayList<>();
            for (long pos = currentRange.getStart(); pos < currentRange.getEnd();
                pos += FILE_DEFAULT_BLOCK_SIZE) {
                long count = FILE_DEFAULT_BLOCK_SIZE;
                if (pos + count > currentRange.getEnd()) {
                    count = currentRange.getEnd() - pos;
                }
                chunks.add(new FileRange(pos, pos + count - 1));
            }
            return chunks;
        }).flatMapMany(Flux::fromIterable).flatMap(chunk ->
            downloadWithPropertiesWithResponse(chunk, false, context)
                .map(dar -> dar.getValue().getBody())
                .subscribeOn(Schedulers.elastic())
                // Write each chunk at its offset relative to the start of the requested range;
                // time out and retry transient I/O/timeout failures up to 3 times.
                .flatMap(fbb -> FluxUtil
                    .writeFile(fbb, channel, chunk.getStart() - (range == null ? 0 : range.getStart()))
                    .subscribeOn(Schedulers.elastic())
                    .timeout(Duration.ofSeconds(DOWNLOAD_UPLOAD_CHUNK_TIMEOUT))
                    .retry(3, throwable -> throwable instanceof IOException
                        || throwable instanceof TimeoutException)))
        .then(Mono.just(response));
}

// Opens an AsynchronousFileChannel for the given path, converting IOException to unchecked.
private AsynchronousFileChannel channelSetup(String filePath, OpenOption... options) {
    try {
        return AsynchronousFileChannel.open(Paths.get(filePath), options);
    } catch (IOException e) {
        throw logger.logExceptionAsError(new UncheckedIOException(e));
    }
}

// Closes the channel, converting IOException to unchecked.
private void channelCleanUp(AsynchronousFileChannel channel) {
    try {
        channel.close();
    } catch (IOException e) {
        throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(e)));
    }
}

/**
 * Downloads a file from the system, including its metadata and properties
 *
 * <p><strong>Code Samples</strong></p>
 *
 * <p>Download the file with its metadata and properties. </p>
 *
 * {@codesnippet com.azure.storage.file.fileAsyncClient.downloadWithProperties}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @return The {@link FileDownloadInfo file download Info}
 */
public Mono<FileDownloadInfo> downloadWithProperties() {
    return downloadWithPropertiesWithResponse(null, null).flatMap(FluxUtil::toMono);
}

/**
 * Downloads a file from the system, including its metadata and properties
 *
 * <p><strong>Code Samples</strong></p>
 *
 * <p>Download the file from 1024 to 2048 bytes with its metadata and properties and without the contentMD5. </p>
 *
 * {@codesnippet com.azure.storage.file.fileAsyncClient.downloadWithPropertiesWithResponse}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param range Optional byte range which returns file data only from the specified range.
 * @param rangeGetContentMD5 Optional boolean which the service returns the MD5 hash for the range when it sets to
 * true, as long as the range is less than or equal to 4 MB in size.
* @return A response containing the {@link FileDownloadInfo file download Info} with headers and response status * code */ public Mono<Response<FileDownloadInfo>> downloadWithPropertiesWithResponse(FileRange range, Boolean rangeGetContentMD5) { return withContext(context -> downloadWithPropertiesWithResponse(range, rangeGetContentMD5, context)); } Mono<Response<FileDownloadInfo>> downloadWithPropertiesWithResponse(FileRange range, Boolean rangeGetContentMD5, Context context) { String rangeString = range == null ? null : range.toString(); return postProcessResponse(azureFileStorageClient.files() .downloadWithRestResponseAsync(shareName, filePath, null, rangeString, rangeGetContentMD5, context)) .map(this::downloadWithPropertiesResponse); } /** * Deletes the file associate with the client. * * <p><strong>Code Samples</strong></p> * * <p>Delete the file</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.delete} * * <p>For more information, see the * <a href="https: * * @return An empty response * @throws StorageException If the directory doesn't exist or the file doesn't exist. */ public Mono<Void> delete() { return deleteWithResponse(null).flatMap(FluxUtil::toMono); } /** * Deletes the file associate with the client. * * <p><strong>Code Samples</strong></p> * * <p>Delete the file</p> * * {@codesnippet com.azure.storage.file.fileAsyncClient.deleteWithResponse} * * <p>For more information, see the * <a href="https: * * @return A response that only contains headers and response status code * @throws StorageException If the directory doesn't exist or the file doesn't exist. 
*/
public Mono<Response<Void>> deleteWithResponse() {
    return withContext(this::deleteWithResponse);
}

// Context-accepting variant backing the public overload above.
Mono<Response<Void>> deleteWithResponse(Context context) {
    // Delete returns no body, so strip the deserialized payload and surface only status/headers.
    return postProcessResponse(azureFileStorageClient.files()
        .deleteWithRestResponseAsync(shareName, filePath, context))
        .map(response -> new SimpleResponse<>(response, null));
}

/**
 * Retrieves the properties of the storage account's file. The properties includes file metadata, last modified
 * date, is server encrypted, and eTag.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * <p>Retrieve file properties</p>
 *
 * {@codesnippet com.azure.storage.file.fileAsyncClient.getProperties}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @return {@link FileProperties Storage file properties}
 */
public Mono<FileProperties> getProperties() {
    return getPropertiesWithResponse().flatMap(FluxUtil::toMono);
}

/**
 * Retrieves the properties of the storage account's file. The properties includes file metadata, last modified
 * date, is server encrypted, and eTag.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * <p>Retrieve file properties</p>
 *
 * {@codesnippet com.azure.storage.file.fileAsyncClient.getPropertiesWithResponse}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @return A response containing the {@link FileProperties storage file properties} and response status code
 */
public Mono<Response<FileProperties>> getPropertiesWithResponse() {
    return withContext(this::getPropertiesWithResponse);
}

// Context-accepting variant backing the public overload above.
Mono<Response<FileProperties>> getPropertiesWithResponse(Context context) {
    // NOTE(review): `snapshot` appears to scope the request to a share snapshot when set — confirm
    // against the client's constructor/getSnapshotId contract.
    return postProcessResponse(azureFileStorageClient.files()
        .getPropertiesWithRestResponseAsync(shareName, filePath, snapshot, null, context))
        .map(this::getPropertiesResponse);
}

/**
 * Sets the user-defined file properties to associate to the file.
 *
 * <p>If {@code null} is passed for the fileProperties.httpHeaders it will clear the httpHeaders associated to the
 * file.
 * If {@code null} is passed for the fileProperties.filesmbproperties it will preserve the filesmb properties
 * associated with the file.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * <p>Set the httpHeaders of contentType of "text/plain"</p>
 *
 * {@codesnippet com.azure.storage.file.fileAsyncClient.setProperties}
 *
 * <p>Clear the metadata of the file and preserve the SMB properties</p>
 *
 * {@codesnippet com.azure.storage.file.fileAsyncClient.setProperties}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param newFileSize New file size of the file
 * @param httpHeaders The user settable file http headers.
 * @param smbProperties The user settable file smb properties.
 * @param filePermission The file permission of the file
 * @return The {@link FileInfo file info}
 * @throws IllegalArgumentException thrown if parameters fail the validation.
 */
public Mono<FileInfo> setProperties(long newFileSize, FileHTTPHeaders httpHeaders, FileSmbProperties smbProperties,
    String filePermission) {
    return setPropertiesWithResponse(newFileSize, httpHeaders, smbProperties, filePermission)
        .flatMap(FluxUtil::toMono);
}

/**
 * Sets the user-defined file properties to associate to the file.
 *
 * <p>If {@code null} is passed for the httpHeaders it will clear the httpHeaders associated to the file.
 * If {@code null} is passed for the filesmbproperties it will preserve the filesmbproperties associated with the
 * file.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * <p>Set the httpHeaders of contentType of "text/plain"</p>
 *
 * {@codesnippet com.azure.storage.file.fileAsyncClient.setPropertiesWithResponse}
 *
 * <p>Clear the metadata of the file and preserve the SMB properties</p>
 *
 * {@codesnippet com.azure.storage.file.fileAsyncClient.setPropertiesWithResponse}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param newFileSize New file size of the file.
 * @param httpHeaders The user settable file http headers.
 * @param smbProperties The user settable file smb properties.
 * @param filePermission The file permission of the file.
 * @return Response containing the {@link FileInfo file info} and response status code.
 * @throws IllegalArgumentException thrown if parameters fail the validation.
 */
public Mono<Response<FileInfo>> setPropertiesWithResponse(long newFileSize, FileHTTPHeaders httpHeaders,
    FileSmbProperties smbProperties, String filePermission) {
    return withContext(context ->
        setPropertiesWithResponse(newFileSize, httpHeaders, smbProperties, filePermission, context));
}

// Context-accepting variant backing the public overload above.
Mono<Response<FileInfo>> setPropertiesWithResponse(long newFileSize, FileHTTPHeaders httpHeaders,
    FileSmbProperties smbProperties, String filePermission, Context context) {
    smbProperties = smbProperties == null ? new FileSmbProperties() : smbProperties;

    // Presumably validates that a literal permission and a permission key are not both supplied —
    // TODO confirm against filePermissionAndKeyHelper.
    filePermissionAndKeyHelper(filePermission, smbProperties.getFilePermissionKey());

    // The FileConstants.PRESERVE defaults suggest omitted SMB settings keep their current
    // service-side values — confirm against FileSmbProperties.
    filePermission = smbProperties.setFilePermission(filePermission, FileConstants.PRESERVE);
    String filePermissionKey = smbProperties.getFilePermissionKey();
    String fileAttributes = smbProperties.setNtfsFileAttributes(FileConstants.PRESERVE);
    String fileCreationTime = smbProperties.setFileCreationTime(FileConstants.PRESERVE);
    String fileLastWriteTime = smbProperties.setFileLastWriteTime(FileConstants.PRESERVE);

    return postProcessResponse(azureFileStorageClient.files()
        .setHTTPHeadersWithRestResponseAsync(shareName, filePath, fileAttributes, fileCreationTime,
            fileLastWriteTime, null, newFileSize, filePermission, filePermissionKey, httpHeaders, context))
        .map(this::setPropertiesResponse);
}

/**
 * Sets the user-defined metadata to associate to the file.
*
* <p>If {@code null} is passed for the metadata it will clear the metadata associated to the file.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <p>Set the metadata to "file:updatedMetadata"</p>
*
* {@codesnippet com.azure.storage.file.fileAsyncClient.setMetadata}
*
* <p>Clear the metadata of the file</p>
*
* {@codesnippet com.azure.storage.file.fileAsyncClient.setMetadataWithResponse}
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Metadata to set on the file; if null is passed the metadata for the file is cleared
* @return {@link FileMetadataInfo file meta info}
* @throws StorageException If the file doesn't exist or the metadata contains invalid keys
*/
public Mono<FileMetadataInfo> setMetadata(Map<String, String> metadata) {
    return setMetadataWithResponse(metadata).flatMap(FluxUtil::toMono);
}

/**
 * Sets the user-defined metadata to associate to the file.
 *
 * <p>If {@code null} is passed for the metadata it will clear the metadata associated to the file.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * <p>Set the metadata to "file:updatedMetadata"</p>
 *
 * {@codesnippet com.azure.storage.file.fileAsyncClient.setMetadataWithResponse}
 *
 * <p>Clear the metadata of the file</p>
 *
 * {@codesnippet com.azure.storage.file.fileAsyncClient.setMetadataWithResponse}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param metadata Metadata to set on the file; if null is passed the metadata for the file is cleared
 * @return A response containing the {@link FileMetadataInfo file meta info} and status code
 * @throws StorageException If the file doesn't exist or the metadata contains invalid keys
 */
public Mono<Response<FileMetadataInfo>> setMetadataWithResponse(Map<String, String> metadata) {
    return withContext(context -> setMetadataWithResponse(metadata, context));
}

// Context-accepting variant backing the public overload above.
Mono<Response<FileMetadataInfo>> setMetadataWithResponse(Map<String, String> metadata, Context context) {
    return
        postProcessResponse(azureFileStorageClient.files()
            .setMetadataWithRestResponseAsync(shareName, filePath, null, metadata, context))
            .map(this::setMetadataResponse);
}

/**
 * Uploads a range of bytes to the beginning of a file in storage file service. Upload operations performs an
 * in-place write on the specified file.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * <p>Upload data "default" to the file in Storage File Service. </p>
 *
 * {@codesnippet com.azure.storage.file.fileAsyncClient.upload}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param data The data which will upload to the storage file.
 * @param length Specifies the number of bytes being transmitted in the request body.
 * @return The {@link FileUploadInfo file upload info}
 */
public Mono<FileUploadInfo> upload(Flux<ByteBuffer> data, long length) {
    return uploadWithResponse(data, length).flatMap(FluxUtil::toMono);
}

/**
 * Uploads a range of bytes to the beginning of a file in storage file service. Upload operations performs an
 * in-place write on the specified file.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * <p>Upload "default" to the file. </p>
 *
 * {@codesnippet com.azure.storage.file.fileAsyncClient.uploadWithResponse}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param data The data which will upload to the storage file.
 * @param length Specifies the number of bytes being transmitted in the request body. When the FileRangeWriteType is
 * set to clear, the value of this header must be set to zero.
 * @return A response containing the {@link FileUploadInfo file upload info} with headers and response status code
 * @throws StorageException If you attempt to upload a range that is larger than 4 MB, the service returns status
 * code 413 (Request Entity Too Large)
 */
public Mono<Response<FileUploadInfo>> uploadWithResponse(Flux<ByteBuffer> data, long length) {
    return withContext(context -> uploadWithResponse(data, length, context));
}

// Context-accepting variant backing the public overload above.
Mono<Response<FileUploadInfo>> uploadWithResponse(Flux<ByteBuffer> data, long length, Context context) {
    // Ranges are inclusive, hence [0, length - 1] covers exactly `length` bytes from the start.
    FileRange range = new FileRange(0, length - 1);
    return postProcessResponse(azureFileStorageClient.files()
        .uploadRangeWithRestResponseAsync(shareName, filePath, range.toString(), FileRangeWriteType.UPDATE,
            length, data, null, null, context))
        .map(this::uploadResponse);
}

/**
 * Uploads a range of bytes to a specific offset of a file in storage file service. Upload operations performs an
 * in-place write on the specified file.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * <p>Upload data "default" starting from 1024 bytes. </p>
 *
 * {@codesnippet com.azure.storage.file.fileAsyncClient.upload}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param data The data which will upload to the storage file.
 * @param length Specifies the number of bytes being transmitted in the request body.
 * @param offset Starting point of the upload range; a value of 0 uploads from the beginning of the file
 * @return The {@link FileUploadInfo file upload info}
 * @throws StorageException If you attempt to upload a range that is larger than 4 MB, the service returns status
 * code 413 (Request Entity Too Large)
 */
public Mono<FileUploadInfo> upload(Flux<ByteBuffer> data, long length, long offset) {
    return uploadWithResponse(data, length, offset).flatMap(FluxUtil::toMono);
}

/**
 * Uploads a range of bytes to a specific offset of a file in storage file service. Upload operations performs an
 * in-place write on the specified file.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Upload the file from 1024 to 2048 bytes with its metadata and properties and without the contentMD5. </p>
*
* {@codesnippet com.azure.storage.file.fileAsyncClient.uploadWithResponse}
*
* <p>For more information, see the
* <a href="https:
*
* @param data The data which will upload to the storage file.
* @param offset Starting point of the upload range; a value of 0 uploads from the beginning of the file
* @param length Specifies the number of bytes being transmitted in the request body. When the FileRangeWriteType is
* set to clear, the value of this header must be set to zero.
* @return A response containing the {@link FileUploadInfo file upload info} with headers and response status code
* @throws StorageException If you attempt to upload a range that is larger than 4 MB, the service returns status
* code 413 (Request Entity Too Large)
*/
public Mono<Response<FileUploadInfo>> uploadWithResponse(Flux<ByteBuffer> data, long length, long offset) {
    return withContext(context -> uploadWithResponse(data, length, offset, context));
}

// Context-accepting variant backing the public overload above.
Mono<Response<FileUploadInfo>> uploadWithResponse(Flux<ByteBuffer> data, long length, long offset, Context context) {
    // Ranges are inclusive, hence [offset, offset + length - 1] covers exactly `length` bytes.
    FileRange range = new FileRange(offset, offset + length - 1);
    return postProcessResponse(azureFileStorageClient.files()
        .uploadRangeWithRestResponseAsync(shareName, filePath, range.toString(), FileRangeWriteType.UPDATE,
            length, data, null, null, context))
        .map(this::uploadResponse);
}

/**
 * Uploads a range of bytes from one file to another file.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * <p>Upload a number of bytes from a file at defined source and destination offsets </p>
 *
 * {@codesnippet com.azure.storage.file.fileAsyncClient.uploadRangeFromURL}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param length Specifies the number of bytes being transmitted in the request body.
 * @param destinationOffset Starting point of the upload range on the destination.
 * @param sourceOffset Starting point of the upload range on the source.
 * @param sourceURI Specifies the URL of the source file.
 * @return The {@link FileUploadRangeFromURLInfo file upload range from url info}
 */
public Mono<FileUploadRangeFromURLInfo> uploadRangeFromURL(long length, long destinationOffset, long sourceOffset,
    URI sourceURI) {
    return uploadRangeFromURLWithResponse(length, destinationOffset, sourceOffset, sourceURI)
        .flatMap(FluxUtil::toMono);
}

/**
 * Uploads a range of bytes from one file to another file.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * <p>Upload a number of bytes from a file at defined source and destination offsets </p>
 *
 * {@codesnippet com.azure.storage.file.fileAsyncClient.uploadRangeFromURLWithResponse}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param length Specifies the number of bytes being transmitted in the request body.
 * @param destinationOffset Starting point of the upload range on the destination.
 * @param sourceOffset Starting point of the upload range on the source.
 * @param sourceURI Specifies the URL of the source file.
 * @return A response containing the {@link FileUploadRangeFromURLInfo file upload range from url info} with headers
 * and response status code.
 */
public Mono<Response<FileUploadRangeFromURLInfo>> uploadRangeFromURLWithResponse(long length,
    long destinationOffset, long sourceOffset, URI sourceURI) {
    return withContext(context ->
        uploadRangeFromURLWithResponse(length, destinationOffset, sourceOffset, sourceURI, context));
}

// Context-accepting variant backing the public overload above.
Mono<Response<FileUploadRangeFromURLInfo>> uploadRangeFromURLWithResponse(long length, long destinationOffset,
    long sourceOffset, URI sourceURI, Context context) {
    // Both ranges are inclusive and must span the same number of bytes.
    FileRange destinationRange = new FileRange(destinationOffset, destinationOffset + length - 1);
    FileRange sourceRange = new FileRange(sourceOffset, sourceOffset + length - 1);
    // NOTE(review): the literal 0 is presumably the required content length for a copy-from-URL
    // request (no body is sent) — confirm against the generated client's signature.
    return postProcessResponse(azureFileStorageClient.files()
        .uploadRangeFromURLWithRestResponseAsync(shareName, filePath, destinationRange.toString(),
            sourceURI.toString(), 0, null, sourceRange.toString(), null, null, context))
        .map(this::uploadRangeFromURLResponse);
}

/**
 * Clear a range of bytes to specific of a file in storage file service. Clear operations performs an in-place write
 * on the specified file.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * <p>Clears the first 1024 bytes. </p>
 *
 * {@codesnippet com.azure.storage.file.fileAsyncClient.clearRange}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param length Specifies the number of bytes being cleared.
 * @return The {@link FileUploadInfo file upload info}
 */
public Mono<FileUploadInfo> clearRange(long length) {
    return clearRangeWithResponse(length, 0).flatMap(FluxUtil::toMono);
}

/**
 * Clear a range of bytes to specific of a file in storage file service. Clear operations performs an in-place write
 * on the specified file.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * <p>Clear the range starting from 1024 with length of 1024. </p>
 *
 * {@codesnippet com.azure.storage.file.fileAsyncClient.clearRange}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param length Specifies the number of bytes being cleared in the request body.
 * @param offset Starting point of the range to clear; a value of 0 clears from the beginning of the file
 * @return A response of {@link FileUploadInfo file upload info} that only contains headers and response status code
 */
public Mono<Response<FileUploadInfo>> clearRangeWithResponse(long length, long offset) {
    return withContext(context -> clearRangeWithResponse(length, offset, context));
}

// Context-accepting variant backing the public overload above.
Mono<Response<FileUploadInfo>> clearRangeWithResponse(long length, long offset, Context context) {
    // Ranges are inclusive, hence [offset, offset + length - 1] covers exactly `length` bytes.
    FileRange range = new FileRange(offset, offset + length - 1);
    // Clear sends no body: write type CLEAR with a 0 content length and null data.
    return postProcessResponse(azureFileStorageClient.files()
        .uploadRangeWithRestResponseAsync(shareName, filePath, range.toString(), FileRangeWriteType.CLEAR,
            0L, null, null, null, context))
        .map(this::uploadResponse);
}

/**
 * Uploads file to storage file service.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * <p> Upload the file from the source file path. </p>
 *
 * {@codesnippet com.azure.storage.file.fileAsyncClient.uploadFromFile