comment stringlengths 1 45k | method_body stringlengths 23 281k | target_code stringlengths 0 5.16k | method_body_after stringlengths 12 281k | context_before stringlengths 8 543k | context_after stringlengths 8 543k |
|---|---|---|---|---|---|
```suggestion // So it is okay to return an empty Flux. ``` | void loadBalance() {
/*
* Retrieve current partition ownership details from the datastore.
*/
final Mono<Map<String, PartitionOwnership>> partitionOwnershipMono = checkpointStore
.listOwnership(fullyQualifiedNamespace, eventHubName, consumerGroupName)
.timeout(Duration.ofMinutes(1))
.collectMap(PartitionOwnership::getPartitionId, Function.identity());
/*
* Retrieve the list of partition ids from the Event Hub.
*/
final Mono<List<String>> partitionsMono = eventHubAsyncClient
.getPartitionIds()
.timeout(Duration.ofMinutes(1))
.onErrorResume(TimeoutException.class, error -> {
logger.warning("Unable to get partitionIds from eventHubAsyncClient.");
return Flux.empty();
})
.collectList();
Mono.zip(partitionOwnershipMono, partitionsMono)
.flatMap(this::loadBalance)
.subscribe(ignored -> { },
ex -> {
logger.warning(Messages.LOAD_BALANCING_FAILED, ex.getMessage(), ex);
ErrorContext errorContext = new ErrorContext(partitionAgnosticContext, ex);
processError.accept(errorContext);
}, () -> logger.info("Load balancing completed successfully"));
} | void loadBalance() {
/*
* Retrieve current partition ownership details from the datastore.
*/
final Mono<Map<String, PartitionOwnership>> partitionOwnershipMono = checkpointStore
.listOwnership(fullyQualifiedNamespace, eventHubName, consumerGroupName)
.timeout(Duration.ofMinutes(1))
.collectMap(PartitionOwnership::getPartitionId, Function.identity());
/*
* Retrieve the list of partition ids from the Event Hub.
*/
final Mono<List<String>> partitionsMono = eventHubAsyncClient
.getPartitionIds()
.timeout(Duration.ofMinutes(1))
.onErrorResume(TimeoutException.class, error -> {
logger.warning("Unable to get partitionIds from eventHubAsyncClient.");
return Flux.empty();
})
.collectList();
Mono.zip(partitionOwnershipMono, partitionsMono)
.flatMap(this::loadBalance)
.subscribe(ignored -> { },
ex -> {
logger.warning(Messages.LOAD_BALANCING_FAILED, ex.getMessage(), ex);
ErrorContext errorContext = new ErrorContext(partitionAgnosticContext, ex);
processError.accept(errorContext);
}, () -> logger.info("Load balancing completed successfully"));
} | class PartitionBasedLoadBalancer {
private static final Random RANDOM = new Random();
private final ClientLogger logger = new ClientLogger(PartitionBasedLoadBalancer.class);
private final String eventHubName;
private final String consumerGroupName;
private final CheckpointStore checkpointStore;
private final EventHubAsyncClient eventHubAsyncClient;
private final String ownerId;
private final long inactiveTimeLimitInSeconds;
private final PartitionPumpManager partitionPumpManager;
private final String fullyQualifiedNamespace;
private final Consumer<ErrorContext> processError;
private final PartitionContext partitionAgnosticContext;
/**
* Creates an instance of PartitionBasedLoadBalancer for the given Event Hub name and consumer group.
*
* @param checkpointStore The partition manager that this load balancer will use to read/update ownership details.
* @param eventHubAsyncClient The asynchronous Event Hub client used to consume events.
* @param eventHubName The Event Hub name the {@link EventProcessorClient} is associated with.
* @param consumerGroupName The consumer group name the {@link EventProcessorClient} is associated with.
* @param ownerId The identifier of the {@link EventProcessorClient} that owns this load balancer.
* @param inactiveTimeLimitInSeconds The time in seconds to wait for an update on an ownership record before
* assuming the owner of the partition is inactive.
* @param partitionPumpManager The partition pump manager that keeps track of all EventHubConsumers and partitions
* that this {@link EventProcessorClient} is processing.
* @param processError The callback that will be called when an error occurs while running the load balancer.
*/
PartitionBasedLoadBalancer(final CheckpointStore checkpointStore,
final EventHubAsyncClient eventHubAsyncClient, final String fullyQualifiedNamespace,
final String eventHubName, final String consumerGroupName, final String ownerId,
final long inactiveTimeLimitInSeconds, final PartitionPumpManager partitionPumpManager,
final Consumer<ErrorContext> processError) {
this.checkpointStore = checkpointStore;
this.eventHubAsyncClient = eventHubAsyncClient;
this.fullyQualifiedNamespace = fullyQualifiedNamespace;
this.eventHubName = eventHubName;
this.consumerGroupName = consumerGroupName;
this.ownerId = ownerId;
this.inactiveTimeLimitInSeconds = inactiveTimeLimitInSeconds;
this.partitionPumpManager = partitionPumpManager;
this.processError = processError;
this.partitionAgnosticContext = new PartitionContext(fullyQualifiedNamespace, eventHubName,
consumerGroupName, "NONE");
}
/**
* This is the main method responsible for load balancing. This method is expected to be invoked by the {@link
* EventProcessorClient} periodically. Every call to this method will result in this {@link EventProcessorClient}
* owning <b>at most one</b> new partition.
* <p>
* The load is considered balanced when no active EventProcessor owns 2 partitions more than any other active
* EventProcessor. Given that each invocation to this method results in ownership claim of at most one partition,
* this algorithm converges gradually towards a steady state.
* </p>
* When a new partition is claimed, this method is also responsible for starting a partition pump that creates an
* {@link EventHubConsumerAsyncClient} for processing events from that partition.
*/
/*
* This method works with the given partition ownership details and Event Hub partitions to evaluate whether the
* current Event Processor should take on the responsibility of processing more partitions.
*/
private Mono<Void> loadBalance(final Tuple2<Map<String, PartitionOwnership>, List<String>> tuple) {
return Mono.fromRunnable(() -> {
logger.info("Starting load balancer for {}", this.ownerId);
Map<String, PartitionOwnership> partitionOwnershipMap = tuple.getT1();
List<String> partitionIds = tuple.getT2();
if (CoreUtils.isNullOrEmpty(partitionIds)) {
throw logger.logExceptionAsError(Exceptions.propagate(
new IllegalStateException("There are no partitions in Event Hub " + eventHubName)));
}
int numberOfPartitions = partitionIds.size();
logger.info("CheckpointStore returned {} ownership records", partitionOwnershipMap.size());
logger.info("Event Hubs service returned {} partitions", numberOfPartitions);
if (!isValid(partitionOwnershipMap)) {
throw logger.logExceptionAsError(Exceptions.propagate(
new IllegalStateException("Invalid partitionOwnership data from CheckpointStore")));
}
/*
* Remove all partitions' ownership that have not been modified for a configuration period of time. This
* means that the previous EventProcessor that owned the partition is probably down and the partition is now
* eligible to be claimed by other EventProcessors.
*/
Map<String, PartitionOwnership> activePartitionOwnershipMap = removeInactivePartitionOwnerships(
partitionOwnershipMap);
logger.info("Number of active ownership records {}", activePartitionOwnershipMap.size());
/*
* Create a map of owner id and a list of partitions it owns
*/
Map<String, List<PartitionOwnership>> ownerPartitionMap = activePartitionOwnershipMap.values()
.stream()
.collect(
Collectors.groupingBy(PartitionOwnership::getOwnerId, mapping(Function.identity(), toList())));
ownerPartitionMap.putIfAbsent(this.ownerId, new ArrayList<>());
if (CoreUtils.isNullOrEmpty(activePartitionOwnershipMap)) {
/*
* If the active partition ownership map is empty, this is the first time an event processor is
* running or all Event Processors are down for this Event Hub, consumer group combination. All
* partitions in this Event Hub are available to claim. Choose a random partition to claim ownership.
*/
claimOwnership(partitionOwnershipMap, ownerPartitionMap,
partitionIds.get(RANDOM.nextInt(numberOfPartitions)));
return;
}
/*
* Find the minimum number of partitions every event processor should own when the load is
* evenly distributed.
*/
int numberOfActiveEventProcessors = ownerPartitionMap.size();
logger.info("Number of active event processors {}", ownerPartitionMap.size());
int minPartitionsPerEventProcessor = numberOfPartitions / numberOfActiveEventProcessors;
/*
* If the number of partitions in Event Hub is not evenly divisible by number of active event processors,
* a few Event Processors may own 1 additional partition than the minimum when the load is balanced.
* Calculate the number of event processors that can own additional partition.
*/
int numberOfEventProcessorsWithAdditionalPartition = numberOfPartitions % numberOfActiveEventProcessors;
logger.info("Expected min partitions per event processor = {}, expected number of event "
+ "processors with additional partition = {}", minPartitionsPerEventProcessor,
numberOfEventProcessorsWithAdditionalPartition);
if (isLoadBalanced(minPartitionsPerEventProcessor, numberOfEventProcessorsWithAdditionalPartition,
ownerPartitionMap)) {
logger.info("Load is balanced with this event processor owning {} partitions",
ownerPartitionMap.get(ownerId).size());
checkpointStore.claimOwnership(partitionPumpManager.getPartitionPumps().keySet()
.stream()
.map(partitionId -> createPartitionOwnershipRequest(partitionOwnershipMap, partitionId))
.collect(Collectors.toList()))
.subscribe();
return;
}
if (!shouldOwnMorePartitions(minPartitionsPerEventProcessor, ownerPartitionMap)) {
logger.info("This event processor owns {} partitions and shouldn't own more",
ownerPartitionMap.get(ownerId).size());
checkpointStore.claimOwnership(partitionPumpManager.getPartitionPumps().keySet()
.stream()
.map(partitionId -> createPartitionOwnershipRequest(partitionOwnershipMap, partitionId))
.collect(Collectors.toList()))
.subscribe();
return;
}
logger.info(
"Load is unbalanced and this event processor owns {} partitions and should own more partitions",
ownerPartitionMap.get(ownerId).size());
/*
* If some partitions are unclaimed, this could be because an event processor is down and
* it's partitions are now available for others to own or because event processors are just
* starting up and gradually claiming partitions to own or new partitions were added to Event Hub.
* Find any partition that is not actively owned and claim it.
*
* OR
*
* Find a partition to steal from another event processor. Pick the event processor that has owns the
* highest number of partitions.
*/
String partitionToClaim = partitionIds.parallelStream()
.filter(partitionId -> !activePartitionOwnershipMap.containsKey(partitionId))
.findAny()
.orElseGet(() -> {
logger.info("No unclaimed partitions, stealing from another event processor");
return findPartitionToSteal(ownerPartitionMap);
});
claimOwnership(partitionOwnershipMap, ownerPartitionMap, partitionToClaim);
});
}
/*
* Check if partition ownership data is valid before proceeding with load balancing.
*/
private boolean isValid(final Map<String, PartitionOwnership> partitionOwnershipMap) {
return partitionOwnershipMap.values()
.stream()
.noneMatch(partitionOwnership -> {
return partitionOwnership.getEventHubName() == null
|| !partitionOwnership.getEventHubName().equals(this.eventHubName)
|| partitionOwnership.getConsumerGroup() == null
|| !partitionOwnership.getConsumerGroup().equals(this.consumerGroupName)
|| partitionOwnership.getPartitionId() == null
|| partitionOwnership.getLastModifiedTime() == null
|| partitionOwnership.getETag() == null;
});
}
/*
* Find the event processor that owns the maximum number of partitions and steal a random partition
* from it.
*/
private String findPartitionToSteal(final Map<String, List<PartitionOwnership>> ownerPartitionMap) {
Map.Entry<String, List<PartitionOwnership>> ownerWithMaxPartitions = ownerPartitionMap.entrySet()
.stream()
.max(Comparator.comparingInt(entry -> entry.getValue().size()))
.get();
int numberOfPartitions = ownerWithMaxPartitions.getValue().size();
logger.info("Owner id {} owns {} partitions, stealing a partition from it", ownerWithMaxPartitions.getKey(),
numberOfPartitions);
return ownerWithMaxPartitions.getValue().get(RANDOM.nextInt(numberOfPartitions)).getPartitionId();
}
/*
* When the load is balanced, all active event processors own at least {@code minPartitionsPerEventProcessor}
* and only {@code numberOfEventProcessorsWithAdditionalPartition} event processors will own 1 additional
* partition.
*/
private boolean isLoadBalanced(final int minPartitionsPerEventProcessor,
final int numberOfEventProcessorsWithAdditionalPartition,
final Map<String, List<PartitionOwnership>> ownerPartitionMap) {
int count = 0;
for (List<PartitionOwnership> partitionOwnership : ownerPartitionMap.values()) {
int numberOfPartitions = partitionOwnership.size();
if (numberOfPartitions < minPartitionsPerEventProcessor
|| numberOfPartitions > minPartitionsPerEventProcessor + 1) {
return false;
}
if (numberOfPartitions == minPartitionsPerEventProcessor + 1) {
count++;
}
}
return count == numberOfEventProcessorsWithAdditionalPartition;
}
/*
* This method is called after determining that the load is not balanced. This method will evaluate
* if the current event processor should own more partitions. Specifically, this method returns true if the
* current event processor owns less than the minimum number of partitions or if it owns the minimum number
* and no other event processor owns lesser number of partitions than this event processor.
*/
private boolean shouldOwnMorePartitions(final int minPartitionsPerEventProcessor,
final Map<String, List<PartitionOwnership>> ownerPartitionMap) {
int numberOfPartitionsOwned = ownerPartitionMap.get(this.ownerId).size();
int leastPartitionsOwnedByAnyEventProcessor =
ownerPartitionMap.values().stream().min(Comparator.comparingInt(List::size)).get().size();
return numberOfPartitionsOwned < minPartitionsPerEventProcessor
|| numberOfPartitionsOwned == leastPartitionsOwnedByAnyEventProcessor;
}
/*
* This method will create a new map of partition id and PartitionOwnership containing only those partitions
* that are actively owned. All entries in the original map returned by CheckpointStore that haven't been
* modified for a duration of time greater than the allowed inactivity time limit are assumed to be owned by
* dead event processors. These will not be included in the map returned by this method.
*/
private Map<String, PartitionOwnership> removeInactivePartitionOwnerships(
final Map<String, PartitionOwnership> partitionOwnershipMap) {
return partitionOwnershipMap
.entrySet()
.stream()
.filter(entry -> {
return (System.currentTimeMillis() - entry.getValue().getLastModifiedTime() < TimeUnit.SECONDS
.toMillis(inactiveTimeLimitInSeconds))
&& !CoreUtils.isNullOrEmpty(entry.getValue().getOwnerId());
}).collect(Collectors.toMap(Entry::getKey, Entry::getValue));
}
private void claimOwnership(final Map<String, PartitionOwnership> partitionOwnershipMap, Map<String,
List<PartitionOwnership>> ownerPartitionsMap, final String partitionIdToClaim) {
logger.info("Attempting to claim ownership of partition {}", partitionIdToClaim);
PartitionOwnership ownershipRequest = createPartitionOwnershipRequest(partitionOwnershipMap,
partitionIdToClaim);
List<PartitionOwnership> partitionsToClaim = new ArrayList<>();
partitionsToClaim.add(ownershipRequest);
partitionsToClaim.addAll(partitionPumpManager.getPartitionPumps()
.keySet()
.stream()
.map(partitionId -> createPartitionOwnershipRequest(partitionOwnershipMap, partitionId))
.collect(Collectors.toList()));
checkpointStore
.claimOwnership(partitionsToClaim)
.timeout(Duration.ofMinutes(1))
.doOnNext(partitionOwnership -> logger.info("Successfully claimed ownership of partition {}",
partitionOwnership.getPartitionId()))
.doOnError(ex -> logger
.warning(Messages.FAILED_TO_CLAIM_OWNERSHIP, ownershipRequest.getPartitionId(),
ex.getMessage(), ex))
.collectList()
.zipWhen(ownershipList -> checkpointStore.listCheckpoints(fullyQualifiedNamespace, eventHubName,
consumerGroupName)
.collectMap(checkpoint -> checkpoint.getPartitionId(), Function.identity()))
.subscribe(ownedPartitionCheckpointsTuple -> {
ownedPartitionCheckpointsTuple.getT1()
.stream()
.forEach(po -> partitionPumpManager.startPartitionPump(po,
ownedPartitionCheckpointsTuple.getT2().get(po.getPartitionId())));
},
ex -> {
logger.warning("Error while listing checkpoints", ex);
ErrorContext errorContext = new ErrorContext(partitionAgnosticContext, ex);
processError.accept(errorContext);
throw logger.logExceptionAsError(new IllegalStateException("Error while listing checkpoints", ex));
});
}
private PartitionOwnership createPartitionOwnershipRequest(
final Map<String, PartitionOwnership> partitionOwnershipMap,
final String partitionIdToClaim) {
PartitionOwnership previousPartitionOwnership = partitionOwnershipMap.get(partitionIdToClaim);
PartitionOwnership partitionOwnershipRequest = new PartitionOwnership()
.setFullyQualifiedNamespace(this.fullyQualifiedNamespace)
.setOwnerId(this.ownerId)
.setPartitionId(partitionIdToClaim)
.setConsumerGroup(this.consumerGroupName)
.setEventHubName(this.eventHubName)
.setETag(previousPartitionOwnership == null ? null : previousPartitionOwnership.getETag());
return partitionOwnershipRequest;
}
} | class PartitionBasedLoadBalancer {
private static final Random RANDOM = new Random();
private final ClientLogger logger = new ClientLogger(PartitionBasedLoadBalancer.class);
private final String eventHubName;
private final String consumerGroupName;
private final CheckpointStore checkpointStore;
private final EventHubAsyncClient eventHubAsyncClient;
private final String ownerId;
private final long inactiveTimeLimitInSeconds;
private final PartitionPumpManager partitionPumpManager;
private final String fullyQualifiedNamespace;
private final Consumer<ErrorContext> processError;
private final PartitionContext partitionAgnosticContext;
/**
* Creates an instance of PartitionBasedLoadBalancer for the given Event Hub name and consumer group.
*
* @param checkpointStore The partition manager that this load balancer will use to read/update ownership details.
* @param eventHubAsyncClient The asynchronous Event Hub client used to consume events.
* @param eventHubName The Event Hub name the {@link EventProcessorClient} is associated with.
* @param consumerGroupName The consumer group name the {@link EventProcessorClient} is associated with.
* @param ownerId The identifier of the {@link EventProcessorClient} that owns this load balancer.
* @param inactiveTimeLimitInSeconds The time in seconds to wait for an update on an ownership record before
* assuming the owner of the partition is inactive.
* @param partitionPumpManager The partition pump manager that keeps track of all EventHubConsumers and partitions
* that this {@link EventProcessorClient} is processing.
* @param processError The callback that will be called when an error occurs while running the load balancer.
*/
PartitionBasedLoadBalancer(final CheckpointStore checkpointStore,
final EventHubAsyncClient eventHubAsyncClient, final String fullyQualifiedNamespace,
final String eventHubName, final String consumerGroupName, final String ownerId,
final long inactiveTimeLimitInSeconds, final PartitionPumpManager partitionPumpManager,
final Consumer<ErrorContext> processError) {
this.checkpointStore = checkpointStore;
this.eventHubAsyncClient = eventHubAsyncClient;
this.fullyQualifiedNamespace = fullyQualifiedNamespace;
this.eventHubName = eventHubName;
this.consumerGroupName = consumerGroupName;
this.ownerId = ownerId;
this.inactiveTimeLimitInSeconds = inactiveTimeLimitInSeconds;
this.partitionPumpManager = partitionPumpManager;
this.processError = processError;
this.partitionAgnosticContext = new PartitionContext(fullyQualifiedNamespace, eventHubName,
consumerGroupName, "NONE");
}
/**
* This is the main method responsible for load balancing. This method is expected to be invoked by the {@link
* EventProcessorClient} periodically. Every call to this method will result in this {@link EventProcessorClient}
* owning <b>at most one</b> new partition.
* <p>
* The load is considered balanced when no active EventProcessor owns 2 partitions more than any other active
* EventProcessor. Given that each invocation to this method results in ownership claim of at most one partition,
* this algorithm converges gradually towards a steady state.
* </p>
* When a new partition is claimed, this method is also responsible for starting a partition pump that creates an
* {@link EventHubConsumerAsyncClient} for processing events from that partition.
*/
/*
* This method works with the given partition ownership details and Event Hub partitions to evaluate whether the
* current Event Processor should take on the responsibility of processing more partitions.
*/
private Mono<Void> loadBalance(final Tuple2<Map<String, PartitionOwnership>, List<String>> tuple) {
return Mono.fromRunnable(() -> {
logger.info("Starting load balancer for {}", this.ownerId);
Map<String, PartitionOwnership> partitionOwnershipMap = tuple.getT1();
List<String> partitionIds = tuple.getT2();
if (CoreUtils.isNullOrEmpty(partitionIds)) {
throw logger.logExceptionAsError(Exceptions.propagate(
new IllegalStateException("There are no partitions in Event Hub " + eventHubName)));
}
int numberOfPartitions = partitionIds.size();
logger.info("CheckpointStore returned {} ownership records", partitionOwnershipMap.size());
logger.info("Event Hubs service returned {} partitions", numberOfPartitions);
if (!isValid(partitionOwnershipMap)) {
throw logger.logExceptionAsError(Exceptions.propagate(
new IllegalStateException("Invalid partitionOwnership data from CheckpointStore")));
}
/*
* Remove all partitions' ownership that have not been modified for a configuration period of time. This
* means that the previous EventProcessor that owned the partition is probably down and the partition is now
* eligible to be claimed by other EventProcessors.
*/
Map<String, PartitionOwnership> activePartitionOwnershipMap = removeInactivePartitionOwnerships(
partitionOwnershipMap);
logger.info("Number of active ownership records {}", activePartitionOwnershipMap.size());
/*
* Create a map of owner id and a list of partitions it owns
*/
Map<String, List<PartitionOwnership>> ownerPartitionMap = activePartitionOwnershipMap.values()
.stream()
.collect(
Collectors.groupingBy(PartitionOwnership::getOwnerId, mapping(Function.identity(), toList())));
ownerPartitionMap.putIfAbsent(this.ownerId, new ArrayList<>());
if (CoreUtils.isNullOrEmpty(activePartitionOwnershipMap)) {
/*
* If the active partition ownership map is empty, this is the first time an event processor is
* running or all Event Processors are down for this Event Hub, consumer group combination. All
* partitions in this Event Hub are available to claim. Choose a random partition to claim ownership.
*/
claimOwnership(partitionOwnershipMap, ownerPartitionMap,
partitionIds.get(RANDOM.nextInt(numberOfPartitions)));
return;
}
/*
* Find the minimum number of partitions every event processor should own when the load is
* evenly distributed.
*/
int numberOfActiveEventProcessors = ownerPartitionMap.size();
logger.info("Number of active event processors {}", ownerPartitionMap.size());
int minPartitionsPerEventProcessor = numberOfPartitions / numberOfActiveEventProcessors;
/*
* If the number of partitions in Event Hub is not evenly divisible by number of active event processors,
* a few Event Processors may own 1 additional partition than the minimum when the load is balanced.
* Calculate the number of event processors that can own additional partition.
*/
int numberOfEventProcessorsWithAdditionalPartition = numberOfPartitions % numberOfActiveEventProcessors;
logger.info("Expected min partitions per event processor = {}, expected number of event "
+ "processors with additional partition = {}", minPartitionsPerEventProcessor,
numberOfEventProcessorsWithAdditionalPartition);
if (isLoadBalanced(minPartitionsPerEventProcessor, numberOfEventProcessorsWithAdditionalPartition,
ownerPartitionMap)) {
logger.info("Load is balanced with this event processor owning {} partitions",
ownerPartitionMap.get(ownerId).size());
checkpointStore.claimOwnership(partitionPumpManager.getPartitionPumps().keySet()
.stream()
.map(partitionId -> createPartitionOwnershipRequest(partitionOwnershipMap, partitionId))
.collect(Collectors.toList()))
.subscribe();
return;
}
if (!shouldOwnMorePartitions(minPartitionsPerEventProcessor, ownerPartitionMap)) {
logger.info("This event processor owns {} partitions and shouldn't own more",
ownerPartitionMap.get(ownerId).size());
checkpointStore.claimOwnership(partitionPumpManager.getPartitionPumps().keySet()
.stream()
.map(partitionId -> createPartitionOwnershipRequest(partitionOwnershipMap, partitionId))
.collect(Collectors.toList()))
.subscribe();
return;
}
logger.info(
"Load is unbalanced and this event processor owns {} partitions and should own more partitions",
ownerPartitionMap.get(ownerId).size());
/*
* If some partitions are unclaimed, this could be because an event processor is down and
* it's partitions are now available for others to own or because event processors are just
* starting up and gradually claiming partitions to own or new partitions were added to Event Hub.
* Find any partition that is not actively owned and claim it.
*
* OR
*
* Find a partition to steal from another event processor. Pick the event processor that has owns the
* highest number of partitions.
*/
String partitionToClaim = partitionIds.parallelStream()
.filter(partitionId -> !activePartitionOwnershipMap.containsKey(partitionId))
.findAny()
.orElseGet(() -> {
logger.info("No unclaimed partitions, stealing from another event processor");
return findPartitionToSteal(ownerPartitionMap);
});
claimOwnership(partitionOwnershipMap, ownerPartitionMap, partitionToClaim);
});
}
/*
* Check if partition ownership data is valid before proceeding with load balancing.
*/
private boolean isValid(final Map<String, PartitionOwnership> partitionOwnershipMap) {
return partitionOwnershipMap.values()
.stream()
.noneMatch(partitionOwnership -> {
return partitionOwnership.getEventHubName() == null
|| !partitionOwnership.getEventHubName().equals(this.eventHubName)
|| partitionOwnership.getConsumerGroup() == null
|| !partitionOwnership.getConsumerGroup().equals(this.consumerGroupName)
|| partitionOwnership.getPartitionId() == null
|| partitionOwnership.getLastModifiedTime() == null
|| partitionOwnership.getETag() == null;
});
}
/*
* Find the event processor that owns the maximum number of partitions and steal a random partition
* from it.
*/
private String findPartitionToSteal(final Map<String, List<PartitionOwnership>> ownerPartitionMap) {
Map.Entry<String, List<PartitionOwnership>> ownerWithMaxPartitions = ownerPartitionMap.entrySet()
.stream()
.max(Comparator.comparingInt(entry -> entry.getValue().size()))
.get();
int numberOfPartitions = ownerWithMaxPartitions.getValue().size();
logger.info("Owner id {} owns {} partitions, stealing a partition from it", ownerWithMaxPartitions.getKey(),
numberOfPartitions);
return ownerWithMaxPartitions.getValue().get(RANDOM.nextInt(numberOfPartitions)).getPartitionId();
}
/*
* When the load is balanced, all active event processors own at least {@code minPartitionsPerEventProcessor}
* and only {@code numberOfEventProcessorsWithAdditionalPartition} event processors will own 1 additional
* partition.
*/
private boolean isLoadBalanced(final int minPartitionsPerEventProcessor,
final int numberOfEventProcessorsWithAdditionalPartition,
final Map<String, List<PartitionOwnership>> ownerPartitionMap) {
int count = 0;
for (List<PartitionOwnership> partitionOwnership : ownerPartitionMap.values()) {
int numberOfPartitions = partitionOwnership.size();
if (numberOfPartitions < minPartitionsPerEventProcessor
|| numberOfPartitions > minPartitionsPerEventProcessor + 1) {
return false;
}
if (numberOfPartitions == minPartitionsPerEventProcessor + 1) {
count++;
}
}
return count == numberOfEventProcessorsWithAdditionalPartition;
}
/*
* This method is called after determining that the load is not balanced. This method will evaluate
* if the current event processor should own more partitions. Specifically, this method returns true if the
* current event processor owns less than the minimum number of partitions or if it owns the minimum number
* and no other event processor owns lesser number of partitions than this event processor.
*/
private boolean shouldOwnMorePartitions(final int minPartitionsPerEventProcessor,
final Map<String, List<PartitionOwnership>> ownerPartitionMap) {
int numberOfPartitionsOwned = ownerPartitionMap.get(this.ownerId).size();
int leastPartitionsOwnedByAnyEventProcessor =
ownerPartitionMap.values().stream().min(Comparator.comparingInt(List::size)).get().size();
return numberOfPartitionsOwned < minPartitionsPerEventProcessor
|| numberOfPartitionsOwned == leastPartitionsOwnedByAnyEventProcessor;
}
/*
* This method will create a new map of partition id and PartitionOwnership containing only those partitions
* that are actively owned. All entries in the original map returned by CheckpointStore that haven't been
* modified for a duration of time greater than the allowed inactivity time limit are assumed to be owned by
* dead event processors. These will not be included in the map returned by this method.
*/
private Map<String, PartitionOwnership> removeInactivePartitionOwnerships(
final Map<String, PartitionOwnership> partitionOwnershipMap) {
return partitionOwnershipMap
.entrySet()
.stream()
.filter(entry -> {
return (System.currentTimeMillis() - entry.getValue().getLastModifiedTime() < TimeUnit.SECONDS
.toMillis(inactiveTimeLimitInSeconds))
&& !CoreUtils.isNullOrEmpty(entry.getValue().getOwnerId());
}).collect(Collectors.toMap(Entry::getKey, Entry::getValue));
}
private void claimOwnership(final Map<String, PartitionOwnership> partitionOwnershipMap, Map<String,
List<PartitionOwnership>> ownerPartitionsMap, final String partitionIdToClaim) {
logger.info("Attempting to claim ownership of partition {}", partitionIdToClaim);
PartitionOwnership ownershipRequest = createPartitionOwnershipRequest(partitionOwnershipMap,
partitionIdToClaim);
List<PartitionOwnership> partitionsToClaim = new ArrayList<>();
partitionsToClaim.add(ownershipRequest);
partitionsToClaim.addAll(partitionPumpManager.getPartitionPumps()
.keySet()
.stream()
.map(partitionId -> createPartitionOwnershipRequest(partitionOwnershipMap, partitionId))
.collect(Collectors.toList()));
checkpointStore
.claimOwnership(partitionsToClaim)
.timeout(Duration.ofMinutes(1))
.doOnNext(partitionOwnership -> logger.info("Successfully claimed ownership of partition {}",
partitionOwnership.getPartitionId()))
.doOnError(ex -> logger
.warning(Messages.FAILED_TO_CLAIM_OWNERSHIP, ownershipRequest.getPartitionId(),
ex.getMessage(), ex))
.collectList()
.zipWhen(ownershipList -> checkpointStore.listCheckpoints(fullyQualifiedNamespace, eventHubName,
consumerGroupName)
.collectMap(checkpoint -> checkpoint.getPartitionId(), Function.identity()))
.subscribe(ownedPartitionCheckpointsTuple -> {
ownedPartitionCheckpointsTuple.getT1()
.stream()
.forEach(po -> partitionPumpManager.startPartitionPump(po,
ownedPartitionCheckpointsTuple.getT2().get(po.getPartitionId())));
},
ex -> {
logger.warning("Error while listing checkpoints", ex);
ErrorContext errorContext = new ErrorContext(partitionAgnosticContext, ex);
processError.accept(errorContext);
throw logger.logExceptionAsError(new IllegalStateException("Error while listing checkpoints", ex));
});
}
/**
 * Builds an ownership request for the given partition on behalf of this processor instance.
 *
 * @param partitionOwnershipMap Current ownership snapshot, keyed by partition id.
 * @param partitionIdToClaim Id of the partition to claim.
 * @return a request carrying this processor's identity and, when present, the previous owner's ETag.
 */
private PartitionOwnership createPartitionOwnershipRequest(
    final Map<String, PartitionOwnership> partitionOwnershipMap,
    final String partitionIdToClaim) {
    // Carry forward the ETag from the previous ownership record (if any) so that the
    // claim is conditional on that record not having changed.
    final PartitionOwnership previousOwnership = partitionOwnershipMap.get(partitionIdToClaim);
    final String previousETag = previousOwnership != null ? previousOwnership.getETag() : null;
    return new PartitionOwnership()
        .setFullyQualifiedNamespace(this.fullyQualifiedNamespace)
        .setOwnerId(this.ownerId)
        .setPartitionId(partitionIdToClaim)
        .setConsumerGroup(this.consumerGroupName)
        .setEventHubName(this.eventHubName)
        .setETag(previousETag);
}
} | |
ideally you can use an xml parser to get the path of the field of interest anyway, not a big deal for a test and may not be worth the time. Thank you. | private String getVersionFromPomFile() {
// pom.xml is resolved relative to the working directory the test runs from.
String fileName = "pom.xml";
String versionStartTag = "<version>";
String versionEndTag = "</version>";
File file = new File(fileName);
// try-with-resources closes the reader; the previous code leaked it on every call.
try (BufferedReader bufferedReader = new BufferedReader(new FileReader(file))) {
    String line;
    while ((line = bufferedReader.readLine()) != null) {
        // Matches only lines containing both the artifact id and a version tag,
        // i.e. assumes they share a single line in this pom.
        if (line.contains(versionStartTag) && line.contains("azure-cosmos")) {
            int startIndex = line.indexOf(versionStartTag);
            int endIndex = line.indexOf(versionEndTag);
            return line.substring(startIndex + versionStartTag.length(), endIndex);
        }
    }
} catch (IOException e) {
    throw new RuntimeException("Error reading file " + fileName, e);
}
// No azure-cosmos version line found.
return null;
} | try { | private String getVersionFromPomFile() {
// pom.xml is resolved relative to the working directory the test runs from.
String fileName = "pom.xml";
String versionStartTag = "<version>";
String versionEndTag = "</version>";
File file = new File(fileName);
// try-with-resources closes the reader; the previous code leaked it on every call.
try (BufferedReader bufferedReader = new BufferedReader(new FileReader(file))) {
    String line;
    while ((line = bufferedReader.readLine()) != null) {
        // Matches only lines containing both the artifact id and a version tag,
        // i.e. assumes they share a single line in this pom.
        if (line.contains(versionStartTag) && line.contains("azure-cosmos")) {
            int startIndex = line.indexOf(versionStartTag);
            int endIndex = line.indexOf(versionEndTag);
            return line.substring(startIndex + versionStartTag.length(), endIndex);
        }
    }
} catch (IOException e) {
    throw new RuntimeException("Error reading file " + fileName, e);
}
// No azure-cosmos version line found.
return null;
} | class AzureCosmosPropertiesTest {
/**
 * Verifies that the azure-cosmos properties resource can be loaded and that it
 * carries non-null "version" and "name" entries.
 */
@Test(groups = "unit")
public void verifyAzureCosmosProperties() {
Map<String, String> properties =
CoreUtils.getProperties(HttpConstants.Versions.AZURE_COSMOS_PROPERTIES_FILE_NAME);
assertThat(properties).isNotNull();
assertThat(properties).isNotEmpty();
assertThat(properties.get("version")).isNotNull();
assertThat(properties.get("name")).isNotNull();
}
/**
 * Verifies that the compiled-in SDK_VERSION constant matches the version declared
 * for the azure-cosmos artifact in pom.xml.
 */
@Test(groups = "unit")
public void verifyProjectVersion() {
assertThat(HttpConstants.Versions.SDK_VERSION).isNotNull();
String pomFileVersion = getVersionFromPomFile();
assertThat(HttpConstants.Versions.SDK_VERSION).isEqualTo(pomFileVersion);
}
} | class AzureCosmosPropertiesTest {
@Test(groups = "unit")
public void verifyAzureCosmosProperties() {
Map<String, String> properties =
CoreUtils.getProperties(HttpConstants.Versions.AZURE_COSMOS_PROPERTIES_FILE_NAME);
assertThat(properties).isNotNull();
assertThat(properties).isNotEmpty();
assertThat(properties.get("version")).isNotNull();
assertThat(properties.get("name")).isNotNull();
}
@Test(groups = "unit")
public void verifyProjectVersion() {
assertThat(HttpConstants.Versions.SDK_VERSION).isNotNull();
String pomFileVersion = getVersionFromPomFile();
assertThat(HttpConstants.Versions.SDK_VERSION).isEqualTo(pomFileVersion);
}
} |
Yeah, didn't want to add extra dependency :) but yes, I agree. We could use that. | private String getVersionFromPomFile() {
// pom.xml is resolved relative to the working directory the test runs from.
String fileName = "pom.xml";
String versionStartTag = "<version>";
String versionEndTag = "</version>";
File file = new File(fileName);
// try-with-resources closes the reader; the previous code leaked it on every call.
try (BufferedReader bufferedReader = new BufferedReader(new FileReader(file))) {
    String line;
    while ((line = bufferedReader.readLine()) != null) {
        // Matches only lines containing both the artifact id and a version tag,
        // i.e. assumes they share a single line in this pom.
        if (line.contains(versionStartTag) && line.contains("azure-cosmos")) {
            int startIndex = line.indexOf(versionStartTag);
            int endIndex = line.indexOf(versionEndTag);
            return line.substring(startIndex + versionStartTag.length(), endIndex);
        }
    }
} catch (IOException e) {
    throw new RuntimeException("Error reading file " + fileName, e);
}
// No azure-cosmos version line found.
return null;
} | try { | private String getVersionFromPomFile() {
// pom.xml is resolved relative to the working directory the test runs from.
String fileName = "pom.xml";
String versionStartTag = "<version>";
String versionEndTag = "</version>";
File file = new File(fileName);
// try-with-resources closes the reader; the previous code leaked it on every call.
try (BufferedReader bufferedReader = new BufferedReader(new FileReader(file))) {
    String line;
    while ((line = bufferedReader.readLine()) != null) {
        // Matches only lines containing both the artifact id and a version tag,
        // i.e. assumes they share a single line in this pom.
        if (line.contains(versionStartTag) && line.contains("azure-cosmos")) {
            int startIndex = line.indexOf(versionStartTag);
            int endIndex = line.indexOf(versionEndTag);
            return line.substring(startIndex + versionStartTag.length(), endIndex);
        }
    }
} catch (IOException e) {
    throw new RuntimeException("Error reading file " + fileName, e);
}
// No azure-cosmos version line found.
return null;
} | class AzureCosmosPropertiesTest {
@Test(groups = "unit")
public void verifyAzureCosmosProperties() {
Map<String, String> properties =
CoreUtils.getProperties(HttpConstants.Versions.AZURE_COSMOS_PROPERTIES_FILE_NAME);
assertThat(properties).isNotNull();
assertThat(properties).isNotEmpty();
assertThat(properties.get("version")).isNotNull();
assertThat(properties.get("name")).isNotNull();
}
@Test(groups = "unit")
public void verifyProjectVersion() {
assertThat(HttpConstants.Versions.SDK_VERSION).isNotNull();
String pomFileVersion = getVersionFromPomFile();
assertThat(HttpConstants.Versions.SDK_VERSION).isEqualTo(pomFileVersion);
}
} | class AzureCosmosPropertiesTest {
@Test(groups = "unit")
public void verifyAzureCosmosProperties() {
Map<String, String> properties =
CoreUtils.getProperties(HttpConstants.Versions.AZURE_COSMOS_PROPERTIES_FILE_NAME);
assertThat(properties).isNotNull();
assertThat(properties).isNotEmpty();
assertThat(properties.get("version")).isNotNull();
assertThat(properties.get("name")).isNotNull();
}
@Test(groups = "unit")
public void verifyProjectVersion() {
assertThat(HttpConstants.Versions.SDK_VERSION).isNotNull();
String pomFileVersion = getVersionFromPomFile();
assertThat(HttpConstants.Versions.SDK_VERSION).isEqualTo(pomFileVersion);
}
} |
This is cool. We do additional regex testing to validate the version format. | public void verifyProjectVersion() {
assertThat(HttpConstants.Versions.SDK_VERSION).isNotNull();
String pomFileVersion = getVersionFromPomFile();
assertThat(HttpConstants.Versions.SDK_VERSION).isEqualTo(pomFileVersion);
} | assertThat(HttpConstants.Versions.SDK_VERSION).isEqualTo(pomFileVersion); | public void verifyProjectVersion() {
assertThat(HttpConstants.Versions.SDK_VERSION).isNotNull();
String pomFileVersion = getVersionFromPomFile();
assertThat(HttpConstants.Versions.SDK_VERSION).isEqualTo(pomFileVersion);
} | class AzureCosmosPropertiesTest {
@Test(groups = "unit")
public void verifyAzureCosmosProperties() {
Map<String, String> properties =
CoreUtils.getProperties(HttpConstants.Versions.AZURE_COSMOS_PROPERTIES_FILE_NAME);
assertThat(properties).isNotNull();
assertThat(properties).isNotEmpty();
assertThat(properties.get("version")).isNotNull();
assertThat(properties.get("name")).isNotNull();
}
@Test(groups = "unit")
/**
 * Extracts the azure-cosmos version from the pom.xml in the current working directory
 * by scanning for a line that contains both "azure-cosmos" and a version tag.
 *
 * @return the text between the version tags, or {@code null} when no such line exists.
 */
private String getVersionFromPomFile() {
    String fileName = "pom.xml";
    String versionStartTag = "<version>";
    String versionEndTag = "</version>";
    File file = new File(fileName);
    // try-with-resources closes the reader; the previous code leaked it on every call.
    try (BufferedReader bufferedReader = new BufferedReader(new FileReader(file))) {
        String line;
        while ((line = bufferedReader.readLine()) != null) {
            // Assumes the artifact id and its <version> tag share a single line.
            if (line.contains(versionStartTag) && line.contains("azure-cosmos")) {
                int startIndex = line.indexOf(versionStartTag);
                int endIndex = line.indexOf(versionEndTag);
                return line.substring(startIndex + versionStartTag.length(), endIndex);
            }
        }
    } catch (IOException e) {
        throw new RuntimeException("Error reading file " + fileName, e);
    }
    // No azure-cosmos version line found.
    return null;
}
} | class AzureCosmosPropertiesTest {
@Test(groups = "unit")
public void verifyAzureCosmosProperties() {
Map<String, String> properties =
CoreUtils.getProperties(HttpConstants.Versions.AZURE_COSMOS_PROPERTIES_FILE_NAME);
assertThat(properties).isNotNull();
assertThat(properties).isNotEmpty();
assertThat(properties.get("version")).isNotNull();
assertThat(properties.get("name")).isNotNull();
}
@Test(groups = "unit")
/**
 * Extracts the azure-cosmos version from the pom.xml in the current working directory
 * by scanning for a line that contains both "azure-cosmos" and a version tag.
 *
 * @return the text between the version tags, or {@code null} when no such line exists.
 */
private String getVersionFromPomFile() {
    String fileName = "pom.xml";
    String versionStartTag = "<version>";
    String versionEndTag = "</version>";
    File file = new File(fileName);
    // try-with-resources closes the reader; the previous code leaked it on every call.
    try (BufferedReader bufferedReader = new BufferedReader(new FileReader(file))) {
        String line;
        while ((line = bufferedReader.readLine()) != null) {
            // Assumes the artifact id and its <version> tag share a single line.
            if (line.contains(versionStartTag) && line.contains("azure-cosmos")) {
                int startIndex = line.indexOf(versionStartTag);
                int endIndex = line.indexOf(versionEndTag);
                return line.substring(startIndex + versionStartTag.length(), endIndex);
            }
        }
    } catch (IOException e) {
        throw new RuntimeException("Error reading file " + fileName, e);
    }
    // No azure-cosmos version line found.
    return null;
}
} |
This recomputes the cmp, instead you can use signum | public int compare(OrderByRowResult<T> r1, OrderByRowResult<T> r2) {
try {
List<QueryItem> result1 = r1.getOrderByItems();
List<QueryItem> result2 = r2.getOrderByItems();
if (result1.size() != result2.size()) {
throw new IllegalStateException("OrderByItems cannot have different sizes.");
}
if (result1.size() != this.sortOrders.size()) {
throw new IllegalStateException("OrderByItems cannot have a different size than sort orders.");
}
if (this.itemTypes == null) {
synchronized (this) {
if (this.itemTypes == null) {
this.itemTypes = new ArrayList<ItemType>(result1.size());
for (QueryItem item : result1) {
this.itemTypes.add(ItemTypeHelper.getOrderByItemType(item.getItem()));
}
}
}
}
this.checkOrderByItemType(result1);
this.checkOrderByItemType(result2);
for (int i = 0; i < result1.size(); ++i) {
int cmp = ItemComparator.getInstance().compare(result1.get(i).getItem(), result2.get(i).getItem());
if (cmp != 0) {
switch (this.sortOrders.get(i)) {
case Ascending:
return cmp;
case Descending:
return ItemComparator.getInstance().compare(result2.get(i).getItem(), result1.get(i).getItem());
}
}
}
return r1.getSourcePartitionKeyRange().getMinInclusive().compareTo(r2.getSourcePartitionKeyRange().getMinInclusive());
} catch (Exception e) {
logger.error("Orderby Row comparision failed {}, {}", r1.toJson(), r2.toJson(), e);
throw e;
}
} | return ItemComparator.getInstance().compare(result2.get(i).getItem(), result1.get(i).getItem()); | public int compare(OrderByRowResult<T> r1, OrderByRowResult<T> r2) {
try {
List<QueryItem> result1 = r1.getOrderByItems();
List<QueryItem> result2 = r2.getOrderByItems();
if (result1.size() != result2.size()) {
throw new IllegalStateException("OrderByItems cannot have different sizes.");
}
if (result1.size() != this.sortOrders.size()) {
throw new IllegalStateException("OrderByItems cannot have a different size than sort orders.");
}
if (this.itemTypes == null) {
synchronized (this) {
if (this.itemTypes == null) {
this.itemTypes = new ArrayList<ItemType>(result1.size());
for (QueryItem item : result1) {
this.itemTypes.add(ItemTypeHelper.getOrderByItemType(item.getItem()));
}
}
}
}
this.checkOrderByItemType(result1);
this.checkOrderByItemType(result2);
for (int i = 0; i < result1.size(); ++i) {
int cmp = ItemComparator.getInstance().compare(result1.get(i).getItem(), result2.get(i).getItem());
if (cmp != 0) {
switch (this.sortOrders.get(i)) {
case Ascending:
return signum(cmp);
case Descending:
return -signum(cmp);
}
}
}
return r1.getSourcePartitionKeyRange().getMinInclusive().compareTo(r2.getSourcePartitionKeyRange().getMinInclusive());
} catch (Exception e) {
logger.error("Orderby Row comparision failed {}, {}", r1.toJson(), r2.toJson(), e);
throw e;
}
} | class OrderbyRowComparer<T> implements Comparator<OrderByRowResult<T>>, Serializable {
private static final Logger logger = LoggerFactory.getLogger(OrderbyRowComparer.class);
private static final long serialVersionUID = 7296627879628897315L;
private final List<SortOrder> sortOrders;
private volatile List<ItemType> itemTypes;
/**
 * Creates a comparer that orders rows by their ORDER BY items.
 *
 * @param sortOrders sort direction for each ORDER BY expression; defensively copied.
 */
public OrderbyRowComparer(Collection<SortOrder> sortOrders) {
this.sortOrders = new ArrayList<>(sortOrders);
}
@Override
/*
 * Validates that each ORDER BY item of the given row has the item type recorded from the
 * first row seen by this comparer; throws UnsupportedOperationException on a mismatch.
 */
private void checkOrderByItemType(List<QueryItem> orderByItems) {
    int index = 0;
    while (index < this.itemTypes.size()) {
        ItemType expectedType = this.itemTypes.get(index);
        ItemType actualType = ItemTypeHelper.getOrderByItemType(orderByItems.get(index).getItem());
        if (actualType != expectedType) {
            throw new UnsupportedOperationException(
                String.format("Expected %s, but got %s.", expectedType.toString(), actualType.toString()));
        }
        ++index;
    }
}
/**
 * Gets the sort orders, one per ORDER BY expression.
 * <p>
 * NOTE(review): exposes the internal mutable list; callers should treat it as read-only.
 *
 * @return the sort orders used by this comparer.
 */
public List<SortOrder> getSortOrders() {
return this.sortOrders;
}
} | class OrderbyRowComparer<T> implements Comparator<OrderByRowResult<T>>, Serializable {
private static final Logger logger = LoggerFactory.getLogger(OrderbyRowComparer.class);
private static final long serialVersionUID = 7296627879628897315L;
private final List<SortOrder> sortOrders;
private volatile List<ItemType> itemTypes;
public OrderbyRowComparer(Collection<SortOrder> sortOrders) {
this.sortOrders = new ArrayList<>(sortOrders);
}
@Override
private void checkOrderByItemType(List<QueryItem> orderByItems) {
for (int i = 0; i < this.itemTypes.size(); ++i) {
ItemType type = ItemTypeHelper.getOrderByItemType(orderByItems.get(i).getItem());
if (type != this.itemTypes.get(i)) {
throw new UnsupportedOperationException(
String.format("Expected %s, but got %s.", this.itemTypes.get(i).toString(), type.toString()));
}
}
}
public List<SortOrder> getSortOrders() {
return this.sortOrders;
}
} |
Yes, I was debating the same if I should recompute or use +1/-1. Using signum is a good idea. I've made the change. I've changed the Ascending case as well to keep the result consistent. Please let me know if the change looks good. | public int compare(OrderByRowResult<T> r1, OrderByRowResult<T> r2) {
try {
List<QueryItem> result1 = r1.getOrderByItems();
List<QueryItem> result2 = r2.getOrderByItems();
if (result1.size() != result2.size()) {
throw new IllegalStateException("OrderByItems cannot have different sizes.");
}
if (result1.size() != this.sortOrders.size()) {
throw new IllegalStateException("OrderByItems cannot have a different size than sort orders.");
}
if (this.itemTypes == null) {
synchronized (this) {
if (this.itemTypes == null) {
this.itemTypes = new ArrayList<ItemType>(result1.size());
for (QueryItem item : result1) {
this.itemTypes.add(ItemTypeHelper.getOrderByItemType(item.getItem()));
}
}
}
}
this.checkOrderByItemType(result1);
this.checkOrderByItemType(result2);
for (int i = 0; i < result1.size(); ++i) {
int cmp = ItemComparator.getInstance().compare(result1.get(i).getItem(), result2.get(i).getItem());
if (cmp != 0) {
switch (this.sortOrders.get(i)) {
case Ascending:
return cmp;
case Descending:
return ItemComparator.getInstance().compare(result2.get(i).getItem(), result1.get(i).getItem());
}
}
}
return r1.getSourcePartitionKeyRange().getMinInclusive().compareTo(r2.getSourcePartitionKeyRange().getMinInclusive());
} catch (Exception e) {
logger.error("Orderby Row comparision failed {}, {}", r1.toJson(), r2.toJson(), e);
throw e;
}
} | return ItemComparator.getInstance().compare(result2.get(i).getItem(), result1.get(i).getItem()); | public int compare(OrderByRowResult<T> r1, OrderByRowResult<T> r2) {
try {
List<QueryItem> result1 = r1.getOrderByItems();
List<QueryItem> result2 = r2.getOrderByItems();
if (result1.size() != result2.size()) {
throw new IllegalStateException("OrderByItems cannot have different sizes.");
}
if (result1.size() != this.sortOrders.size()) {
throw new IllegalStateException("OrderByItems cannot have a different size than sort orders.");
}
if (this.itemTypes == null) {
synchronized (this) {
if (this.itemTypes == null) {
this.itemTypes = new ArrayList<ItemType>(result1.size());
for (QueryItem item : result1) {
this.itemTypes.add(ItemTypeHelper.getOrderByItemType(item.getItem()));
}
}
}
}
this.checkOrderByItemType(result1);
this.checkOrderByItemType(result2);
for (int i = 0; i < result1.size(); ++i) {
int cmp = ItemComparator.getInstance().compare(result1.get(i).getItem(), result2.get(i).getItem());
if (cmp != 0) {
switch (this.sortOrders.get(i)) {
case Ascending:
return signum(cmp);
case Descending:
return -signum(cmp);
}
}
}
return r1.getSourcePartitionKeyRange().getMinInclusive().compareTo(r2.getSourcePartitionKeyRange().getMinInclusive());
} catch (Exception e) {
logger.error("Orderby Row comparision failed {}, {}", r1.toJson(), r2.toJson(), e);
throw e;
}
} | class OrderbyRowComparer<T> implements Comparator<OrderByRowResult<T>>, Serializable {
private static final Logger logger = LoggerFactory.getLogger(OrderbyRowComparer.class);
private static final long serialVersionUID = 7296627879628897315L;
private final List<SortOrder> sortOrders;
private volatile List<ItemType> itemTypes;
public OrderbyRowComparer(Collection<SortOrder> sortOrders) {
this.sortOrders = new ArrayList<>(sortOrders);
}
@Override
private void checkOrderByItemType(List<QueryItem> orderByItems) {
for (int i = 0; i < this.itemTypes.size(); ++i) {
ItemType type = ItemTypeHelper.getOrderByItemType(orderByItems.get(i).getItem());
if (type != this.itemTypes.get(i)) {
throw new UnsupportedOperationException(
String.format("Expected %s, but got %s.", this.itemTypes.get(i).toString(), type.toString()));
}
}
}
public List<SortOrder> getSortOrders() {
return this.sortOrders;
}
} | class OrderbyRowComparer<T> implements Comparator<OrderByRowResult<T>>, Serializable {
private static final Logger logger = LoggerFactory.getLogger(OrderbyRowComparer.class);
private static final long serialVersionUID = 7296627879628897315L;
private final List<SortOrder> sortOrders;
private volatile List<ItemType> itemTypes;
public OrderbyRowComparer(Collection<SortOrder> sortOrders) {
this.sortOrders = new ArrayList<>(sortOrders);
}
@Override
private void checkOrderByItemType(List<QueryItem> orderByItems) {
for (int i = 0; i < this.itemTypes.size(); ++i) {
ItemType type = ItemTypeHelper.getOrderByItemType(orderByItems.get(i).getItem());
if (type != this.itemTypes.get(i)) {
throw new UnsupportedOperationException(
String.format("Expected %s, but got %s.", this.itemTypes.get(i).toString(), type.toString()));
}
}
}
public List<SortOrder> getSortOrders() {
return this.sortOrders;
}
} |
why do we need these overridden methods? the implementation is the same as parent class. | public Object get(String propertyName) {
return super.get(propertyName);
} | return super.get(propertyName); | public Object get(String propertyName) {
return super.get(propertyName);
} | class DatabaseAccount extends Resource {
private ConsistencyPolicy consistencyPolicy;
private long maxMediaStorageUsageInMB;
private long mediaStorageUsageInMB;
private ReplicationPolicy replicationPolicy;
private ReplicationPolicy systemReplicationPolicy;
private Map<String, Object> queryEngineConfiguration;
/**
* Constructor.
*
* @param objectNode the {@link ObjectNode} that represent the
* {@link JsonSerializable}
*/
DatabaseAccount(ObjectNode objectNode) {
super(objectNode);
}
/**
* Constructor.
*/
public DatabaseAccount() {
BridgeInternal.setResourceSelfLink(this, "");
}
/**
* Initialize a database account object from json string.
*
* @param jsonString the json string that represents the database account.
*/
public DatabaseAccount(String jsonString) {
super(jsonString);
}
/**
* Get the databases link of the databaseAccount.
*
* @return the databases link.
*/
String getDatabasesLink() {
return super.getString(Constants.Properties.DATABASES_LINK);
}
/**
* Set the databases of the databaseAccount.
*
* @param databasesLink the databases link.
*/
void setDatabasesLink(String databasesLink) {
BridgeInternal.setProperty(this, Constants.Properties.DATABASES_LINK, databasesLink);
}
/**
* Get the medialink of the databaseAccount.
*
* @return the media link.
*/
String getMediaLink() {
return super.getString(Constants.Properties.MEDIA_LINK);
}
/**
* Set the medialink of the databaseAccount.
*
* @param medialink the media link.
*/
void setMediaLink(String medialink) {
BridgeInternal.setProperty(this, Constants.Properties.MEDIA_LINK, medialink);
}
/**
* Get the addresseslink of the databaseAccount.
*
* @return the addresses link.
*/
String getAddressesLink() {
return super.getString(Constants.Properties.ADDRESS_LINK);
}
/**
* Set the addresseslink of the databaseAccount.
*
* @param addresseslink the addresses link.
*/
void setAddressesLink(String addresseslink) {
BridgeInternal.setProperty(this, Constants.Properties.ADDRESS_LINK, addresseslink);
}
/**
* Attachment content (media) storage quota in MBs Retrieved from gateway.
*
* @return the max media storage usage in MB.
*/
long getMaxMediaStorageUsageInMB() {
return this.maxMediaStorageUsageInMB;
}
public void setMaxMediaStorageUsageInMB(long value) {
this.maxMediaStorageUsageInMB = value;
}
/**
* Current attachment content (media) usage in MBs.
* <p>
* Retrieved from gateway. Value is returned from cached information updated
* periodically and is not guaranteed to be real time.
*
* @return the media storage usage in MB.
*/
long getMediaStorageUsageInMB() {
return this.mediaStorageUsageInMB;
}
public void setMediaStorageUsageInMB(long value) {
this.mediaStorageUsageInMB = value;
}
/**
* Gets the ConsistencyPolicy properties.
*
* @return the consistency policy.
*/
public ConsistencyPolicy getConsistencyPolicy() {
// Lazily materialized from the property bag on first access, then cached.
// NOTE(review): unsynchronized check-then-act — not thread-safe; confirm instances
// are confined to a single thread before relying on this.
if (this.consistencyPolicy == null) {
this.consistencyPolicy = super.getObject(Constants.Properties.USER_CONSISTENCY_POLICY,
ConsistencyPolicy.class);
// Absent property: fall back to a default ConsistencyPolicy.
if (this.consistencyPolicy == null) {
this.consistencyPolicy = new ConsistencyPolicy();
}
}
return this.consistencyPolicy;
}
/**
* Gets the ReplicationPolicy properties.
*
* @return the replication policy.
*/
public ReplicationPolicy getReplicationPolicy() {
if (this.replicationPolicy == null) {
this.replicationPolicy = super.getObject(Constants.Properties.USER_REPLICATION_POLICY,
ReplicationPolicy.class);
if (this.replicationPolicy == null) {
this.replicationPolicy = new ReplicationPolicy();
}
}
return this.replicationPolicy;
}
/**
* Gets the SystemReplicationPolicy properties.
*
* @return the system replication policy.
*/
public ReplicationPolicy getSystemReplicationPolicy() {
if (this.systemReplicationPolicy == null) {
this.systemReplicationPolicy = super.getObject(Constants.Properties.SYSTEM_REPLICATION_POLICY,
ReplicationPolicy.class);
if (this.systemReplicationPolicy == null) {
this.systemReplicationPolicy = new ReplicationPolicy();
}
}
return this.systemReplicationPolicy;
}
/**
* Gets the QueryEngineConfiguration properties.
*
* @return the query engine configuration.
*/
public Map<String, Object> getQueryEngineConfiguration() {
// Lazily parse the JSON string payload once and cache the resulting map.
// NOTE(review): unlike the other lazy getters, this returns null (not an empty map)
// when the property is missing or the string is empty — callers must null-check.
// The lazy init is also unsynchronized; confirm single-threaded use.
if (this.queryEngineConfiguration == null) {
String queryEngineConfigurationJsonString = super.getObject(Constants.Properties.QUERY_ENGINE_CONFIGURATION,
String.class);
if (StringUtils.isNotEmpty(queryEngineConfigurationJsonString)) {
TypeReference<HashMap<String, Object>> typeRef = new TypeReference<HashMap<String, Object>>() {
};
try {
this.queryEngineConfiguration = Utils.getSimpleObjectMapper()
.readValue(queryEngineConfigurationJsonString, typeRef);
} catch (IOException e) {
// Malformed configuration JSON from the service is surfaced as an argument error.
throw new IllegalArgumentException(e);
}
// A JSON literal "null" deserializes to null; normalize to an empty map in that case.
if (this.queryEngineConfiguration == null) {
this.queryEngineConfiguration = new HashMap<>();
}
}
}
return this.queryEngineConfiguration;
}
/**
* Gets the list of writable locations for this database account.
*
* @return the list of writable locations.
*/
public Iterable<DatabaseAccountLocation> getWritableLocations() {
return super.getCollection(Constants.Properties.WRITABLE_LOCATIONS, DatabaseAccountLocation.class);
}
/**
* Sets the list of writable locations for this database account.
* <p>
* The list of writable locations are returned by the service.
*
* @param locations the list of writable locations.
*/
public void setWritableLocations(Iterable<DatabaseAccountLocation> locations) {
BridgeInternal.setProperty(this, Constants.Properties.WRITABLE_LOCATIONS, locations);
}
/**
* Gets the list of readable locations for this database account.
*
* @return the list of readable locations.
*/
public Iterable<DatabaseAccountLocation> getReadableLocations() {
return super.getCollection(Constants.Properties.READABLE_LOCATIONS, DatabaseAccountLocation.class);
}
/**
* Sets the list of readable locations for this database account.
* <p>
* The list of readable locations are returned by the service.
*
* @param locations the list of readable locations.
*/
public void setReadableLocations(Iterable<DatabaseAccountLocation> locations) {
BridgeInternal.setProperty(this, Constants.Properties.READABLE_LOCATIONS, locations);
}
/**
* Gets if enable multiple write locations is set.
*
* @return the true if multiple write locations are set
*/
public boolean getEnableMultipleWriteLocations() {
return ObjectUtils.defaultIfNull(super.getBoolean(Constants.Properties.ENABLE_MULTIPLE_WRITE_LOCATIONS), false);
}
public void setEnableMultipleWriteLocations(boolean value) {
BridgeInternal.setProperty(this, Constants.Properties.ENABLE_MULTIPLE_WRITE_LOCATIONS, value);
}
protected void populatePropertyBag() {
super.populatePropertyBag();
if (this.consistencyPolicy != null) {
ModelBridgeInternal.populatePropertyBagJsonSerializable(this.consistencyPolicy);
BridgeInternal.setProperty(this, Constants.Properties.USER_CONSISTENCY_POLICY, this.consistencyPolicy);
}
}
@Override
public String toJson() {
this.populatePropertyBag();
return super.toJson();
}
@Override
} | class DatabaseAccount extends Resource {
private ConsistencyPolicy consistencyPolicy;
private long maxMediaStorageUsageInMB;
private long mediaStorageUsageInMB;
private ReplicationPolicy replicationPolicy;
private ReplicationPolicy systemReplicationPolicy;
private Map<String, Object> queryEngineConfiguration;
/**
* Constructor.
*
* @param objectNode the {@link ObjectNode} that represent the
* {@link JsonSerializable}
*/
DatabaseAccount(ObjectNode objectNode) {
super(objectNode);
}
/**
* Constructor.
*/
public DatabaseAccount() {
BridgeInternal.setResourceSelfLink(this, "");
}
/**
* Initialize a database account object from json string.
*
* @param jsonString the json string that represents the database account.
*/
public DatabaseAccount(String jsonString) {
super(jsonString);
}
/**
* Get the databases link of the databaseAccount.
*
* @return the databases link.
*/
String getDatabasesLink() {
return super.getString(Constants.Properties.DATABASES_LINK);
}
/**
* Set the databases of the databaseAccount.
*
* @param databasesLink the databases link.
*/
void setDatabasesLink(String databasesLink) {
BridgeInternal.setProperty(this, Constants.Properties.DATABASES_LINK, databasesLink);
}
/**
* Get the medialink of the databaseAccount.
*
* @return the media link.
*/
String getMediaLink() {
return super.getString(Constants.Properties.MEDIA_LINK);
}
/**
* Set the medialink of the databaseAccount.
*
* @param medialink the media link.
*/
void setMediaLink(String medialink) {
BridgeInternal.setProperty(this, Constants.Properties.MEDIA_LINK, medialink);
}
/**
* Get the addresseslink of the databaseAccount.
*
* @return the addresses link.
*/
String getAddressesLink() {
return super.getString(Constants.Properties.ADDRESS_LINK);
}
/**
* Set the addresseslink of the databaseAccount.
*
* @param addresseslink the addresses link.
*/
void setAddressesLink(String addresseslink) {
BridgeInternal.setProperty(this, Constants.Properties.ADDRESS_LINK, addresseslink);
}
/**
* Attachment content (media) storage quota in MBs Retrieved from gateway.
*
* @return the max media storage usage in MB.
*/
long getMaxMediaStorageUsageInMB() {
return this.maxMediaStorageUsageInMB;
}
public void setMaxMediaStorageUsageInMB(long value) {
this.maxMediaStorageUsageInMB = value;
}
/**
* Current attachment content (media) usage in MBs.
* <p>
* Retrieved from gateway. Value is returned from cached information updated
* periodically and is not guaranteed to be real time.
*
* @return the media storage usage in MB.
*/
long getMediaStorageUsageInMB() {
return this.mediaStorageUsageInMB;
}
public void setMediaStorageUsageInMB(long value) {
this.mediaStorageUsageInMB = value;
}
/**
* Gets the ConsistencyPolicy properties.
*
* @return the consistency policy.
*/
public ConsistencyPolicy getConsistencyPolicy() {
if (this.consistencyPolicy == null) {
this.consistencyPolicy = super.getObject(Constants.Properties.USER_CONSISTENCY_POLICY,
ConsistencyPolicy.class);
if (this.consistencyPolicy == null) {
this.consistencyPolicy = new ConsistencyPolicy();
}
}
return this.consistencyPolicy;
}
/**
* Gets the ReplicationPolicy properties.
*
* @return the replication policy.
*/
public ReplicationPolicy getReplicationPolicy() {
if (this.replicationPolicy == null) {
this.replicationPolicy = super.getObject(Constants.Properties.USER_REPLICATION_POLICY,
ReplicationPolicy.class);
if (this.replicationPolicy == null) {
this.replicationPolicy = new ReplicationPolicy();
}
}
return this.replicationPolicy;
}
/**
* Gets the SystemReplicationPolicy properties.
*
* @return the system replication policy.
*/
public ReplicationPolicy getSystemReplicationPolicy() {
if (this.systemReplicationPolicy == null) {
this.systemReplicationPolicy = super.getObject(Constants.Properties.SYSTEM_REPLICATION_POLICY,
ReplicationPolicy.class);
if (this.systemReplicationPolicy == null) {
this.systemReplicationPolicy = new ReplicationPolicy();
}
}
return this.systemReplicationPolicy;
}
/**
* Gets the QueryEngineConfiguration properties.
*
* @return the query engine configuration.
*/
public Map<String, Object> getQueryEngineConfiguration() {
if (this.queryEngineConfiguration == null) {
String queryEngineConfigurationJsonString = super.getObject(Constants.Properties.QUERY_ENGINE_CONFIGURATION,
String.class);
if (StringUtils.isNotEmpty(queryEngineConfigurationJsonString)) {
TypeReference<HashMap<String, Object>> typeRef = new TypeReference<HashMap<String, Object>>() {
};
try {
this.queryEngineConfiguration = Utils.getSimpleObjectMapper()
.readValue(queryEngineConfigurationJsonString, typeRef);
} catch (IOException e) {
throw new IllegalArgumentException(e);
}
if (this.queryEngineConfiguration == null) {
this.queryEngineConfiguration = new HashMap<>();
}
}
}
return this.queryEngineConfiguration;
}
/**
* Gets the list of writable locations for this database account.
*
* @return the list of writable locations.
*/
public Iterable<DatabaseAccountLocation> getWritableLocations() {
return super.getCollection(Constants.Properties.WRITABLE_LOCATIONS, DatabaseAccountLocation.class);
}
/**
* Sets the list of writable locations for this database account.
* <p>
* The list of writable locations are returned by the service.
*
* @param locations the list of writable locations.
*/
public void setWritableLocations(Iterable<DatabaseAccountLocation> locations) {
BridgeInternal.setProperty(this, Constants.Properties.WRITABLE_LOCATIONS, locations);
}
/**
* Gets the list of readable locations for this database account.
*
* @return the list of readable locations.
*/
public Iterable<DatabaseAccountLocation> getReadableLocations() {
return super.getCollection(Constants.Properties.READABLE_LOCATIONS, DatabaseAccountLocation.class);
}
/**
* Sets the list of readable locations for this database account.
* <p>
* The list of readable locations are returned by the service.
*
* @param locations the list of readable locations.
*/
public void setReadableLocations(Iterable<DatabaseAccountLocation> locations) {
BridgeInternal.setProperty(this, Constants.Properties.READABLE_LOCATIONS, locations);
}
/**
* Gets if enable multiple write locations is set.
*
* @return the true if multiple write locations are set
*/
public boolean getEnableMultipleWriteLocations() {
return ObjectUtils.defaultIfNull(super.getBoolean(Constants.Properties.ENABLE_MULTIPLE_WRITE_LOCATIONS), false);
}
public void setEnableMultipleWriteLocations(boolean value) {
BridgeInternal.setProperty(this, Constants.Properties.ENABLE_MULTIPLE_WRITE_LOCATIONS, value);
}
protected void populatePropertyBag() {
super.populatePropertyBag();
if (this.consistencyPolicy != null) {
ModelBridgeInternal.populatePropertyBagJsonSerializable(this.consistencyPolicy);
BridgeInternal.setProperty(this, Constants.Properties.USER_CONSISTENCY_POLICY, this.consistencyPolicy);
}
}
@Override
public String toJson() {
this.populatePropertyBag();
return super.toJson();
}
@Override
} |
Javadoc missing for these methods. Also, look at other model classes in public package to ensure all of them have javadocs. | public Float getConfidence() {
return super.getConfidence();
} | return super.getConfidence(); | public Float getConfidence() {
return super.getConfidence();
} | class StringValue extends FieldValue<String> {
/*
* String value.
*/
private String valueString;
/**
* Constructs a StringValue.
*
* @param text The text content of the extracted field.
* @param boundingBox Bounding box of the field value.
* @param valueString String value.
*/
public StringValue(String text, BoundingBox boundingBox, String valueString) {
super(text, boundingBox);
this.valueString = valueString;
}
@Override
public String getValue() {
return this.valueString;
}
@Override
public void setValue(String value) {
this.valueString = value;
}
@Override
public List<Element> getElements() {
return super.getElements();
}
@Override
} | class StringValue extends FieldValue<String> {
/*
* String value.
*/
private final String valueString;
/*
* Type of the FieldValue.
*/
private final FieldValueType fieldValueType;
/**
* Constructs a StringValue.
*
* @param text The text content of the extracted field.
* @param boundingBox Bounding box of the field value.
* @param valueString String value.
* @param pageNumber The page number on which this field exists.
*/
public StringValue(String text, BoundingBox boundingBox, String valueString, int pageNumber) {
super(text, boundingBox, pageNumber);
this.valueString = valueString;
this.fieldValueType = FieldValueType.STRING;
}
/**
* {@inheritDoc}
*/
@Override
public int getPageNumber() {
return super.getPageNumber();
}
/**
* {@inheritDoc}
*/
@Override
public BoundingBox getBoundingBox() {
return super.getBoundingBox();
}
/**
* {@inheritDoc}
*/
@Override
public String getText() {
return super.getText();
}
/**
* {@inheritDoc}
*/
@Override
public String getValue() {
return this.valueString;
}
/**
* {@inheritDoc}
*/
@Override
public FieldValueType getType() {
return this.fieldValueType;
}
/**
* {@inheritDoc}
*/
@Override
public List<Element> getElements() {
return super.getElements();
}
/**
* {@inheritDoc}
*/
@Override
} |
Let's move this above any other work | public FormRecognizerAsyncClient buildAsyncClient() {
final Configuration buildConfiguration = (configuration == null)
? Configuration.getGlobalConfiguration().clone() : configuration;
final FormRecognizerServiceVersion serviceVersion =
version != null ? version : FormRecognizerServiceVersion.getLatest();
Objects.requireNonNull(endpoint, "'Endpoint' is required and can not be null.");
HttpPipeline pipeline = httpPipeline;
if (pipeline == null) {
final List<HttpPipelinePolicy> policies = new ArrayList<>();
if (credential != null) {
policies.add(new AzureKeyCredentialPolicy(OCP_APIM_SUBSCRIPTION_KEY, credential));
} else {
throw logger.logExceptionAsError(
new IllegalArgumentException("Missing credential information while building a client."));
}
policies.add(new UserAgentPolicy(httpLogOptions.getApplicationId(), clientName, clientVersion,
buildConfiguration));
policies.add(new RequestIdPolicy());
policies.add(new AddHeadersPolicy(headers));
policies.add(new AddDatePolicy());
HttpPolicyProviders.addBeforeRetryPolicies(policies);
policies.add(retryPolicy == null ? DEFAULT_RETRY_POLICY : retryPolicy);
policies.addAll(this.policies);
HttpPolicyProviders.addAfterRetryPolicies(policies);
policies.add(new HttpLoggingPolicy(httpLogOptions));
pipeline = new HttpPipelineBuilder()
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.httpClient(httpClient)
.build();
}
final FormRecognizerClientImpl formRecognizerAPI = new FormRecognizerClientImplBuilder()
.endpoint(endpoint)
.pipeline(pipeline)
.build();
return new FormRecognizerAsyncClient(formRecognizerAPI, serviceVersion);
} | Objects.requireNonNull(endpoint, "'Endpoint' is required and can not be null."); | public FormRecognizerAsyncClient buildAsyncClient() {
Objects.requireNonNull(endpoint, "'Endpoint' is required and can not be null.");
final Configuration buildConfiguration = (configuration == null)
? Configuration.getGlobalConfiguration().clone() : configuration;
final FormRecognizerServiceVersion serviceVersion =
version != null ? version : FormRecognizerServiceVersion.getLatest();
HttpPipeline pipeline = httpPipeline;
if (pipeline == null) {
final List<HttpPipelinePolicy> policies = new ArrayList<>();
policies.add(new UserAgentPolicy(httpLogOptions.getApplicationId(), clientName, clientVersion,
buildConfiguration));
policies.add(new RequestIdPolicy());
policies.add(new AddHeadersPolicy(headers));
HttpPolicyProviders.addBeforeRetryPolicies(policies);
policies.add(retryPolicy == null ? DEFAULT_RETRY_POLICY : retryPolicy);
policies.add(new AddDatePolicy());
if (credential != null) {
policies.add(new AzureKeyCredentialPolicy(OCP_APIM_SUBSCRIPTION_KEY, credential));
} else {
throw logger.logExceptionAsError(
new IllegalArgumentException("Missing credential information while building a client."));
}
policies.addAll(this.policies);
HttpPolicyProviders.addAfterRetryPolicies(policies);
policies.add(new HttpLoggingPolicy(httpLogOptions));
pipeline = new HttpPipelineBuilder()
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.httpClient(httpClient)
.build();
}
final FormRecognizerClientImpl formRecognizerAPI = new FormRecognizerClientImplBuilder()
.endpoint(endpoint)
.pipeline(pipeline)
.build();
return new FormRecognizerAsyncClient(formRecognizerAPI, serviceVersion);
} | class FormRecognizerClientBuilder {
private static final String ECHO_REQUEST_ID_HEADER = "x-ms-return-client-request-id";
private static final String CONTENT_TYPE_HEADER_VALUE = "application/json";
private static final String ACCEPT_HEADER = "Accept";
private static final String FORM_RECOGNIZER_PROPERTIES = "azure-ai-formrecognizer.properties";
static final String OCP_APIM_SUBSCRIPTION_KEY = "Ocp-Apim-Subscription-Key";
private static final String NAME = "name";
private static final String VERSION = "version";
private static final RetryPolicy DEFAULT_RETRY_POLICY = new RetryPolicy("retry-after-ms", ChronoUnit.MILLIS);
private final ClientLogger logger = new ClientLogger(FormRecognizerClientBuilder.class);
private final List<HttpPipelinePolicy> policies;
private final HttpHeaders headers;
private final String clientName;
private final String clientVersion;
private String endpoint;
private AzureKeyCredential credential;
private HttpClient httpClient;
private HttpLogOptions httpLogOptions;
private HttpPipeline httpPipeline;
private Configuration configuration;
private RetryPolicy retryPolicy;
private FormRecognizerServiceVersion version;
/**
* The constructor with defaults.
*/
public FormRecognizerClientBuilder() {
policies = new ArrayList<>();
httpLogOptions = new HttpLogOptions();
Map<String, String> properties = CoreUtils.getProperties(FORM_RECOGNIZER_PROPERTIES);
clientName = properties.getOrDefault(NAME, "UnknownName");
clientVersion = properties.getOrDefault(VERSION, "UnknownVersion");
headers = new HttpHeaders()
.put(ECHO_REQUEST_ID_HEADER, "true")
.put(ACCEPT_HEADER, CONTENT_TYPE_HEADER_VALUE);
}
/**
* Creates a {@link FormRecognizerClient} based on options set in the builder. Every time
* {@code buildClient()} is called a new instance of {@link FormRecognizerClient} is created.
*
* <p>
* If {@link
* {@link
* settings are ignored
* </p>
*
* @return A FormRecognizerClient with the options set from the builder.
* @throws NullPointerException if {@link
* {@link
* @throws IllegalArgumentException if {@link
*/
public FormRecognizerClient buildClient() {
return new FormRecognizerClient(buildAsyncClient());
}
/**
* Creates a {@link FormRecognizerAsyncClient} based on options set in the builder. Every time
* {@code buildAsyncClient()} is called a new instance of {@link FormRecognizerAsyncClient} is created.
*
* <p>
* If {@link
* {@link
* settings are ignored.
* </p>
*
* @return A FormRecognizerAsyncClient with the options set from the builder.
* @throws NullPointerException if {@link
* {@link
* @throws IllegalArgumentException if {@link
*/
/**
* Sets the service endpoint for the Azure Form Recognizer instance.
*
* @param endpoint The URL of the Azure Form Recognizer instance service requests to and receive responses from.
* @return The updated FormRecognizerClientImplBuilder object.
* @throws NullPointerException if {@code endpoint} is null
* @throws IllegalArgumentException if {@code endpoint} cannot be parsed into a valid URL.
*/
public FormRecognizerClientBuilder endpoint(String endpoint) {
Objects.requireNonNull(endpoint, "'endpoint' cannot be null.");
try {
new URL(endpoint);
} catch (MalformedURLException ex) {
throw logger.logExceptionAsWarning(new IllegalArgumentException("'endpoint' must be a valid URL.", ex));
}
if (endpoint.endsWith("/")) {
this.endpoint = endpoint.substring(0, endpoint.length() - 1);
} else {
this.endpoint = endpoint;
}
return this;
}
/**
* Sets the credential to use when authenticating HTTP requests for this FormRecognizerClientImplBuilder.
*
* @param apiKeyCredential API key credential
*
* @return The updated FormRecognizerClientImplBuilder object.
* @throws NullPointerException If {@code apiKeyCredential} is {@code null}
*/
public FormRecognizerClientBuilder apiKey(AzureKeyCredential apiKeyCredential) {
this.credential = Objects.requireNonNull(apiKeyCredential, "'apiKeyCredential' cannot be null.");
return this;
}
/**
* Sets the logging configuration for HTTP requests and responses.
*
* <p> If logLevel is not provided, default value of {@link HttpLogDetailLevel
*
* @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses.
*
* @return The updated FormRecognizerClientImplBuilder object.
*/
public FormRecognizerClientBuilder httpLogOptions(HttpLogOptions logOptions) {
this.httpLogOptions = logOptions;
return this;
}
/**
* Adds a policy to the set of existing policies that are executed after required policies.
*
* @param policy The retry policy for service requests.
*
* @return The updated FormRecognizerClientImplBuilder object.
* @throws NullPointerException If {@code policy} is {@code null}.
*/
public FormRecognizerClientBuilder addPolicy(HttpPipelinePolicy policy) {
policies.add(Objects.requireNonNull(policy, "'policy' cannot be null."));
return this;
}
/**
* Sets the HTTP client to use for sending and receiving requests to and from the service.
*
* @param client The HTTP client to use for requests.
* @return The updated FormRecognizerClientImplBuilder object.
*/
public FormRecognizerClientBuilder httpClient(HttpClient client) {
if (this.httpClient != null && client == null) {
logger.info("HttpClient is being set to 'null' when it was previously configured.");
}
this.httpClient = client;
return this;
}
/**
* Sets the HTTP pipeline to use for the service client.
* <p>
* If {@code pipeline} is set, all other settings are ignored, aside from
* {@link FormRecognizerClientBuilder
* {@link FormRecognizerClient}.
*
* @param httpPipeline The HTTP pipeline to use for sending service requests and receiving responses.
*
* @return The updated FormRecognizerClientImplBuilder object.
*/
public FormRecognizerClientBuilder pipeline(HttpPipeline httpPipeline) {
if (this.httpPipeline != null && httpPipeline == null) {
logger.info("HttpPipeline is being set to 'null' when it was previously configured.");
}
this.httpPipeline = httpPipeline;
return this;
}
/**
* Sets the configuration store that is used during construction of the service client.
* <p>
* The default configuration store is a clone of the {@link Configuration
* configuration store}, use {@link Configuration
*
* @param configuration The configuration store used to
*
* @return The updated FormRecognizerClientImplBuilder object.
*/
public FormRecognizerClientBuilder configuration(Configuration configuration) {
this.configuration = configuration;
return this;
}
/**
* Sets the {@link RetryPolicy} that is used when each request is sent.
* <p>
* The default retry policy will be used if not provided {@link FormRecognizerClientBuilder
* to build {@link FormRecognizerAsyncClient} or {@link FormRecognizerClient}.
*
* @param retryPolicy user's retry policy applied to each request.
*
* @return The updated FormRecognizerClientImplBuilder object.
*/
public FormRecognizerClientBuilder retryPolicy(RetryPolicy retryPolicy) {
this.retryPolicy = retryPolicy;
return this;
}
/**
* Sets the {@link FormRecognizerServiceVersion} that is used when making API requests.
* <p>
* If a service version is not provided, the service version that will be used will be the latest known service
* version based on the version of the client library being used. If no service version is specified, updating to a
* newer version the client library will have the result of potentially moving to a newer service version.
*
* @param version {@link FormRecognizerServiceVersion} of the service to be used when making requests.
*
* @return The updated FormRecognizerClientImplBuilder object.
*/
public FormRecognizerClientBuilder serviceVersion(FormRecognizerServiceVersion version) {
this.version = version;
return this;
}
} | class FormRecognizerClientBuilder {
private static final String ECHO_REQUEST_ID_HEADER = "x-ms-return-client-request-id";
private static final String CONTENT_TYPE_HEADER_VALUE = ContentType.APPLICATION_JSON;
private static final String ACCEPT_HEADER = "Accept";
private static final String FORM_RECOGNIZER_PROPERTIES = "azure-ai-formrecognizer.properties";
static final String OCP_APIM_SUBSCRIPTION_KEY = "Ocp-Apim-Subscription-Key";
private static final String NAME = "name";
private static final String VERSION = "version";
private static final RetryPolicy DEFAULT_RETRY_POLICY = new RetryPolicy("retry-after-ms", ChronoUnit.MILLIS);
private final ClientLogger logger = new ClientLogger(FormRecognizerClientBuilder.class);
private final List<HttpPipelinePolicy> policies;
private final HttpHeaders headers;
private final String clientName;
private final String clientVersion;
private String endpoint;
private AzureKeyCredential credential;
private HttpClient httpClient;
private HttpLogOptions httpLogOptions;
private HttpPipeline httpPipeline;
private Configuration configuration;
private RetryPolicy retryPolicy;
private FormRecognizerServiceVersion version;
/**
* The constructor with defaults.
*/
public FormRecognizerClientBuilder() {
policies = new ArrayList<>();
httpLogOptions = new HttpLogOptions();
Map<String, String> properties = CoreUtils.getProperties(FORM_RECOGNIZER_PROPERTIES);
clientName = properties.getOrDefault(NAME, "UnknownName");
clientVersion = properties.getOrDefault(VERSION, "UnknownVersion");
headers = new HttpHeaders()
.put(ECHO_REQUEST_ID_HEADER, "true")
.put(ACCEPT_HEADER, CONTENT_TYPE_HEADER_VALUE);
}
/**
* Creates a {@link FormRecognizerClient} based on options set in the builder. Every time
* {@code buildClient()} is called a new instance of {@link FormRecognizerClient} is created.
*
* <p>
* If {@link
* {@link
* settings are ignored
* </p>
*
* @return A FormRecognizerClient with the options set from the builder.
* @throws NullPointerException if {@link
* {@link
* @throws IllegalArgumentException if {@link
*/
public FormRecognizerClient buildClient() {
return new FormRecognizerClient(buildAsyncClient());
}
/**
* Creates a {@link FormRecognizerAsyncClient} based on options set in the builder. Every time
* {@code buildAsyncClient()} is called a new instance of {@link FormRecognizerAsyncClient} is created.
*
* <p>
* If {@link
* {@link
* settings are ignored.
* </p>
*
* @return A FormRecognizerAsyncClient with the options set from the builder.
* @throws NullPointerException if {@link
* {@link
* @throws IllegalArgumentException if {@link
*/
/**
* Sets the service endpoint for the Azure Form Recognizer instance.
*
* @param endpoint The URL of the Azure Form Recognizer instance service requests to and receive responses from.
* @return The updated FormRecognizerClientBuilder object.
* @throws NullPointerException if {@code endpoint} is null
* @throws IllegalArgumentException if {@code endpoint} cannot be parsed into a valid URL.
*/
public FormRecognizerClientBuilder endpoint(String endpoint) {
Objects.requireNonNull(endpoint, "'endpoint' cannot be null.");
try {
new URL(endpoint);
} catch (MalformedURLException ex) {
throw logger.logExceptionAsWarning(new IllegalArgumentException("'endpoint' must be a valid URL.", ex));
}
if (endpoint.endsWith("/")) {
this.endpoint = endpoint.substring(0, endpoint.length() - 1);
} else {
this.endpoint = endpoint;
}
return this;
}
/**
* Sets the credential to use when authenticating HTTP requests for this FormRecognizerClientBuilder.
*
* @param apiKeyCredential API key credential
*
* @return The updated FormRecognizerClientBuilder object.
* @throws NullPointerException If {@code apiKeyCredential} is {@code null}
*/
public FormRecognizerClientBuilder apiKey(AzureKeyCredential apiKeyCredential) {
this.credential = Objects.requireNonNull(apiKeyCredential, "'apiKeyCredential' cannot be null.");
return this;
}
/**
* Sets the logging configuration for HTTP requests and responses.
*
* <p>If {@code logOptions} isn't provided, the default options will use {@link HttpLogDetailLevel
* which will prevent logging.</p>
*
* @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses.
*
* @return The updated FormRecognizerClientBuilder object.
*/
public FormRecognizerClientBuilder httpLogOptions(HttpLogOptions logOptions) {
this.httpLogOptions = logOptions;
return this;
}
/**
* Adds a policy to the set of existing policies that are executed after required policies.
*
* @param policy The retry policy for service requests.
*
* @return The updated FormRecognizerClientBuilder object.
* @throws NullPointerException If {@code policy} is {@code null}.
*/
public FormRecognizerClientBuilder addPolicy(HttpPipelinePolicy policy) {
policies.add(Objects.requireNonNull(policy, "'policy' cannot be null."));
return this;
}
/**
* Sets the HTTP client to use for sending and receiving requests to and from the service.
*
* @param client The HTTP client to use for requests.
* @return The updated FormRecognizerClientBuilder object.
*/
public FormRecognizerClientBuilder httpClient(HttpClient client) {
if (this.httpClient != null && client == null) {
logger.info("HttpClient is being set to 'null' when it was previously configured.");
}
this.httpClient = client;
return this;
}
/**
* Sets the HTTP pipeline to use for the service client.
* <p>
* If {@code pipeline} is set, all other settings are ignored, aside from
* {@link FormRecognizerClientBuilder
* {@link FormRecognizerClient}.
*
* @param httpPipeline The HTTP pipeline to use for sending service requests and receiving responses.
*
* @return The updated FormRecognizerClientBuilder object.
*/
public FormRecognizerClientBuilder pipeline(HttpPipeline httpPipeline) {
if (this.httpPipeline != null && httpPipeline == null) {
logger.info("HttpPipeline is being set to 'null' when it was previously configured.");
}
this.httpPipeline = httpPipeline;
return this;
}
/**
* Sets the configuration store that is used during construction of the service client.
* <p>
* The default configuration store is a clone of the {@link Configuration
* configuration store}, use {@link Configuration
*
* @param configuration The configuration store used to
*
* @return The updated FormRecognizerClientBuilder object.
*/
public FormRecognizerClientBuilder configuration(Configuration configuration) {
this.configuration = configuration;
return this;
}
/**
* Sets the {@link RetryPolicy
* <p>
* The default retry policy will be used if not provided {@link FormRecognizerClientBuilder
* to build {@link FormRecognizerAsyncClient} or {@link FormRecognizerClient}.
*
* @param retryPolicy user's retry policy applied to each request.
*
* @return The updated FormRecognizerClientBuilder object.
*/
public FormRecognizerClientBuilder retryPolicy(RetryPolicy retryPolicy) {
this.retryPolicy = retryPolicy;
return this;
}
/**
* Sets the {@link FormRecognizerServiceVersion} that is used when making API requests.
* <p>
* If a service version is not provided, the service version that will be used will be the latest known service
* version based on the version of the client library being used. If no service version is specified, updating to a
* newer version the client library will have the result of potentially moving to a newer service version.
*
* @param version {@link FormRecognizerServiceVersion} of the service to be used when making requests.
*
* @return The updated FormRecognizerClientBuilder object.
*/
public FormRecognizerClientBuilder serviceVersion(FormRecognizerServiceVersion version) {
this.version = version;
return this;
}
} |
Should move this after the retry policy | public FormRecognizerAsyncClient buildAsyncClient() {
final Configuration buildConfiguration = (configuration == null)
? Configuration.getGlobalConfiguration().clone() : configuration;
final FormRecognizerServiceVersion serviceVersion =
version != null ? version : FormRecognizerServiceVersion.getLatest();
Objects.requireNonNull(endpoint, "'Endpoint' is required and can not be null.");
HttpPipeline pipeline = httpPipeline;
if (pipeline == null) {
final List<HttpPipelinePolicy> policies = new ArrayList<>();
if (credential != null) {
policies.add(new AzureKeyCredentialPolicy(OCP_APIM_SUBSCRIPTION_KEY, credential));
} else {
throw logger.logExceptionAsError(
new IllegalArgumentException("Missing credential information while building a client."));
}
policies.add(new UserAgentPolicy(httpLogOptions.getApplicationId(), clientName, clientVersion,
buildConfiguration));
policies.add(new RequestIdPolicy());
policies.add(new AddHeadersPolicy(headers));
policies.add(new AddDatePolicy());
HttpPolicyProviders.addBeforeRetryPolicies(policies);
policies.add(retryPolicy == null ? DEFAULT_RETRY_POLICY : retryPolicy);
policies.addAll(this.policies);
HttpPolicyProviders.addAfterRetryPolicies(policies);
policies.add(new HttpLoggingPolicy(httpLogOptions));
pipeline = new HttpPipelineBuilder()
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.httpClient(httpClient)
.build();
}
final FormRecognizerClientImpl formRecognizerAPI = new FormRecognizerClientImplBuilder()
.endpoint(endpoint)
.pipeline(pipeline)
.build();
return new FormRecognizerAsyncClient(formRecognizerAPI, serviceVersion);
} | policies.add(new AddDatePolicy()); | public FormRecognizerAsyncClient buildAsyncClient() {
Objects.requireNonNull(endpoint, "'Endpoint' is required and can not be null.");
final Configuration buildConfiguration = (configuration == null)
? Configuration.getGlobalConfiguration().clone() : configuration;
final FormRecognizerServiceVersion serviceVersion =
version != null ? version : FormRecognizerServiceVersion.getLatest();
HttpPipeline pipeline = httpPipeline;
if (pipeline == null) {
final List<HttpPipelinePolicy> policies = new ArrayList<>();
policies.add(new UserAgentPolicy(httpLogOptions.getApplicationId(), clientName, clientVersion,
buildConfiguration));
policies.add(new RequestIdPolicy());
policies.add(new AddHeadersPolicy(headers));
HttpPolicyProviders.addBeforeRetryPolicies(policies);
policies.add(retryPolicy == null ? DEFAULT_RETRY_POLICY : retryPolicy);
policies.add(new AddDatePolicy());
if (credential != null) {
policies.add(new AzureKeyCredentialPolicy(OCP_APIM_SUBSCRIPTION_KEY, credential));
} else {
throw logger.logExceptionAsError(
new IllegalArgumentException("Missing credential information while building a client."));
}
policies.addAll(this.policies);
HttpPolicyProviders.addAfterRetryPolicies(policies);
policies.add(new HttpLoggingPolicy(httpLogOptions));
pipeline = new HttpPipelineBuilder()
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.httpClient(httpClient)
.build();
}
final FormRecognizerClientImpl formRecognizerAPI = new FormRecognizerClientImplBuilder()
.endpoint(endpoint)
.pipeline(pipeline)
.build();
return new FormRecognizerAsyncClient(formRecognizerAPI, serviceVersion);
} | class FormRecognizerClientBuilder {
private static final String ECHO_REQUEST_ID_HEADER = "x-ms-return-client-request-id";
private static final String CONTENT_TYPE_HEADER_VALUE = "application/json";
private static final String ACCEPT_HEADER = "Accept";
private static final String FORM_RECOGNIZER_PROPERTIES = "azure-ai-formrecognizer.properties";
static final String OCP_APIM_SUBSCRIPTION_KEY = "Ocp-Apim-Subscription-Key";
private static final String NAME = "name";
private static final String VERSION = "version";
private static final RetryPolicy DEFAULT_RETRY_POLICY = new RetryPolicy("retry-after-ms", ChronoUnit.MILLIS);
private final ClientLogger logger = new ClientLogger(FormRecognizerClientBuilder.class);
private final List<HttpPipelinePolicy> policies;
private final HttpHeaders headers;
private final String clientName;
private final String clientVersion;
private String endpoint;
private AzureKeyCredential credential;
private HttpClient httpClient;
private HttpLogOptions httpLogOptions;
private HttpPipeline httpPipeline;
private Configuration configuration;
private RetryPolicy retryPolicy;
private FormRecognizerServiceVersion version;
/**
* The constructor with defaults.
*/
public FormRecognizerClientBuilder() {
policies = new ArrayList<>();
httpLogOptions = new HttpLogOptions();
Map<String, String> properties = CoreUtils.getProperties(FORM_RECOGNIZER_PROPERTIES);
clientName = properties.getOrDefault(NAME, "UnknownName");
clientVersion = properties.getOrDefault(VERSION, "UnknownVersion");
headers = new HttpHeaders()
.put(ECHO_REQUEST_ID_HEADER, "true")
.put(ACCEPT_HEADER, CONTENT_TYPE_HEADER_VALUE);
}
/**
* Creates a {@link FormRecognizerClient} based on options set in the builder. Every time
* {@code buildClient()} is called a new instance of {@link FormRecognizerClient} is created.
*
* <p>
* If {@link
* {@link
* settings are ignored
* </p>
*
* @return A FormRecognizerClient with the options set from the builder.
* @throws NullPointerException if {@link
* {@link
* @throws IllegalArgumentException if {@link
*/
public FormRecognizerClient buildClient() {
return new FormRecognizerClient(buildAsyncClient());
}
/**
* Creates a {@link FormRecognizerAsyncClient} based on options set in the builder. Every time
* {@code buildAsyncClient()} is called a new instance of {@link FormRecognizerAsyncClient} is created.
*
* <p>
* If {@link
* {@link
* settings are ignored.
* </p>
*
* @return A FormRecognizerAsyncClient with the options set from the builder.
* @throws NullPointerException if {@link
* {@link
* @throws IllegalArgumentException if {@link
*/
/**
* Sets the service endpoint for the Azure Form Recognizer instance.
*
* @param endpoint The URL of the Azure Form Recognizer instance service requests to and receive responses from.
* @return The updated FormRecognizerClientImplBuilder object.
* @throws NullPointerException if {@code endpoint} is null
* @throws IllegalArgumentException if {@code endpoint} cannot be parsed into a valid URL.
*/
public FormRecognizerClientBuilder endpoint(String endpoint) {
Objects.requireNonNull(endpoint, "'endpoint' cannot be null.");
try {
new URL(endpoint);
} catch (MalformedURLException ex) {
throw logger.logExceptionAsWarning(new IllegalArgumentException("'endpoint' must be a valid URL.", ex));
}
if (endpoint.endsWith("/")) {
this.endpoint = endpoint.substring(0, endpoint.length() - 1);
} else {
this.endpoint = endpoint;
}
return this;
}
/**
* Sets the credential to use when authenticating HTTP requests for this FormRecognizerClientImplBuilder.
*
* @param apiKeyCredential API key credential
*
* @return The updated FormRecognizerClientImplBuilder object.
* @throws NullPointerException If {@code apiKeyCredential} is {@code null}
*/
public FormRecognizerClientBuilder apiKey(AzureKeyCredential apiKeyCredential) {
this.credential = Objects.requireNonNull(apiKeyCredential, "'apiKeyCredential' cannot be null.");
return this;
}
/**
* Sets the logging configuration for HTTP requests and responses.
*
* <p> If logLevel is not provided, default value of {@link HttpLogDetailLevel
*
* @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses.
*
* @return The updated FormRecognizerClientImplBuilder object.
*/
public FormRecognizerClientBuilder httpLogOptions(HttpLogOptions logOptions) {
this.httpLogOptions = logOptions;
return this;
}
/**
* Adds a policy to the set of existing policies that are executed after required policies.
*
* @param policy The retry policy for service requests.
*
* @return The updated FormRecognizerClientImplBuilder object.
* @throws NullPointerException If {@code policy} is {@code null}.
*/
public FormRecognizerClientBuilder addPolicy(HttpPipelinePolicy policy) {
policies.add(Objects.requireNonNull(policy, "'policy' cannot be null."));
return this;
}
/**
* Sets the HTTP client to use for sending and receiving requests to and from the service.
*
* @param client The HTTP client to use for requests.
* @return The updated FormRecognizerClientImplBuilder object.
*/
public FormRecognizerClientBuilder httpClient(HttpClient client) {
if (this.httpClient != null && client == null) {
logger.info("HttpClient is being set to 'null' when it was previously configured.");
}
this.httpClient = client;
return this;
}
/**
* Sets the HTTP pipeline to use for the service client.
* <p>
* If {@code pipeline} is set, all other settings are ignored, aside from
* {@link FormRecognizerClientBuilder
* {@link FormRecognizerClient}.
*
* @param httpPipeline The HTTP pipeline to use for sending service requests and receiving responses.
*
* @return The updated FormRecognizerClientImplBuilder object.
*/
public FormRecognizerClientBuilder pipeline(HttpPipeline httpPipeline) {
if (this.httpPipeline != null && httpPipeline == null) {
logger.info("HttpPipeline is being set to 'null' when it was previously configured.");
}
this.httpPipeline = httpPipeline;
return this;
}
/**
* Sets the configuration store that is used during construction of the service client.
* <p>
* The default configuration store is a clone of the {@link Configuration
* configuration store}, use {@link Configuration
*
* @param configuration The configuration store used to
*
* @return The updated FormRecognizerClientImplBuilder object.
*/
public FormRecognizerClientBuilder configuration(Configuration configuration) {
this.configuration = configuration;
return this;
}
/**
* Sets the {@link RetryPolicy} that is used when each request is sent.
* <p>
* The default retry policy will be used if not provided {@link FormRecognizerClientBuilder
* to build {@link FormRecognizerAsyncClient} or {@link FormRecognizerClient}.
*
* @param retryPolicy user's retry policy applied to each request.
*
* @return The updated FormRecognizerClientImplBuilder object.
*/
public FormRecognizerClientBuilder retryPolicy(RetryPolicy retryPolicy) {
this.retryPolicy = retryPolicy;
return this;
}
/**
* Sets the {@link FormRecognizerServiceVersion} that is used when making API requests.
* <p>
* If a service version is not provided, the service version that will be used will be the latest known service
* version based on the version of the client library being used. If no service version is specified, updating to a
* newer version the client library will have the result of potentially moving to a newer service version.
*
* @param version {@link FormRecognizerServiceVersion} of the service to be used when making requests.
*
* @return The updated FormRecognizerClientImplBuilder object.
*/
public FormRecognizerClientBuilder serviceVersion(FormRecognizerServiceVersion version) {
this.version = version;
return this;
}
} | class FormRecognizerClientBuilder {
    // Header asking the service to echo the client request id back in the response.
    private static final String ECHO_REQUEST_ID_HEADER = "x-ms-return-client-request-id";
    private static final String CONTENT_TYPE_HEADER_VALUE = ContentType.APPLICATION_JSON;
    private static final String ACCEPT_HEADER = "Accept";
    // Bundled properties file supplying the client name/version used for telemetry.
    private static final String FORM_RECOGNIZER_PROPERTIES = "azure-ai-formrecognizer.properties";
    // Header carrying the API key credential on each request.
    static final String OCP_APIM_SUBSCRIPTION_KEY = "Ocp-Apim-Subscription-Key";
    private static final String NAME = "name";
    private static final String VERSION = "version";
    // Honors the service's "retry-after-ms" throttling hint.
    private static final RetryPolicy DEFAULT_RETRY_POLICY = new RetryPolicy("retry-after-ms", ChronoUnit.MILLIS);
    private final ClientLogger logger = new ClientLogger(FormRecognizerClientBuilder.class);
    // User-supplied policies appended after the required pipeline policies.
    private final List<HttpPipelinePolicy> policies;
    private final HttpHeaders headers;
    private final String clientName;
    private final String clientVersion;
    private String endpoint;
    private AzureKeyCredential credential;
    private HttpClient httpClient;
    private HttpLogOptions httpLogOptions;
    // When set, this pre-built pipeline takes precedence over the individual HTTP settings above.
    private HttpPipeline httpPipeline;
    private Configuration configuration;
    private RetryPolicy retryPolicy;
    private FormRecognizerServiceVersion version;
/**
* The constructor with defaults.
*/
public FormRecognizerClientBuilder() {
policies = new ArrayList<>();
httpLogOptions = new HttpLogOptions();
Map<String, String> properties = CoreUtils.getProperties(FORM_RECOGNIZER_PROPERTIES);
clientName = properties.getOrDefault(NAME, "UnknownName");
clientVersion = properties.getOrDefault(VERSION, "UnknownVersion");
headers = new HttpHeaders()
.put(ECHO_REQUEST_ID_HEADER, "true")
.put(ACCEPT_HEADER, CONTENT_TYPE_HEADER_VALUE);
}
    /**
     * Creates a {@link FormRecognizerClient} based on options set in the builder. Every time
     * {@code buildClient()} is called a new instance of {@link FormRecognizerClient} is created.
     *
     * <p>
     * If a pipeline has been supplied via {@code pipeline(HttpPipeline)}, then only the pipeline, the
     * endpoint and the service version are used to build the client; all other HTTP settings are ignored.
     * </p>
     *
     * @return A FormRecognizerClient with the options set from the builder.
     * @throws NullPointerException if the endpoint or the API key credential has not been set.
     * @throws IllegalArgumentException if the endpoint cannot be parsed into a valid URL.
     */
    public FormRecognizerClient buildClient() {
        // The sync client is a blocking wrapper around the async client; all wiring happens there.
        return new FormRecognizerClient(buildAsyncClient());
    }
    /**
     * Creates a {@link FormRecognizerAsyncClient} based on options set in the builder. Every time
     * {@code buildAsyncClient()} is called a new instance of {@link FormRecognizerAsyncClient} is created.
     *
     * <p>
     * If a pipeline has been supplied via {@code pipeline(HttpPipeline)}, then only the pipeline, the
     * endpoint and the service version are used to build the client; all other HTTP settings are ignored.
     * </p>
     *
     * @return A FormRecognizerAsyncClient with the options set from the builder.
     * @throws NullPointerException if the endpoint or the API key credential has not been set.
     * @throws IllegalArgumentException if the endpoint cannot be parsed into a valid URL.
     */
/**
* Sets the service endpoint for the Azure Form Recognizer instance.
*
* @param endpoint The URL of the Azure Form Recognizer instance service requests to and receive responses from.
* @return The updated FormRecognizerClientBuilder object.
* @throws NullPointerException if {@code endpoint} is null
* @throws IllegalArgumentException if {@code endpoint} cannot be parsed into a valid URL.
*/
public FormRecognizerClientBuilder endpoint(String endpoint) {
Objects.requireNonNull(endpoint, "'endpoint' cannot be null.");
try {
new URL(endpoint);
} catch (MalformedURLException ex) {
throw logger.logExceptionAsWarning(new IllegalArgumentException("'endpoint' must be a valid URL.", ex));
}
if (endpoint.endsWith("/")) {
this.endpoint = endpoint.substring(0, endpoint.length() - 1);
} else {
this.endpoint = endpoint;
}
return this;
}
/**
* Sets the credential to use when authenticating HTTP requests for this FormRecognizerClientBuilder.
*
* @param apiKeyCredential API key credential
*
* @return The updated FormRecognizerClientBuilder object.
* @throws NullPointerException If {@code apiKeyCredential} is {@code null}
*/
public FormRecognizerClientBuilder apiKey(AzureKeyCredential apiKeyCredential) {
this.credential = Objects.requireNonNull(apiKeyCredential, "'apiKeyCredential' cannot be null.");
return this;
}
/**
* Sets the logging configuration for HTTP requests and responses.
*
* <p>If {@code logOptions} isn't provided, the default options will use {@link HttpLogDetailLevel
* which will prevent logging.</p>
*
* @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses.
*
* @return The updated FormRecognizerClientBuilder object.
*/
public FormRecognizerClientBuilder httpLogOptions(HttpLogOptions logOptions) {
this.httpLogOptions = logOptions;
return this;
}
/**
* Adds a policy to the set of existing policies that are executed after required policies.
*
* @param policy The retry policy for service requests.
*
* @return The updated FormRecognizerClientBuilder object.
* @throws NullPointerException If {@code policy} is {@code null}.
*/
public FormRecognizerClientBuilder addPolicy(HttpPipelinePolicy policy) {
policies.add(Objects.requireNonNull(policy, "'policy' cannot be null."));
return this;
}
/**
* Sets the HTTP client to use for sending and receiving requests to and from the service.
*
* @param client The HTTP client to use for requests.
* @return The updated FormRecognizerClientBuilder object.
*/
public FormRecognizerClientBuilder httpClient(HttpClient client) {
if (this.httpClient != null && client == null) {
logger.info("HttpClient is being set to 'null' when it was previously configured.");
}
this.httpClient = client;
return this;
}
/**
* Sets the HTTP pipeline to use for the service client.
* <p>
* If {@code pipeline} is set, all other settings are ignored, aside from
* {@link FormRecognizerClientBuilder
* {@link FormRecognizerClient}.
*
* @param httpPipeline The HTTP pipeline to use for sending service requests and receiving responses.
*
* @return The updated FormRecognizerClientBuilder object.
*/
public FormRecognizerClientBuilder pipeline(HttpPipeline httpPipeline) {
if (this.httpPipeline != null && httpPipeline == null) {
logger.info("HttpPipeline is being set to 'null' when it was previously configured.");
}
this.httpPipeline = httpPipeline;
return this;
}
/**
* Sets the configuration store that is used during construction of the service client.
* <p>
* The default configuration store is a clone of the {@link Configuration
* configuration store}, use {@link Configuration
*
* @param configuration The configuration store used to
*
* @return The updated FormRecognizerClientBuilder object.
*/
public FormRecognizerClientBuilder configuration(Configuration configuration) {
this.configuration = configuration;
return this;
}
/**
* Sets the {@link RetryPolicy
* <p>
* The default retry policy will be used if not provided {@link FormRecognizerClientBuilder
* to build {@link FormRecognizerAsyncClient} or {@link FormRecognizerClient}.
*
* @param retryPolicy user's retry policy applied to each request.
*
* @return The updated FormRecognizerClientBuilder object.
*/
public FormRecognizerClientBuilder retryPolicy(RetryPolicy retryPolicy) {
this.retryPolicy = retryPolicy;
return this;
}
/**
* Sets the {@link FormRecognizerServiceVersion} that is used when making API requests.
* <p>
* If a service version is not provided, the service version that will be used will be the latest known service
* version based on the version of the client library being used. If no service version is specified, updating to a
* newer version the client library will have the result of potentially moving to a newer service version.
*
* @param version {@link FormRecognizerServiceVersion} of the service to be used when making requests.
*
* @return The updated FormRecognizerClientBuilder object.
*/
public FormRecognizerClientBuilder serviceVersion(FormRecognizerServiceVersion version) {
this.version = version;
return this;
}
} |
Is it likely that there will be additional keys added in the future? If so, what is the plan to ensure this remains up to date when changes are made? Is there any possibility this could be made more generic and flexible to support additional keys? | static IterableStream<ExtractedReceipt> toReceipt(AnalyzeResult analyzeResult, boolean includeTextDetails) {
    // Read results (per page) and document results (per receipt) are paired by index.
    List<ReadResult> readResults = analyzeResult.getReadResults();
    List<DocumentResult> documentResult = analyzeResult.getDocumentResults();
    List<ExtractedReceipt> extractedReceiptList = new ArrayList<>();
    for (int i = 0; i < readResults.size(); i++) {
        ReadResult readResultItem = readResults.get(i);
        PageMetadata pageMetadata = getPageInfo(readResultItem);
        ExtractedReceipt extractedReceiptItem = new ExtractedReceipt(pageMetadata);
        DocumentResult documentResultItem = documentResult.get(i);
        // Map each well-known US receipt field name onto the strongly typed model.
        // NOTE(review): keys not listed below are silently dropped — confirm whether newly added
        // service fields should instead be surfaced generically.
        documentResultItem.getFields().forEach((key, fieldValue) -> {
            switch (key) {
                case "ReceiptType":
                    extractedReceiptItem.setReceiptType(new ReceiptType(fieldValue.getValueString(),
                        fieldValue.getConfidence()));
                    break;
                case "MerchantName":
                    extractedReceiptItem.setMerchantName(setFieldValue(fieldValue, readResults, includeTextDetails));
                    break;
                case "MerchantAddress":
                    extractedReceiptItem.setMerchantAddress(setFieldValue(fieldValue, readResults, includeTextDetails));
                    break;
                case "MerchantPhoneNumber":
                    extractedReceiptItem.setMerchantPhoneNumber(setFieldValue(fieldValue, readResults, includeTextDetails));
                    break;
                case "Subtotal":
                    extractedReceiptItem.setSubtotal(setFieldValue(fieldValue, readResults, includeTextDetails));
                    break;
                case "Tax":
                    extractedReceiptItem.setTax(setFieldValue(fieldValue, readResults, includeTextDetails));
                    break;
                case "Tip":
                    extractedReceiptItem.setTip(setFieldValue(fieldValue, readResults, includeTextDetails));
                    break;
                case "Total":
                    extractedReceiptItem.setTotal(setFieldValue(fieldValue, readResults, includeTextDetails));
                    break;
                case "TransactionDate":
                    extractedReceiptItem.setTransactionDate(setFieldValue(fieldValue, readResults, includeTextDetails));
                    break;
                case "TransactionTime":
                    extractedReceiptItem.setTransactionTime(setFieldValue(fieldValue, readResults, includeTextDetails));
                    break;
                case "Items":
                    // Line items arrive as an array of objects; converted separately.
                    extractedReceiptItem.setReceiptItems(toReceiptItems(fieldValue.getValueArray(), readResults, includeTextDetails));
                    break;
                default:
                    break;
            }
        });
        extractedReceiptList.add(extractedReceiptItem);
    }
    return new IterableStream<>(extractedReceiptList);
}
    // Read results (per page) and document results (per receipt) are paired by index.
    List<ReadResult> readResults = analyzeResult.getReadResults();
    List<DocumentResult> documentResult = analyzeResult.getDocumentResults();
    List<ExtractedReceipt> extractedReceiptList = new ArrayList<>();
    for (int i = 0; i < readResults.size(); i++) {
        ReadResult readResultItem = readResults.get(i);
        PageMetadata pageMetadata = getPageInfo(readResultItem);
        PageRange pageRange = null;
        DocumentResult documentResultItem = documentResult.get(i);
        List<Integer> receiptPageRange = documentResultItem.getPageRange();
        // The service reports the page range as a [first, last] pair; anything else is left null.
        if (receiptPageRange.size() == 2) {
            pageRange = new PageRange(receiptPageRange.get(0), receiptPageRange.get(1));
        }
        ExtractedReceipt extractedReceiptItem = new ExtractedReceipt(pageMetadata, pageRange);
        // Collects fields outside the strongly typed model so future service keys are not lost.
        Map<String, FieldValue<?>> extractedFieldMap = new HashMap<>();
        documentResultItem.getFields().forEach((key, fieldValue) -> {
            switch (key) {
                case "ReceiptType":
                    extractedReceiptItem.setReceiptType(new ReceiptType(fieldValue.getValueString(),
                        fieldValue.getConfidence()));
                    break;
                case "MerchantName":
                    extractedReceiptItem.setMerchantName(setFieldValue(fieldValue, readResults, includeTextDetails));
                    break;
                case "MerchantAddress":
                    extractedReceiptItem.setMerchantAddress(setFieldValue(fieldValue, readResults, includeTextDetails));
                    break;
                case "MerchantPhoneNumber":
                    extractedReceiptItem.setMerchantPhoneNumber(setFieldValue(fieldValue, readResults, includeTextDetails));
                    break;
                case "Subtotal":
                    extractedReceiptItem.setSubtotal(setFieldValue(fieldValue, readResults, includeTextDetails));
                    break;
                case "Tax":
                    extractedReceiptItem.setTax(setFieldValue(fieldValue, readResults, includeTextDetails));
                    break;
                case "Tip":
                    extractedReceiptItem.setTip(setFieldValue(fieldValue, readResults, includeTextDetails));
                    break;
                case "Total":
                    extractedReceiptItem.setTotal(setFieldValue(fieldValue, readResults, includeTextDetails));
                    break;
                case "TransactionDate":
                    extractedReceiptItem.setTransactionDate(setFieldValue(fieldValue, readResults, includeTextDetails));
                    break;
                case "TransactionTime":
                    extractedReceiptItem.setTransactionTime(setFieldValue(fieldValue, readResults, includeTextDetails));
                    break;
                case "Items":
                    extractedReceiptItem.setReceiptItems(toReceiptItems(fieldValue.getValueArray(), readResults, includeTextDetails));
                    break;
                default:
                    // Unknown/future service fields are preserved in the generic field map.
                    extractedFieldMap.putIfAbsent(key, setFieldValue(fieldValue, readResults, includeTextDetails));
                    break;
            }
        });
        extractedReceiptItem.setExtractedFields(extractedFieldMap);
        extractedReceiptList.add(extractedReceiptItem);
    }
    return new IterableStream<>(extractedReceiptList);
}
    private static final ClientLogger LOGGER = new ClientLogger(Transforms.class);
    // Matches runs of non-digits; used to reduce a serialized element reference
    // (e.g. "#/readResults/0/lines/1/words/2") to its numeric indices.
    private static final Pattern COMPILE = Pattern.compile("[^0-9]+");
    // Static utility class; not instantiable.
    private Transforms() {
    }
    /**
     * Converts a service {@code FieldValue} into the matching strongly typed SDK {@code FieldValue},
     * optionally resolving its text reference elements.
     *
     * @param fieldValue The service-side field value to convert.
     * @param readResults Read results used to resolve element references when requested.
     * @param includeTextDetails When true, resolved text elements are attached to the returned value.
     *
     * @return The strongly typed field value.
     * @throws RuntimeException for ARRAY/OBJECT (and any unrecognized) field types, which this
     * converter does not handle.
     */
    private static FieldValue<?> setFieldValue(com.azure.ai.formrecognizer.implementation.models.FieldValue fieldValue,
        List<ReadResult> readResults, boolean includeTextDetails) {
        FieldValue<?> value;
        switch (fieldValue.getType()) {
            // Phone numbers, times and dates are all surfaced via their string representation here.
            case PHONE_NUMBER:
            case STRING:
            case TIME:
            case DATE:
                value = toFieldValueString(fieldValue);
                break;
            case INTEGER:
                value = toFieldValueInteger(fieldValue);
                break;
            case NUMBER:
                value = toFieldValueNumber(fieldValue);
                break;
            case ARRAY:
            case OBJECT:
            default:
                throw LOGGER.logExceptionAsError(new RuntimeException("FieldValue Type not supported"));
        }
        if (includeTextDetails) {
            value.setElements(setReferenceElements(readResults, fieldValue.getElements()));
        }
        return value;
    }
    /**
     * Builds the SDK {@code PageMetadata} for one page from its service read result.
     *
     * @param readResultItem The service read result for a single page.
     * @return Page metadata (language, dimensions, page number, text angle and unit).
     */
    // NOTE(review): getLanguage()/getUnit() are dereferenced unconditionally — confirm the service
    // always populates them, otherwise this throws a NullPointerException.
    private static PageMetadata getPageInfo(ReadResult readResultItem) {
        return new PageMetadata(TextLanguage.fromString(readResultItem.getLanguage().toString()),
            readResultItem.getHeight(), readResultItem.getPage(), readResultItem.getWidth(),
            readResultItem.getAngle(), DimensionUnit.fromString(readResultItem.getUnit().toString()));
    }
private static List<Element> setReferenceElements(List<ReadResult> readResults, List<String> elements) {
List<Element> elementList = new ArrayList<>();
elements.forEach(elementString -> {
String[] indices = COMPILE.matcher(elementString).replaceAll(" ").trim().split(" ");
int readResultIndex, lineIndex;
if (indices.length >= 1) {
readResultIndex = Integer.parseInt(indices[0]);
lineIndex = Integer.parseInt(indices[1]);
} else {
throw LOGGER.logExceptionAsError(new RuntimeException("Reference Elements not found"));
}
if (indices.length == 3) {
int wordIndex = Integer.parseInt(indices[2]);
TextWord textWord = readResults.get(readResultIndex).getLines().get(lineIndex).getWords()
.get(wordIndex);
WordElement wordElement = new WordElement(textWord.getText(), toBoundingBox(textWord.getBoundingBox()));
elementList.add(wordElement);
} else {
TextLine textLine = readResults.get(readResultIndex).getLines().get(lineIndex);
LineElement lineElement = new LineElement(textLine.getText(), toBoundingBox(textLine.getBoundingBox()));
elementList.add(lineElement);
}
});
return elementList;
}
private static BoundingBox toBoundingBox(List<Float> boundingBox) {
List<Point> pointList = new ArrayList<>();
for (int i = 0; i < boundingBox.size(); i += 2) {
Point point = new Point(boundingBox.get(i), boundingBox.get(i + 1));
pointList.add(point);
}
return new BoundingBox(pointList);
}
    /**
     * Converts the service's array of receipt line items into SDK {@code ReceiptItem}s.
     *
     * @param fieldValue The array of item field values returned by the service.
     * @param readResults Read results used to resolve element references when requested.
     * @param includeTextDetails When true, resolved text elements are attached to each field.
     *
     * @return The converted receipt items.
     */
    private static List<ReceiptItem> toReceiptItems(
        List<com.azure.ai.formrecognizer.implementation.models.FieldValue> fieldValue, List<ReadResult> readResults, boolean includeTextDetails) {
        List<ReceiptItem> receiptItemList = new ArrayList<>();
        fieldValue.forEach(fieldValue1 -> {
            ReceiptItem receiptItem = new ReceiptItem();
            // Each array entry is an object whose named members describe one purchased item.
            fieldValue1.getValueObject().forEach((key, fieldValue2) -> {
                switch (key) {
                    case "Quantity":
                        receiptItem.setQuantity(setFieldValue(fieldValue2, readResults, includeTextDetails));
                        break;
                    case "Name":
                        receiptItem.setName(setFieldValue(fieldValue2, readResults, includeTextDetails));
                        break;
                    case "TotalPrice":
                        receiptItem.setTotalPrice(setFieldValue(fieldValue2, readResults, includeTextDetails));
                        break;
                    default:
                        // Unrecognized item members are ignored.
                        break;
                }
            });
            receiptItemList.add(receiptItem);
        });
        return receiptItemList;
    }
    /**
     * Converts a service field value of type INTEGER into an SDK {@code IntegerValue}.
     *
     * @param serviceIntegerValue The service field value carrying the integer.
     * @return An {@code IntegerValue}; falls back to 0 when no numeric value is present.
     */
    // NOTE(review): the guard checks getValueNumber() but the value read is getValueInteger() —
    // confirm this mismatch is intentional.
    private static IntegerValue toFieldValueInteger(com.azure.ai.formrecognizer.implementation.models.FieldValue
        serviceIntegerValue) {
        if (serviceIntegerValue.getValueNumber() != null) {
            return new IntegerValue(serviceIntegerValue.getText(), toBoundingBox(serviceIntegerValue.getBoundingBox()),
                serviceIntegerValue.getValueInteger());
        }
        return new IntegerValue(serviceIntegerValue.getText(), toBoundingBox(serviceIntegerValue.getBoundingBox()), 0);
    }
    /**
     * Converts a service field value carrying a string into an SDK {@code StringValue}.
     *
     * @param serviceStringValue The service field value carrying the string.
     * @return The converted {@code StringValue}.
     */
    private static StringValue toFieldValueString(com.azure.ai.formrecognizer.implementation.models.FieldValue
        serviceStringValue) {
        return new StringValue(serviceStringValue.getText(), toBoundingBox(serviceStringValue.getBoundingBox()),
            serviceStringValue.getValueString());
    }
    /**
     * Converts a service field value of type NUMBER into an SDK {@code FloatValue}.
     *
     * @param serviceFloatValue The service field value carrying the number.
     * @return The converted {@code FloatValue}.
     */
    // NOTE(review): getValueNumber() is passed through without a null check — confirm FloatValue
    // tolerates null (no unboxing), otherwise this can throw a NullPointerException.
    private static FloatValue toFieldValueNumber(com.azure.ai.formrecognizer.implementation.models.FieldValue
        serviceFloatValue) {
        return new FloatValue(serviceFloatValue.getText(), toBoundingBox(serviceFloatValue.getBoundingBox()),
            serviceFloatValue.getValueNumber());
    }
} | class Transforms {
    private static final ClientLogger LOGGER = new ClientLogger(Transforms.class);
    // Matches runs of non-digits; used to reduce a serialized element reference
    // (e.g. "#/readResults/0/lines/1/words/2") to its numeric indices.
    private static final Pattern COMPILE = Pattern.compile("[^0-9]+");
    // Static utility class; not instantiable.
    private Transforms() {
    }
/**
* Helper method to convert the {@link com.azure.ai.formrecognizer.implementation.models.AnalyzeOperationResult}
* service level receipt model to list of {@link ExtractedReceipt}.
*
* @param analyzeResult The result of the analyze receipt operation returned by the service.
* @param includeTextDetails When set to true, a list of references to the text elements is returned in the read result.
*
* @return A list of {@link ExtractedReceipt} to represent the list of extracted receipt information.
*/
/**
* Helper method that converts the incoming service field value to one of the strongly typed SDK level {@link FieldValue} with
* reference elements set when {@code includeTextDetails} is set to true.
*
* @param fieldValue The named field values returned by the service.
* @param readResults The result containing the list of element references when includeTextDetails is set to true.
* @param includeTextDetails When set to true, a list of references to the text elements is returned in the read result.
*
* @return The strongly typed {@link FieldValue} for the field input.
*/
    private static FieldValue<?> setFieldValue(com.azure.ai.formrecognizer.implementation.models.FieldValue fieldValue,
        List<ReadResult> readResults, boolean includeTextDetails) {
        FieldValue<?> value;
        // Dispatch on the service-reported type; each branch builds the matching SDK value type.
        switch (fieldValue.getType()) {
            case PHONE_NUMBER:
                value = toFieldValuePhoneNumber(fieldValue);
                break;
            case STRING:
                value = toFieldValueString(fieldValue);
                break;
            case TIME:
                value = toFieldValueTime(fieldValue);
                break;
            case DATE:
                value = toFieldValueDate(fieldValue);
                break;
            case INTEGER:
                value = toFieldValueInteger(fieldValue);
                break;
            case NUMBER:
                value = toFieldValueNumber(fieldValue);
                break;
            case ARRAY:
            case OBJECT:
            default:
                // Composite types are handled by dedicated converters (e.g. toReceiptItems), not here.
                throw LOGGER.logExceptionAsError(new RuntimeException("FieldValue Type not supported"));
        }
        if (includeTextDetails) {
            // Attach the resolved text elements backing this value.
            value.setElements(setReferenceElements(readResults, fieldValue.getElements()));
        }
        return value;
    }
/**
* Helper method that converts the service returned page information to SDK model {@link PageMetadata}.
*
* @param readResultItem A read result item returned from the service containing the page information for provided
* input.
*
* @return The {@link PageMetadata} for the receipt page.
*/
    private static PageMetadata getPageInfo(ReadResult readResultItem) {
        // NOTE(review): getUnit() is dereferenced unconditionally — confirm the service always sets it.
        return new PageMetadata(readResultItem.getHeight(), readResultItem.getPage(), readResultItem.getWidth(),
            readResultItem.getAngle(), DimensionUnit.fromString(readResultItem.getUnit().toString()));
    }
/**
* Helper method to set the text reference elements on FieldValue/fields when {@code includeTextDetails} set to true.
*
* @param readResults The ReadResult containing the resolved references for text elements.
* @param elements When includeTextDetails is set to true, a list of references to the text
* elements constituting this field value.
*
* @return The updated {@link FieldValue} object with list if referenced elements.
*/
private static List<Element> setReferenceElements(List<ReadResult> readResults, List<String> elements) {
List<Element> elementList = new ArrayList<>();
elements.forEach(elementString -> {
String[] indices = COMPILE.matcher(elementString).replaceAll(" ").trim().split(" ");
int readResultIndex, lineIndex;
if (indices.length >= 1) {
readResultIndex = Integer.parseInt(indices[0]);
lineIndex = Integer.parseInt(indices[1]);
} else {
throw LOGGER.logExceptionAsError(new RuntimeException("Reference Elements not found"));
}
if (indices.length == 3) {
int wordIndex = Integer.parseInt(indices[2]);
TextWord textWord = readResults.get(readResultIndex).getLines().get(lineIndex).getWords()
.get(wordIndex);
WordElement wordElement = new WordElement(textWord.getText(), toBoundingBox(textWord.getBoundingBox()));
elementList.add(wordElement);
} else {
TextLine textLine = readResults.get(readResultIndex).getLines().get(lineIndex);
LineElement lineElement = new LineElement(textLine.getText(), toBoundingBox(textLine.getBoundingBox()));
elementList.add(lineElement);
}
});
return elementList;
}
/**
* Helper method to convert the service level modeled eight numbers representing the four points to SDK level
* {@link BoundingBox}.
*
* @param boundingBox A list of eight numbers representing the four points of a box.
*
* @return A {@link BoundingBox}.
*/
private static BoundingBox toBoundingBox(List<Float> boundingBox) {
BoundingBox boundingBox1;
if (boundingBox.size() == 8) {
Point topLeft = new Point(boundingBox.get(0), boundingBox.get(1));
Point topRight = new Point(boundingBox.get(2), boundingBox.get(3));
Point bottomLeft = new Point(boundingBox.get(4), boundingBox.get(5));
Point bottomRight = new Point(boundingBox.get(6), boundingBox.get(7));
boundingBox1 = new BoundingBox(topLeft, topRight, bottomLeft, bottomRight);
} else {
return null;
}
return boundingBox1;
}
/**
* Helper method to convert the service level {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
* to SDK level {@link ReceiptItem receipt items}.
*
* @param fieldValueItems The named field values returned by the service.
* @param readResults The result containing the list of element references when includeTextDetails is set to true.
* @param includeTextDetails When set to true, a list of references to the text elements is returned in the read result.
*
* @return A list of {@link ReceiptItem}.
*/
    private static List<ReceiptItem> toReceiptItems(
        List<com.azure.ai.formrecognizer.implementation.models.FieldValue> fieldValueItems, List<ReadResult> readResults, boolean includeTextDetails) {
        List<ReceiptItem> receiptItemList = new ArrayList<>();
        for (com.azure.ai.formrecognizer.implementation.models.FieldValue eachFieldValue : fieldValueItems) {
            ReceiptItem receiptItem = new ReceiptItem();
            // Probe every known item member; absent members leave the corresponding property unset.
            for (ReceiptItemType key : ReceiptItemType.values()) {
                com.azure.ai.formrecognizer.implementation.models.FieldValue item = eachFieldValue.getValueObject().get(key.toString());
                if (QUANTITY.equals(key) && item != null) {
                    receiptItem.setQuantity(setFieldValue(item, readResults, includeTextDetails));
                } else if (NAME.equals(key) && item != null) {
                    receiptItem.setName(setFieldValue(item, readResults, includeTextDetails));
                } else if (PRICE.equals(key) && item != null) {
                    receiptItem.setPrice(setFieldValue(item, readResults, includeTextDetails));
                } else if (TOTAL_PRICE.equals(key) && item != null) {
                    receiptItem.setTotalPrice(setFieldValue(item, readResults, includeTextDetails));
                }
            }
            receiptItemList.add(receiptItem);
        }
        return receiptItemList;
    }
/**
* Helper method to convert the service returned {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
* to a SDK level {@link IntegerValue}
*
* @param serviceIntegerValue The integer value returned by the service in
* {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
*
* @return The {@link IntegerValue}.
*/
    // NOTE(review): the guard checks getValueNumber() but the value read is getValueInteger() —
    // confirm this mismatch is intentional.
    private static IntegerValue toFieldValueInteger(com.azure.ai.formrecognizer.implementation.models.FieldValue
        serviceIntegerValue) {
        if (serviceIntegerValue.getValueNumber() != null) {
            return new IntegerValue(serviceIntegerValue.getText(), toBoundingBox(serviceIntegerValue.getBoundingBox()),
                serviceIntegerValue.getValueInteger(), serviceIntegerValue.getPage());
        }
        // No numeric value present: surface null rather than a fabricated 0.
        return new IntegerValue(serviceIntegerValue.getText(), toBoundingBox(serviceIntegerValue.getBoundingBox()), null, serviceIntegerValue.getPage());
    }
/**
* Helper method to convert the service returned {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
* to a SDK level {@link StringValue}.
*
* @param serviceStringValue The string value returned by the service in
* {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
*
* @return The {@link StringValue}.
*/
    private static StringValue toFieldValueString(com.azure.ai.formrecognizer.implementation.models.FieldValue
        serviceStringValue) {
        // Straight copy of text, bounding box, value and page number.
        return new StringValue(serviceStringValue.getText(), toBoundingBox(serviceStringValue.getBoundingBox()),
            serviceStringValue.getValueString(), serviceStringValue.getPage());
    }
/**
* Helper method to convert the service returned {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
* to a SDK level {@link FloatValue}.
*
* @param serviceFloatValue The float value returned by the service in
* {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
*
* @return The {@link FloatValue}.
*/
    private static FloatValue toFieldValueNumber(com.azure.ai.formrecognizer.implementation.models.FieldValue
        serviceFloatValue) {
        if (serviceFloatValue.getValueNumber() != null) {
            return new FloatValue(serviceFloatValue.getText(), toBoundingBox(serviceFloatValue.getBoundingBox()),
                serviceFloatValue.getValueNumber(), serviceFloatValue.getPage());
        }
        // No numeric value present: surface null explicitly.
        return new FloatValue(serviceFloatValue.getText(), toBoundingBox(serviceFloatValue.getBoundingBox()), null, serviceFloatValue.getPage());
    }
/**
* Helper method to convert the service returned {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
* to a SDK level {@link StringValue}.
*
* @param serviceDateValue The string value returned by the service in
* {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
*
* @return The {@link StringValue}.
*/
private static StringValue toFieldValuePhoneNumber(com.azure.ai.formrecognizer.implementation.models.FieldValue
serviceDateValue) {
return new StringValue(serviceDateValue.getText(), toBoundingBox(serviceDateValue.getBoundingBox()),
serviceDateValue.getValuePhoneNumber(), serviceDateValue.getPage());
}
    /**
     * Helper method to convert the service returned {@link com.azure.ai.formrecognizer.implementation.models.FieldValue}
     * to a SDK level {@link DateValue}.
     *
     * @param serviceDateValue The date value returned by the service in
     * {@link com.azure.ai.formrecognizer.implementation.models.FieldValue}.
     *
     * @return The {@link DateValue}.
     */
    private static DateValue toFieldValueDate(com.azure.ai.formrecognizer.implementation.models.FieldValue
        serviceDateValue) {
        return new DateValue(serviceDateValue.getText(), toBoundingBox(serviceDateValue.getBoundingBox()),
            serviceDateValue.getValueDate(), serviceDateValue.getPage());
    }
/**
* Helper method to convert the service returned {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
* to a SDK level {@link TimeValue}.
*
* @param serviceDateValue The string value returned by the service in
* {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
*
* @return The {@link TimeValue}.
*
*/
private static TimeValue toFieldValueTime(com.azure.ai.formrecognizer.implementation.models.FieldValue
serviceDateValue) {
return new TimeValue(serviceDateValue.getText(), toBoundingBox(serviceDateValue.getBoundingBox()),
serviceDateValue.getValueTime(), serviceDateValue.getPage());
}
} |
Do we want to convert the entire `InputStream` to a `ByteBuffer` in a single go or should this be chunked as downstream requests more elements? Could we have scenarios where a large file is being read that wouldn't fit into memory? | private static ByteBuffer toByteArray(InputStream in) throws IOException {
    ByteArrayOutputStream os = new ByteArrayOutputStream();
    byte[] buffer = new byte[1024];
    int len;
    // Drain the stream fully into memory in 1 KB chunks.
    // NOTE(review): the whole payload is buffered in memory — confirm inputs are small enough that a
    // chunked/streaming conversion is not required.
    while ((len = in.read(buffer)) != -1) {
        os.write(buffer, 0, len);
    }
    return ByteBuffer.wrap(os.toByteArray());
}
    try {
        ByteArrayOutputStream os = new ByteArrayOutputStream();
        byte[] buffer = new byte[1024];
        int len;
        // Drain the stream fully into memory in 1 KB chunks.
        // NOTE(review): the whole payload is buffered in memory — confirm inputs are small enough
        // that a chunked/streaming conversion is not required.
        while ((len = in.read(buffer)) != -1) {
            os.write(buffer, 0, len);
        }
        return ByteBuffer.wrap(os.toByteArray());
    } catch (IOException e) {
        // Wrap as unchecked so reactive callers need no checked-exception handling; logged first.
        throw LOGGER.logExceptionAsError(new RuntimeException(e));
    }
}
private static final ClientLogger LOGGER = new ClientLogger(Utility.class);
/**
* A utility method for converting the input stream to Flux of ByteBuffer.
*
* @param data The input data which needs to convert to ByteBuffer.
*
* @return {@link ByteBuffer} which contains the input data.
* @throws RuntimeException When I/O error occurs.
*/
public static Flux<ByteBuffer> convertStreamToByteBuffer(InputStream data) {
try {
return Flux.just(toByteArray(data));
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e));
}
}
} | class Utility {
    private static final ClientLogger LOGGER = new ClientLogger(Utility.class);
    // Static utility class; not instantiable.
    private Utility() {
    }
    /**
     * A utility method for converting the input stream to Flux of ByteBuffer.
     *
     * @param data The input data which needs to convert to ByteBuffer.
     *
     * @return {@link ByteBuffer} which contains the input data.
     * @throws RuntimeException When I/O error occurs.
     */
    public static Flux<ByteBuffer> convertStreamToByteBuffer(InputStream data) {
        // NOTE(review): toByteArray runs eagerly as an argument to Flux.just, so its RuntimeException
        // is thrown at assembly time and doOnError will never observe it — confirm this is intended
        // (Flux.defer would move the failure into the reactive chain).
        return Flux.just(toByteArray(data))
            .doOnError(error -> LOGGER.warning("Failed to convert stream to byte array - {}", error));
    }
} |
Should put in a fake key, this will throw an exception. | public static void main(final String[] args) {
final FormRecognizerAsyncClient client = new FormRecognizerClientBuilder()
.apiKey(new AzureKeyCredential(""))
.endpoint("https:
.buildAsyncClient();
String receiptUrl = "https:
PollerFlux<OperationResult, IterableStream<ExtractedReceipt>> analyzeReceiptPoller =
client.beginExtractReceipt(receiptUrl, true, Duration.ofSeconds(1));
IterableStream<ExtractedReceipt> receiptPageResults = analyzeReceiptPoller
.last()
.flatMap(trainingOperationResponse -> {
try {
Thread.sleep(20000);
} catch (InterruptedException e) {
e.printStackTrace();
}
if (trainingOperationResponse.getStatus().isComplete()) {
System.out.println("Polling completed successfully");
return trainingOperationResponse.getFinalResult();
} else {
System.out.println("polling completed unsuccessfully with status:"
+ trainingOperationResponse.getStatus());
return Mono.empty();
}
}).block();
for (ExtractedReceipt extractedReceiptItem : receiptPageResults) {
System.out.printf("Page Number %s%n", extractedReceiptItem.getPageMetadata().getPageNumber());
System.out.printf("Merchant Name %s%n", extractedReceiptItem.getMerchantName().getText());
System.out.printf("Merchant Address %s%n", extractedReceiptItem.getMerchantAddress().getText());
System.out.printf("Merchant Phone Number %s%n", extractedReceiptItem.getMerchantPhoneNumber().getText());
System.out.printf("Total: %s%n", extractedReceiptItem.getTotal().getText());
System.out.printf("Receipt Items: %n");
extractedReceiptItem.getReceiptItems().forEach(receiptItem -> {
System.out.printf("Name: %s%n", receiptItem.getName().getText());
System.out.printf("Quantity: %s%n", receiptItem.getQuantity().getText());
System.out.printf("Total Price: %s%n", receiptItem.getTotalPrice().getText());
System.out.println();
});
}
} | .apiKey(new AzureKeyCredential("")) | public static void main(final String[] args) {
FormRecognizerAsyncClient client = new FormRecognizerClientBuilder()
.apiKey(new AzureKeyCredential("{api_key}"))
.endpoint("https:
.buildAsyncClient();
String receiptUrl = "https:
PollerFlux<OperationResult, IterableStream<ExtractedReceipt>> analyzeReceiptPoller =
client.beginExtractReceiptsFromUrl(receiptUrl);
IterableStream<ExtractedReceipt> receiptPageResults = analyzeReceiptPoller
.last()
.flatMap(trainingOperationResponse -> {
if (trainingOperationResponse.getStatus().isComplete()) {
System.out.println("Polling completed successfully");
return trainingOperationResponse.getFinalResult();
} else {
System.out.println("polling completed unsuccessfully with status:"
+ trainingOperationResponse.getStatus());
return Mono.empty();
}
}).block();
receiptPageResults.forEach(extractedReceiptItem -> {
System.out.printf("Page Number %s%n", extractedReceiptItem.getPageMetadata().getPageNumber());
System.out.printf("Merchant Name %s%n", extractedReceiptItem.getMerchantName().getText());
System.out.printf("Merchant Address %s%n", extractedReceiptItem.getMerchantAddress().getText());
System.out.printf("Merchant Phone Number %s%n", extractedReceiptItem.getMerchantPhoneNumber().getText());
System.out.printf("Total: %s%n", extractedReceiptItem.getTotal().getText());
System.out.printf("Receipt Items: %n");
extractedReceiptItem.getReceiptItems().forEach(receiptItem -> {
System.out.printf("Name: %s%n", receiptItem.getName().getText());
System.out.printf("Quantity: %s%n", receiptItem.getQuantity().getText());
System.out.printf("Total Price: %s%n", receiptItem.getTotalPrice().getText());
System.out.println();
});
});
} | class ExtractPrebuiltReceiptAsync {
} | class ExtractPrebuiltReceiptAsync {
/**
* Sample for extracting receipt information using input stream.
*
* @param args Unused. Arguments to the program.
*/
} |
Why do we sleep here for 20 seconds? The captured `trainingOperationResponse` won't change during the sleep period. | public static void main(final String[] args) {
final FormRecognizerAsyncClient client = new FormRecognizerClientBuilder()
.apiKey(new AzureKeyCredential(""))
.endpoint("https:
.buildAsyncClient();
String receiptUrl = "https:
PollerFlux<OperationResult, IterableStream<ExtractedReceipt>> analyzeReceiptPoller =
client.beginExtractReceipt(receiptUrl, true, Duration.ofSeconds(1));
IterableStream<ExtractedReceipt> receiptPageResults = analyzeReceiptPoller
.last()
.flatMap(trainingOperationResponse -> {
try {
Thread.sleep(20000);
} catch (InterruptedException e) {
e.printStackTrace();
}
if (trainingOperationResponse.getStatus().isComplete()) {
System.out.println("Polling completed successfully");
return trainingOperationResponse.getFinalResult();
} else {
System.out.println("polling completed unsuccessfully with status:"
+ trainingOperationResponse.getStatus());
return Mono.empty();
}
}).block();
for (ExtractedReceipt extractedReceiptItem : receiptPageResults) {
System.out.printf("Page Number %s%n", extractedReceiptItem.getPageMetadata().getPageNumber());
System.out.printf("Merchant Name %s%n", extractedReceiptItem.getMerchantName().getText());
System.out.printf("Merchant Address %s%n", extractedReceiptItem.getMerchantAddress().getText());
System.out.printf("Merchant Phone Number %s%n", extractedReceiptItem.getMerchantPhoneNumber().getText());
System.out.printf("Total: %s%n", extractedReceiptItem.getTotal().getText());
System.out.printf("Receipt Items: %n");
extractedReceiptItem.getReceiptItems().forEach(receiptItem -> {
System.out.printf("Name: %s%n", receiptItem.getName().getText());
System.out.printf("Quantity: %s%n", receiptItem.getQuantity().getText());
System.out.printf("Total Price: %s%n", receiptItem.getTotalPrice().getText());
System.out.println();
});
}
} | Thread.sleep(20000); | public static void main(final String[] args) {
FormRecognizerAsyncClient client = new FormRecognizerClientBuilder()
.apiKey(new AzureKeyCredential("{api_key}"))
.endpoint("https:
.buildAsyncClient();
String receiptUrl = "https:
PollerFlux<OperationResult, IterableStream<ExtractedReceipt>> analyzeReceiptPoller =
client.beginExtractReceiptsFromUrl(receiptUrl);
IterableStream<ExtractedReceipt> receiptPageResults = analyzeReceiptPoller
.last()
.flatMap(trainingOperationResponse -> {
if (trainingOperationResponse.getStatus().isComplete()) {
System.out.println("Polling completed successfully");
return trainingOperationResponse.getFinalResult();
} else {
System.out.println("polling completed unsuccessfully with status:"
+ trainingOperationResponse.getStatus());
return Mono.empty();
}
}).block();
receiptPageResults.forEach(extractedReceiptItem -> {
System.out.printf("Page Number %s%n", extractedReceiptItem.getPageMetadata().getPageNumber());
System.out.printf("Merchant Name %s%n", extractedReceiptItem.getMerchantName().getText());
System.out.printf("Merchant Address %s%n", extractedReceiptItem.getMerchantAddress().getText());
System.out.printf("Merchant Phone Number %s%n", extractedReceiptItem.getMerchantPhoneNumber().getText());
System.out.printf("Total: %s%n", extractedReceiptItem.getTotal().getText());
System.out.printf("Receipt Items: %n");
extractedReceiptItem.getReceiptItems().forEach(receiptItem -> {
System.out.printf("Name: %s%n", receiptItem.getName().getText());
System.out.printf("Quantity: %s%n", receiptItem.getQuantity().getText());
System.out.printf("Total Price: %s%n", receiptItem.getTotalPrice().getText());
System.out.println();
});
});
} | class ExtractPrebuiltReceiptAsync {
} | class ExtractPrebuiltReceiptAsync {
/**
* Sample for extracting receipt information using input stream.
*
* @param args Unused. Arguments to the program.
*/
} |
Will discuss offline on this one with you. | public Mono<CosmosAsyncUserDefinedFunctionResponse> replace(CosmosUserDefinedFunctionProperties udfSettings) {
return container.getDatabase()
.getDocClientWrapper()
.replaceUserDefinedFunction(new UserDefinedFunction(ModelBridgeInternal.toJsonFromJsonSerializable(udfSettings)), null)
.map(response -> ModelBridgeInternal.createCosmosAsyncUserDefinedFunctionResponse(response, container))
.single();
} | .replaceUserDefinedFunction(new UserDefinedFunction(ModelBridgeInternal.toJsonFromJsonSerializable(udfSettings)), null) | public Mono<CosmosAsyncUserDefinedFunctionResponse> replace(CosmosUserDefinedFunctionProperties udfSettings) {
return container.getDatabase()
.getDocClientWrapper()
.replaceUserDefinedFunction(new UserDefinedFunction(ModelBridgeInternal.toJsonFromJsonSerializable(udfSettings)), null)
.map(response -> ModelBridgeInternal.createCosmosAsyncUserDefinedFunctionResponse(response, container))
.single();
} | class CosmosAsyncUserDefinedFunction {
@SuppressWarnings("EnforceFinalFields")
private final CosmosAsyncContainer container;
private String id;
CosmosAsyncUserDefinedFunction(String id, CosmosAsyncContainer container) {
this.id = id;
this.container = container;
}
/**
* Get the id of the {@link CosmosAsyncUserDefinedFunction}
*
* @return the id of the {@link CosmosAsyncUserDefinedFunction}
*/
public String getId() {
return id;
}
/**
* Set the id of the {@link CosmosAsyncUserDefinedFunction}
*
* @param id the id of the {@link CosmosAsyncUserDefinedFunction}
* @return the same {@link CosmosAsyncUserDefinedFunction} that had the id set
*/
CosmosAsyncUserDefinedFunction setId(String id) {
this.id = id;
return this;
}
/**
* Read a user defined function.
* <p>
* After subscription the operation will be performed.
* The {@link Mono} upon successful completion will contain a single resource response for the read user defined
* function.
* In case of failure the {@link Mono} will error.
*
* @return an {@link Mono} containing the single resource response for the read user defined function or an error.
*/
public Mono<CosmosAsyncUserDefinedFunctionResponse> read() {
return container.getDatabase().getDocClientWrapper().readUserDefinedFunction(getLink(), null)
.map(response -> ModelBridgeInternal.createCosmosAsyncUserDefinedFunctionResponse(response, container)).single();
}
/**
* Replaces a cosmos user defined function.
* <p>
* After subscription the operation will be performed.
* The {@link Mono} upon successful completion will contain a single resource response with the replaced user
* defined function.
* In case of failure the {@link Mono} will error.
*
* @param udfSettings the cosmos user defined function properties.
* @return an {@link Mono} containing the single resource response with the replaced cosmos user defined function
* or an error.
*/
/**
* Deletes a cosmos user defined function.
* <p>
* After subscription the operation will be performed.
* The {@link Mono} upon successful completion will contain a single resource response for the deleted user
* defined function.
* In case of failure the {@link Mono} will error.
*
* @return an {@link Mono} containing the single resource response for the deleted cosmos user defined function or
* an error.
*/
public Mono<CosmosAsyncUserDefinedFunctionResponse> delete() {
return container.getDatabase()
.getDocClientWrapper()
.deleteUserDefinedFunction(this.getLink(), null)
.map(response -> ModelBridgeInternal.createCosmosAsyncUserDefinedFunctionResponse(response, container))
.single();
}
String getURIPathSegment() {
return Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT;
}
String getParentLink() {
return container.getLink();
}
String getLink() {
StringBuilder builder = new StringBuilder();
builder.append(getParentLink());
builder.append("/");
builder.append(getURIPathSegment());
builder.append("/");
builder.append(getId());
return builder.toString();
}
} | class CosmosAsyncUserDefinedFunction {
@SuppressWarnings("EnforceFinalFields")
private final CosmosAsyncContainer container;
private String id;
CosmosAsyncUserDefinedFunction(String id, CosmosAsyncContainer container) {
this.id = id;
this.container = container;
}
/**
* Get the id of the {@link CosmosAsyncUserDefinedFunction}
*
* @return the id of the {@link CosmosAsyncUserDefinedFunction}
*/
public String getId() {
return id;
}
/**
* Set the id of the {@link CosmosAsyncUserDefinedFunction}
*
* @param id the id of the {@link CosmosAsyncUserDefinedFunction}
* @return the same {@link CosmosAsyncUserDefinedFunction} that had the id set
*/
CosmosAsyncUserDefinedFunction setId(String id) {
this.id = id;
return this;
}
/**
* Read a user defined function.
* <p>
* After subscription the operation will be performed.
* The {@link Mono} upon successful completion will contain a single resource response for the read user defined
* function.
* In case of failure the {@link Mono} will error.
*
* @return an {@link Mono} containing the single resource response for the read user defined function or an error.
*/
public Mono<CosmosAsyncUserDefinedFunctionResponse> read() {
return container.getDatabase().getDocClientWrapper().readUserDefinedFunction(getLink(), null)
.map(response -> ModelBridgeInternal.createCosmosAsyncUserDefinedFunctionResponse(response, container)).single();
}
/**
* Replaces a cosmos user defined function.
* <p>
* After subscription the operation will be performed.
* The {@link Mono} upon successful completion will contain a single resource response with the replaced user
* defined function.
* In case of failure the {@link Mono} will error.
*
* @param udfSettings the cosmos user defined function properties.
* @return an {@link Mono} containing the single resource response with the replaced cosmos user defined function
* or an error.
*/
/**
* Deletes a cosmos user defined function.
* <p>
* After subscription the operation will be performed.
* The {@link Mono} upon successful completion will contain a single resource response for the deleted user
* defined function.
* In case of failure the {@link Mono} will error.
*
* @return an {@link Mono} containing the single resource response for the deleted cosmos user defined function or
* an error.
*/
public Mono<CosmosAsyncUserDefinedFunctionResponse> delete() {
return container.getDatabase()
.getDocClientWrapper()
.deleteUserDefinedFunction(this.getLink(), null)
.map(response -> ModelBridgeInternal.createCosmosAsyncUserDefinedFunctionResponse(response, container))
.single();
}
String getURIPathSegment() {
return Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT;
}
String getParentLink() {
return container.getLink();
}
String getLink() {
StringBuilder builder = new StringBuilder();
builder.append(getParentLink());
builder.append("/");
builder.append(getURIPathSegment());
builder.append("/");
builder.append(getId());
return builder.toString();
}
} |
This could be injected into the `flatMap` lambda. | public static void main(final String[] args) {
final FormRecognizerAsyncClient client = new FormRecognizerClientBuilder()
.apiKey(new AzureKeyCredential(""))
.endpoint("https:
.buildAsyncClient();
String receiptUrl = "https:
PollerFlux<OperationResult, IterableStream<ExtractedReceipt>> analyzeReceiptPoller =
client.beginExtractReceipt(receiptUrl, true, Duration.ofSeconds(1));
IterableStream<ExtractedReceipt> receiptPageResults = analyzeReceiptPoller
.last()
.flatMap(trainingOperationResponse -> {
try {
Thread.sleep(20000);
} catch (InterruptedException e) {
e.printStackTrace();
}
if (trainingOperationResponse.getStatus().isComplete()) {
System.out.println("Polling completed successfully");
return trainingOperationResponse.getFinalResult();
} else {
System.out.println("polling completed unsuccessfully with status:"
+ trainingOperationResponse.getStatus());
return Mono.empty();
}
}).block();
for (ExtractedReceipt extractedReceiptItem : receiptPageResults) {
System.out.printf("Page Number %s%n", extractedReceiptItem.getPageMetadata().getPageNumber());
System.out.printf("Merchant Name %s%n", extractedReceiptItem.getMerchantName().getText());
System.out.printf("Merchant Address %s%n", extractedReceiptItem.getMerchantAddress().getText());
System.out.printf("Merchant Phone Number %s%n", extractedReceiptItem.getMerchantPhoneNumber().getText());
System.out.printf("Total: %s%n", extractedReceiptItem.getTotal().getText());
System.out.printf("Receipt Items: %n");
extractedReceiptItem.getReceiptItems().forEach(receiptItem -> {
System.out.printf("Name: %s%n", receiptItem.getName().getText());
System.out.printf("Quantity: %s%n", receiptItem.getQuantity().getText());
System.out.printf("Total Price: %s%n", receiptItem.getTotalPrice().getText());
System.out.println();
});
}
} | for (ExtractedReceipt extractedReceiptItem : receiptPageResults) { | public static void main(final String[] args) {
FormRecognizerAsyncClient client = new FormRecognizerClientBuilder()
.apiKey(new AzureKeyCredential("{api_key}"))
.endpoint("https:
.buildAsyncClient();
String receiptUrl = "https:
PollerFlux<OperationResult, IterableStream<ExtractedReceipt>> analyzeReceiptPoller =
client.beginExtractReceiptsFromUrl(receiptUrl);
IterableStream<ExtractedReceipt> receiptPageResults = analyzeReceiptPoller
.last()
.flatMap(trainingOperationResponse -> {
if (trainingOperationResponse.getStatus().isComplete()) {
System.out.println("Polling completed successfully");
return trainingOperationResponse.getFinalResult();
} else {
System.out.println("polling completed unsuccessfully with status:"
+ trainingOperationResponse.getStatus());
return Mono.empty();
}
}).block();
receiptPageResults.forEach(extractedReceiptItem -> {
System.out.printf("Page Number %s%n", extractedReceiptItem.getPageMetadata().getPageNumber());
System.out.printf("Merchant Name %s%n", extractedReceiptItem.getMerchantName().getText());
System.out.printf("Merchant Address %s%n", extractedReceiptItem.getMerchantAddress().getText());
System.out.printf("Merchant Phone Number %s%n", extractedReceiptItem.getMerchantPhoneNumber().getText());
System.out.printf("Total: %s%n", extractedReceiptItem.getTotal().getText());
System.out.printf("Receipt Items: %n");
extractedReceiptItem.getReceiptItems().forEach(receiptItem -> {
System.out.printf("Name: %s%n", receiptItem.getName().getText());
System.out.printf("Quantity: %s%n", receiptItem.getQuantity().getText());
System.out.printf("Total Price: %s%n", receiptItem.getTotalPrice().getText());
System.out.println();
});
});
} | class ExtractPrebuiltReceiptAsync {
} | class ExtractPrebuiltReceiptAsync {
/**
* Sample for extracting receipt information using input stream.
*
* @param args Unused. Arguments to the program.
*/
} |
Should put in a fake key, this will throw an exception. | public static void main(final String[] args) throws IOException {
final FormRecognizerClient client = new FormRecognizerClientBuilder()
.apiKey(new AzureKeyCredential(""))
.endpoint("https:
.buildClient();
File sourceFile = new File("
byte[] fileContent = Files.readAllBytes(sourceFile.toPath());
InputStream targetStream = new ByteArrayInputStream(fileContent);
SyncPoller<OperationResult, IterableStream<ExtractedReceipt>> analyzeReceiptPoller =
client.beginExtractReceipt(targetStream, sourceFile.length(), FormContentType.IMAGE_PNG, true,
Duration.ofSeconds(4));
IterableStream<ExtractedReceipt> receiptPageResults = analyzeReceiptPoller.getFinalResult();
for (ExtractedReceipt extractedReceiptItem : receiptPageResults) {
System.out.printf("Page Number %s%n", extractedReceiptItem.getPageMetadata().getPageNumber());
System.out.printf("Merchant Name %s%n", extractedReceiptItem.getMerchantName().getText());
System.out.printf("Merchant Name Value: %s%n", extractedReceiptItem.getMerchantName().getValue());
System.out.printf("Merchant Address %s%n", extractedReceiptItem.getMerchantAddress().getText());
System.out.printf("Merchant Address Value: %s%n", extractedReceiptItem.getMerchantAddress().getValue());
System.out.printf("Merchant Phone Number %s%n", extractedReceiptItem.getMerchantPhoneNumber().getText());
System.out.printf("Merchant Phone Number Value: %s%n", extractedReceiptItem.getMerchantPhoneNumber().getValue());
System.out.printf("Total: %s%n", extractedReceiptItem.getTotal().getText());
System.out.printf("Total Value: %s%n", extractedReceiptItem.getTotal().getValue());
System.out.printf("Receipt Items: %n");
extractedReceiptItem.getReceiptItems().forEach(receiptItem -> {
System.out.printf("Name: %s%n", receiptItem.getName().getText());
System.out.printf("Quantity: %s%n", receiptItem.getQuantity() == null
? "N/A" : receiptItem.getQuantity().getText());
System.out.printf("Total Price: %s%n", receiptItem.getTotalPrice().getText());
System.out.println();
});
}
} | .apiKey(new AzureKeyCredential("")) | public static void main(final String[] args) throws IOException {
FormRecognizerClient client = new FormRecognizerClientBuilder()
.apiKey(new AzureKeyCredential("{api_key}"))
.endpoint("https:
.buildClient();
File sourceFile = new File("
byte[] fileContent = Files.readAllBytes(sourceFile.toPath());
InputStream targetStream = new ByteArrayInputStream(fileContent);
SyncPoller<OperationResult, IterableStream<ExtractedReceipt>> analyzeReceiptPoller =
client.beginExtractReceipts(targetStream, sourceFile.length(), FormContentType.IMAGE_PNG, true,
Duration.ofSeconds(5));
IterableStream<ExtractedReceipt> receiptPageResults = analyzeReceiptPoller.getFinalResult();
receiptPageResults.forEach(extractedReceiptItem -> {
System.out.printf("Page Number %s%n", extractedReceiptItem.getPageMetadata().getPageNumber());
System.out.printf("Merchant Name %s%n", extractedReceiptItem.getMerchantName().getText());
System.out.printf("Merchant Name Value: %s%n", extractedReceiptItem.getMerchantName().getValue());
System.out.printf("Merchant Address %s%n", extractedReceiptItem.getMerchantAddress().getText());
System.out.printf("Merchant Address Value: %s%n", extractedReceiptItem.getMerchantAddress().getValue());
System.out.printf("Merchant Phone Number %s%n", extractedReceiptItem.getMerchantPhoneNumber().getText());
System.out.printf("Merchant Phone Number Value: %s%n", extractedReceiptItem.getMerchantPhoneNumber().getValue());
System.out.printf("Total: %s%n", extractedReceiptItem.getTotal().getText());
System.out.printf("Total Value: %s%n", extractedReceiptItem.getTotal().getValue());
System.out.printf("Receipt Items: %n");
extractedReceiptItem.getReceiptItems().forEach(receiptItem -> {
System.out.printf("Name: %s%n", receiptItem.getName().getText());
System.out.printf("Quantity: %s%n", receiptItem.getQuantity() == null
? "N/A" : receiptItem.getQuantity().getText());
System.out.printf("Total Price: %s%n", receiptItem.getTotalPrice().getText());
System.out.println();
});
});
} | class ExtractPrebuiltReceiptSync {
/**
* Sample for extracting receipt information using input stream.
*
* @param args Unused. Arguments to the program.
* @throws IOException Exception thrown when there is an error in reading all the bytes from the File.
*/
} | class ExtractPrebuiltReceiptSync {
/**
* Sample for extracting receipt information using input stream.
*
* @param args Unused. Arguments to the program.
* @throws IOException Exception thrown when there is an error in reading all the bytes from the File.
*/
} |
As this being a trained model, it would be unlikely to have additional keys or to expect this to be updated very often. Further, for any new introductions from the service side to keep this up to date this will have to go through updates accordingly. Also, with such specific string matching, I am not sure how we could make it generic in nature? | static IterableStream<ExtractedReceipt> toReceipt(AnalyzeResult analyzeResult, boolean includeTextDetails) {
List<ReadResult> readResults = analyzeResult.getReadResults();
List<DocumentResult> documentResult = analyzeResult.getDocumentResults();
List<ExtractedReceipt> extractedReceiptList = new ArrayList<>();
for (int i = 0; i < readResults.size(); i++) {
ReadResult readResultItem = readResults.get(i);
PageMetadata pageMetadata = getPageInfo(readResultItem);
ExtractedReceipt extractedReceiptItem = new ExtractedReceipt(pageMetadata);
DocumentResult documentResultItem = documentResult.get(i);
documentResultItem.getFields().forEach((key, fieldValue) -> {
switch (key) {
case "ReceiptType":
extractedReceiptItem.setReceiptType(new ReceiptType(fieldValue.getValueString(),
fieldValue.getConfidence()));
break;
case "MerchantName":
extractedReceiptItem.setMerchantName(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "MerchantAddress":
extractedReceiptItem.setMerchantAddress(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "MerchantPhoneNumber":
extractedReceiptItem.setMerchantPhoneNumber(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "Subtotal":
extractedReceiptItem.setSubtotal(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "Tax":
extractedReceiptItem.setTax(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "Tip":
extractedReceiptItem.setTip(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "Total":
extractedReceiptItem.setTotal(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "TransactionDate":
extractedReceiptItem.setTransactionDate(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "TransactionTime":
extractedReceiptItem.setTransactionTime(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "Items":
extractedReceiptItem.setReceiptItems(toReceiptItems(fieldValue.getValueArray(), readResults, includeTextDetails));
break;
default:
break;
}
});
extractedReceiptList.add(extractedReceiptItem);
}
return new IterableStream<>(extractedReceiptList);
} | break; | static IterableStream<ExtractedReceipt> toReceipt(AnalyzeResult analyzeResult, boolean includeTextDetails) {
List<ReadResult> readResults = analyzeResult.getReadResults();
List<DocumentResult> documentResult = analyzeResult.getDocumentResults();
List<ExtractedReceipt> extractedReceiptList = new ArrayList<>();
for (int i = 0; i < readResults.size(); i++) {
ReadResult readResultItem = readResults.get(i);
PageMetadata pageMetadata = getPageInfo(readResultItem);
PageRange pageRange = null;
DocumentResult documentResultItem = documentResult.get(i);
List<Integer> receiptPageRange = documentResultItem.getPageRange();
if (receiptPageRange.size() == 2) {
pageRange = new PageRange(receiptPageRange.get(0), receiptPageRange.get(1));
}
ExtractedReceipt extractedReceiptItem = new ExtractedReceipt(pageMetadata, pageRange);
Map<String, FieldValue<?>> extractedFieldMap = new HashMap<>();
documentResultItem.getFields().forEach((key, fieldValue) -> {
switch (key) {
case "ReceiptType":
extractedReceiptItem.setReceiptType(new ReceiptType(fieldValue.getValueString(),
fieldValue.getConfidence()));
break;
case "MerchantName":
extractedReceiptItem.setMerchantName(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "MerchantAddress":
extractedReceiptItem.setMerchantAddress(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "MerchantPhoneNumber":
extractedReceiptItem.setMerchantPhoneNumber(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "Subtotal":
extractedReceiptItem.setSubtotal(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "Tax":
extractedReceiptItem.setTax(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "Tip":
extractedReceiptItem.setTip(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "Total":
extractedReceiptItem.setTotal(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "TransactionDate":
extractedReceiptItem.setTransactionDate(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "TransactionTime":
extractedReceiptItem.setTransactionTime(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "Items":
extractedReceiptItem.setReceiptItems(toReceiptItems(fieldValue.getValueArray(), readResults, includeTextDetails));
break;
default:
extractedFieldMap.putIfAbsent(key, setFieldValue(fieldValue, readResults, includeTextDetails));
break;
}
});
extractedReceiptItem.setExtractedFields(extractedFieldMap);
extractedReceiptList.add(extractedReceiptItem);
}
return new IterableStream<>(extractedReceiptList);
} | class Transforms {
private static final ClientLogger LOGGER = new ClientLogger(Transforms.class);
private static final Pattern COMPILE = Pattern.compile("[^0-9]+");
private Transforms() {
}
private static FieldValue<?> setFieldValue(com.azure.ai.formrecognizer.implementation.models.FieldValue fieldValue,
List<ReadResult> readResults, boolean includeTextDetails) {
FieldValue<?> value;
switch (fieldValue.getType()) {
case PHONE_NUMBER:
case STRING:
case TIME:
case DATE:
value = toFieldValueString(fieldValue);
break;
case INTEGER:
value = toFieldValueInteger(fieldValue);
break;
case NUMBER:
value = toFieldValueNumber(fieldValue);
break;
case ARRAY:
case OBJECT:
default:
throw LOGGER.logExceptionAsError(new RuntimeException("FieldValue Type not supported"));
}
if (includeTextDetails) {
value.setElements(setReferenceElements(readResults, fieldValue.getElements()));
}
return value;
}
private static PageMetadata getPageInfo(ReadResult readResultItem) {
return new PageMetadata(TextLanguage.fromString(readResultItem.getLanguage().toString()),
readResultItem.getHeight(), readResultItem.getPage(), readResultItem.getWidth(),
readResultItem.getAngle(), DimensionUnit.fromString(readResultItem.getUnit().toString()));
}
private static List<Element> setReferenceElements(List<ReadResult> readResults, List<String> elements) {
List<Element> elementList = new ArrayList<>();
elements.forEach(elementString -> {
String[] indices = COMPILE.matcher(elementString).replaceAll(" ").trim().split(" ");
int readResultIndex, lineIndex;
if (indices.length >= 1) {
readResultIndex = Integer.parseInt(indices[0]);
lineIndex = Integer.parseInt(indices[1]);
} else {
throw LOGGER.logExceptionAsError(new RuntimeException("Reference Elements not found"));
}
if (indices.length == 3) {
int wordIndex = Integer.parseInt(indices[2]);
TextWord textWord = readResults.get(readResultIndex).getLines().get(lineIndex).getWords()
.get(wordIndex);
WordElement wordElement = new WordElement(textWord.getText(), toBoundingBox(textWord.getBoundingBox()));
elementList.add(wordElement);
} else {
TextLine textLine = readResults.get(readResultIndex).getLines().get(lineIndex);
LineElement lineElement = new LineElement(textLine.getText(), toBoundingBox(textLine.getBoundingBox()));
elementList.add(lineElement);
}
});
return elementList;
}
private static BoundingBox toBoundingBox(List<Float> boundingBox) {
List<Point> pointList = new ArrayList<>();
for (int i = 0; i < boundingBox.size(); i += 2) {
Point point = new Point(boundingBox.get(i), boundingBox.get(i + 1));
pointList.add(point);
}
return new BoundingBox(pointList);
}
private static List<ReceiptItem> toReceiptItems(
List<com.azure.ai.formrecognizer.implementation.models.FieldValue> fieldValue, List<ReadResult> readResults, boolean includeTextDetails) {
List<ReceiptItem> receiptItemList = new ArrayList<>();
fieldValue.forEach(fieldValue1 -> {
ReceiptItem receiptItem = new ReceiptItem();
fieldValue1.getValueObject().forEach((key, fieldValue2) -> {
switch (key) {
case "Quantity":
receiptItem.setQuantity(setFieldValue(fieldValue2, readResults, includeTextDetails));
break;
case "Name":
receiptItem.setName(setFieldValue(fieldValue2, readResults, includeTextDetails));
break;
case "TotalPrice":
receiptItem.setTotalPrice(setFieldValue(fieldValue2, readResults, includeTextDetails));
break;
default:
break;
}
});
receiptItemList.add(receiptItem);
});
return receiptItemList;
}
private static IntegerValue toFieldValueInteger(com.azure.ai.formrecognizer.implementation.models.FieldValue
serviceIntegerValue) {
if (serviceIntegerValue.getValueNumber() != null) {
return new IntegerValue(serviceIntegerValue.getText(), toBoundingBox(serviceIntegerValue.getBoundingBox()),
serviceIntegerValue.getValueInteger());
}
return new IntegerValue(serviceIntegerValue.getText(), toBoundingBox(serviceIntegerValue.getBoundingBox()), 0);
}
private static StringValue toFieldValueString(com.azure.ai.formrecognizer.implementation.models.FieldValue
serviceStringValue) {
return new StringValue(serviceStringValue.getText(), toBoundingBox(serviceStringValue.getBoundingBox()),
serviceStringValue.getValueString());
}
private static FloatValue toFieldValueNumber(com.azure.ai.formrecognizer.implementation.models.FieldValue
serviceFloatValue) {
return new FloatValue(serviceFloatValue.getText(), toBoundingBox(serviceFloatValue.getBoundingBox()),
serviceFloatValue.getValueNumber());
}
} | class Transforms {
private static final ClientLogger LOGGER = new ClientLogger(Transforms.class);
private static final Pattern COMPILE = Pattern.compile("[^0-9]+");
private Transforms() {
}
/**
* Helper method to convert the {@link com.azure.ai.formrecognizer.implementation.models.AnalyzeOperationResult}
* service level receipt model to list of {@link ExtractedReceipt}.
*
* @param analyzeResult The result of the analyze receipt operation returned by the service.
* @param includeTextDetails When set to true, a list of references to the text elements is returned in the read result.
*
* @return A list of {@link ExtractedReceipt} to represent the list of extracted receipt information.
*/
/**
* Helper method that converts the incoming service field value to one of the strongly typed SDK level {@link FieldValue} with
* reference elements set when {@code includeTextDetails} is set to true.
*
* @param fieldValue The named field values returned by the service.
* @param readResults The result containing the list of element references when includeTextDetails is set to true.
* @param includeTextDetails When set to true, a list of references to the text elements is returned in the read result.
*
* @return The strongly typed {@link FieldValue} for the field input.
*/
private static FieldValue<?> setFieldValue(com.azure.ai.formrecognizer.implementation.models.FieldValue fieldValue,
List<ReadResult> readResults, boolean includeTextDetails) {
FieldValue<?> value;
switch (fieldValue.getType()) {
case PHONE_NUMBER:
value = toFieldValuePhoneNumber(fieldValue);
break;
case STRING:
value = toFieldValueString(fieldValue);
break;
case TIME:
value = toFieldValueTime(fieldValue);
break;
case DATE:
value = toFieldValueDate(fieldValue);
break;
case INTEGER:
value = toFieldValueInteger(fieldValue);
break;
case NUMBER:
value = toFieldValueNumber(fieldValue);
break;
case ARRAY:
case OBJECT:
default:
throw LOGGER.logExceptionAsError(new RuntimeException("FieldValue Type not supported"));
}
if (includeTextDetails) {
value.setElements(setReferenceElements(readResults, fieldValue.getElements()));
}
return value;
}
/**
* Helper method that converts the service returned page information to SDK model {@link PageMetadata}.
*
* @param readResultItem A read result item returned from the service containing the page information for provided
* input.
*
* @return The {@link PageMetadata} for the receipt page.
*/
private static PageMetadata getPageInfo(ReadResult readResultItem) {
return new PageMetadata(readResultItem.getHeight(), readResultItem.getPage(), readResultItem.getWidth(),
readResultItem.getAngle(), DimensionUnit.fromString(readResultItem.getUnit().toString()));
}
/**
* Helper method to set the text reference elements on FieldValue/fields when {@code includeTextDetails} set to true.
*
* @param readResults The ReadResult containing the resolved references for text elements.
* @param elements When includeTextDetails is set to true, a list of references to the text
* elements constituting this field value.
*
* @return The updated {@link FieldValue} object with list if referenced elements.
*/
private static List<Element> setReferenceElements(List<ReadResult> readResults, List<String> elements) {
List<Element> elementList = new ArrayList<>();
elements.forEach(elementString -> {
String[] indices = COMPILE.matcher(elementString).replaceAll(" ").trim().split(" ");
int readResultIndex, lineIndex;
if (indices.length >= 1) {
readResultIndex = Integer.parseInt(indices[0]);
lineIndex = Integer.parseInt(indices[1]);
} else {
throw LOGGER.logExceptionAsError(new RuntimeException("Reference Elements not found"));
}
if (indices.length == 3) {
int wordIndex = Integer.parseInt(indices[2]);
TextWord textWord = readResults.get(readResultIndex).getLines().get(lineIndex).getWords()
.get(wordIndex);
WordElement wordElement = new WordElement(textWord.getText(), toBoundingBox(textWord.getBoundingBox()));
elementList.add(wordElement);
} else {
TextLine textLine = readResults.get(readResultIndex).getLines().get(lineIndex);
LineElement lineElement = new LineElement(textLine.getText(), toBoundingBox(textLine.getBoundingBox()));
elementList.add(lineElement);
}
});
return elementList;
}
/**
* Helper method to convert the service level modeled eight numbers representing the four points to SDK level
* {@link BoundingBox}.
*
* @param boundingBox A list of eight numbers representing the four points of a box.
*
* @return A {@link BoundingBox}.
*/
private static BoundingBox toBoundingBox(List<Float> boundingBox) {
BoundingBox boundingBox1;
if (boundingBox.size() == 8) {
Point topLeft = new Point(boundingBox.get(0), boundingBox.get(1));
Point topRight = new Point(boundingBox.get(2), boundingBox.get(3));
Point bottomLeft = new Point(boundingBox.get(4), boundingBox.get(5));
Point bottomRight = new Point(boundingBox.get(6), boundingBox.get(7));
boundingBox1 = new BoundingBox(topLeft, topRight, bottomLeft, bottomRight);
} else {
return null;
}
return boundingBox1;
}
/**
* Helper method to convert the service level {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
* to SDK level {@link ReceiptItem receipt items}.
*
* @param fieldValueItems The named field values returned by the service.
* @param readResults The result containing the list of element references when includeTextDetails is set to true.
* @param includeTextDetails When set to true, a list of references to the text elements is returned in the read result.
*
* @return A list of {@link ReceiptItem}.
*/
private static List<ReceiptItem> toReceiptItems(
List<com.azure.ai.formrecognizer.implementation.models.FieldValue> fieldValueItems, List<ReadResult> readResults, boolean includeTextDetails) {
List<ReceiptItem> receiptItemList = new ArrayList<>();
for (com.azure.ai.formrecognizer.implementation.models.FieldValue eachFieldValue : fieldValueItems) {
ReceiptItem receiptItem = new ReceiptItem();
for (ReceiptItemType key : ReceiptItemType.values()) {
com.azure.ai.formrecognizer.implementation.models.FieldValue item = eachFieldValue.getValueObject().get(key.toString());
if (QUANTITY.equals(key) && item != null) {
receiptItem.setQuantity(setFieldValue(item, readResults, includeTextDetails));
} else if (NAME.equals(key) && item != null) {
receiptItem.setName(setFieldValue(item, readResults, includeTextDetails));
} else if (PRICE.equals(key) && item != null) {
receiptItem.setPrice(setFieldValue(item, readResults, includeTextDetails));
} else if (TOTAL_PRICE.equals(key) && item != null) {
receiptItem.setTotalPrice(setFieldValue(item, readResults, includeTextDetails));
}
}
receiptItemList.add(receiptItem);
}
return receiptItemList;
}
/**
* Helper method to convert the service returned {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
* to a SDK level {@link IntegerValue}
*
* @param serviceIntegerValue The integer value returned by the service in
* {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
*
* @return The {@link IntegerValue}.
*/
private static IntegerValue toFieldValueInteger(com.azure.ai.formrecognizer.implementation.models.FieldValue
serviceIntegerValue) {
if (serviceIntegerValue.getValueNumber() != null) {
return new IntegerValue(serviceIntegerValue.getText(), toBoundingBox(serviceIntegerValue.getBoundingBox()),
serviceIntegerValue.getValueInteger(), serviceIntegerValue.getPage());
}
return new IntegerValue(serviceIntegerValue.getText(), toBoundingBox(serviceIntegerValue.getBoundingBox()), null, serviceIntegerValue.getPage());
}
/**
* Helper method to convert the service returned {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
* to a SDK level {@link StringValue}.
*
* @param serviceStringValue The string value returned by the service in
* {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
*
* @return The {@link StringValue}.
*/
private static StringValue toFieldValueString(com.azure.ai.formrecognizer.implementation.models.FieldValue
serviceStringValue) {
return new StringValue(serviceStringValue.getText(), toBoundingBox(serviceStringValue.getBoundingBox()),
serviceStringValue.getValueString(), serviceStringValue.getPage());
}
/**
* Helper method to convert the service returned {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
* to a SDK level {@link FloatValue}.
*
* @param serviceFloatValue The float value returned by the service in
* {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
*
* @return The {@link FloatValue}.
*/
private static FloatValue toFieldValueNumber(com.azure.ai.formrecognizer.implementation.models.FieldValue
serviceFloatValue) {
if (serviceFloatValue.getValueNumber() != null) {
return new FloatValue(serviceFloatValue.getText(), toBoundingBox(serviceFloatValue.getBoundingBox()),
serviceFloatValue.getValueNumber(), serviceFloatValue.getPage());
}
return new FloatValue(serviceFloatValue.getText(), toBoundingBox(serviceFloatValue.getBoundingBox()), null, serviceFloatValue.getPage());
}
/**
* Helper method to convert the service returned {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
* to a SDK level {@link StringValue}.
*
* @param serviceDateValue The string value returned by the service in
* {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
*
* @return The {@link StringValue}.
*/
private static StringValue toFieldValuePhoneNumber(com.azure.ai.formrecognizer.implementation.models.FieldValue
serviceDateValue) {
return new StringValue(serviceDateValue.getText(), toBoundingBox(serviceDateValue.getBoundingBox()),
serviceDateValue.getValuePhoneNumber(), serviceDateValue.getPage());
}
/**
* Helper method to convert the service returned {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
* to a SDK level {@link DateValue}.
*
* @param serviceDateValue The string value returned by the service in
* {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
*
* @return The {@link StringValue}.
*/
private static DateValue toFieldValueDate(com.azure.ai.formrecognizer.implementation.models.FieldValue
serviceDateValue) {
return new DateValue(serviceDateValue.getText(), toBoundingBox(serviceDateValue.getBoundingBox()),
serviceDateValue.getValueDate(), serviceDateValue.getPage());
}
/**
* Helper method to convert the service returned {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
* to a SDK level {@link TimeValue}.
*
* @param serviceDateValue The string value returned by the service in
* {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
*
* @return The {@link TimeValue}.
*
*/
private static TimeValue toFieldValueTime(com.azure.ai.formrecognizer.implementation.models.FieldValue
serviceDateValue) {
return new TimeValue(serviceDateValue.getText(), toBoundingBox(serviceDateValue.getBoundingBox()),
serviceDateValue.getValueTime(), serviceDateValue.getPage());
}
} |
I have added a separate issue to track this #9690 And if it helps, the service docs do mention a size limit specification >Image file size must be less than 20 MB. I will also add it to our javadocs. | private static ByteBuffer toByteArray(InputStream in) throws IOException {
ByteArrayOutputStream os = new ByteArrayOutputStream();
byte[] buffer = new byte[1024];
int len;
while ((len = in.read(buffer)) != -1) {
os.write(buffer, 0, len);
}
return ByteBuffer.wrap(os.toByteArray());
} | } | private static ByteBuffer toByteArray(InputStream in) {
try {
ByteArrayOutputStream os = new ByteArrayOutputStream();
byte[] buffer = new byte[1024];
int len;
while ((len = in.read(buffer)) != -1) {
os.write(buffer, 0, len);
}
return ByteBuffer.wrap(os.toByteArray());
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e));
}
} | class Utility {
private static final ClientLogger LOGGER = new ClientLogger(Utility.class);
/**
* A utility method for converting the input stream to Flux of ByteBuffer.
*
* @param data The input data which needs to convert to ByteBuffer.
*
* @return {@link ByteBuffer} which contains the input data.
* @throws RuntimeException When I/O error occurs.
*/
public static Flux<ByteBuffer> convertStreamToByteBuffer(InputStream data) {
try {
return Flux.just(toByteArray(data));
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e));
}
}
} | class Utility {
private static final ClientLogger LOGGER = new ClientLogger(Utility.class);
private Utility() {
}
/**
* A utility method for converting the input stream to Flux of ByteBuffer.
*
* @param data The input data which needs to convert to ByteBuffer.
*
* @return {@link ByteBuffer} which contains the input data.
* @throws RuntimeException When I/O error occurs.
*/
public static Flux<ByteBuffer> convertStreamToByteBuffer(InputStream data) {
return Flux.just(toByteArray(data))
.doOnError(error -> LOGGER.warning("Failed to convert stream to byte array - {}", error));
}
} |
If these keys are not going to be updated frequently, can this be an ExpandableStringEnum? | static IterableStream<ExtractedReceipt> toReceipt(AnalyzeResult analyzeResult, boolean includeTextDetails) {
List<ReadResult> readResults = analyzeResult.getReadResults();
List<DocumentResult> documentResult = analyzeResult.getDocumentResults();
List<ExtractedReceipt> extractedReceiptList = new ArrayList<>();
for (int i = 0; i < readResults.size(); i++) {
ReadResult readResultItem = readResults.get(i);
PageMetadata pageMetadata = getPageInfo(readResultItem);
ExtractedReceipt extractedReceiptItem = new ExtractedReceipt(pageMetadata);
DocumentResult documentResultItem = documentResult.get(i);
documentResultItem.getFields().forEach((key, fieldValue) -> {
switch (key) {
case "ReceiptType":
extractedReceiptItem.setReceiptType(new ReceiptType(fieldValue.getValueString(),
fieldValue.getConfidence()));
break;
case "MerchantName":
extractedReceiptItem.setMerchantName(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "MerchantAddress":
extractedReceiptItem.setMerchantAddress(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "MerchantPhoneNumber":
extractedReceiptItem.setMerchantPhoneNumber(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "Subtotal":
extractedReceiptItem.setSubtotal(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "Tax":
extractedReceiptItem.setTax(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "Tip":
extractedReceiptItem.setTip(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "Total":
extractedReceiptItem.setTotal(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "TransactionDate":
extractedReceiptItem.setTransactionDate(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "TransactionTime":
extractedReceiptItem.setTransactionTime(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "Items":
extractedReceiptItem.setReceiptItems(toReceiptItems(fieldValue.getValueArray(), readResults, includeTextDetails));
break;
default:
break;
}
});
extractedReceiptList.add(extractedReceiptItem);
}
return new IterableStream<>(extractedReceiptList);
} | break; | static IterableStream<ExtractedReceipt> toReceipt(AnalyzeResult analyzeResult, boolean includeTextDetails) {
List<ReadResult> readResults = analyzeResult.getReadResults();
List<DocumentResult> documentResult = analyzeResult.getDocumentResults();
List<ExtractedReceipt> extractedReceiptList = new ArrayList<>();
for (int i = 0; i < readResults.size(); i++) {
ReadResult readResultItem = readResults.get(i);
PageMetadata pageMetadata = getPageInfo(readResultItem);
PageRange pageRange = null;
DocumentResult documentResultItem = documentResult.get(i);
List<Integer> receiptPageRange = documentResultItem.getPageRange();
if (receiptPageRange.size() == 2) {
pageRange = new PageRange(receiptPageRange.get(0), receiptPageRange.get(1));
}
ExtractedReceipt extractedReceiptItem = new ExtractedReceipt(pageMetadata, pageRange);
Map<String, FieldValue<?>> extractedFieldMap = new HashMap<>();
documentResultItem.getFields().forEach((key, fieldValue) -> {
switch (key) {
case "ReceiptType":
extractedReceiptItem.setReceiptType(new ReceiptType(fieldValue.getValueString(),
fieldValue.getConfidence()));
break;
case "MerchantName":
extractedReceiptItem.setMerchantName(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "MerchantAddress":
extractedReceiptItem.setMerchantAddress(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "MerchantPhoneNumber":
extractedReceiptItem.setMerchantPhoneNumber(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "Subtotal":
extractedReceiptItem.setSubtotal(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "Tax":
extractedReceiptItem.setTax(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "Tip":
extractedReceiptItem.setTip(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "Total":
extractedReceiptItem.setTotal(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "TransactionDate":
extractedReceiptItem.setTransactionDate(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "TransactionTime":
extractedReceiptItem.setTransactionTime(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "Items":
extractedReceiptItem.setReceiptItems(toReceiptItems(fieldValue.getValueArray(), readResults, includeTextDetails));
break;
default:
extractedFieldMap.putIfAbsent(key, setFieldValue(fieldValue, readResults, includeTextDetails));
break;
}
});
extractedReceiptItem.setExtractedFields(extractedFieldMap);
extractedReceiptList.add(extractedReceiptItem);
}
return new IterableStream<>(extractedReceiptList);
} | class Transforms {
private static final ClientLogger LOGGER = new ClientLogger(Transforms.class);
private static final Pattern COMPILE = Pattern.compile("[^0-9]+");
private Transforms() {
}
private static FieldValue<?> setFieldValue(com.azure.ai.formrecognizer.implementation.models.FieldValue fieldValue,
List<ReadResult> readResults, boolean includeTextDetails) {
FieldValue<?> value;
switch (fieldValue.getType()) {
case PHONE_NUMBER:
case STRING:
case TIME:
case DATE:
value = toFieldValueString(fieldValue);
break;
case INTEGER:
value = toFieldValueInteger(fieldValue);
break;
case NUMBER:
value = toFieldValueNumber(fieldValue);
break;
case ARRAY:
case OBJECT:
default:
throw LOGGER.logExceptionAsError(new RuntimeException("FieldValue Type not supported"));
}
if (includeTextDetails) {
value.setElements(setReferenceElements(readResults, fieldValue.getElements()));
}
return value;
}
private static PageMetadata getPageInfo(ReadResult readResultItem) {
return new PageMetadata(TextLanguage.fromString(readResultItem.getLanguage().toString()),
readResultItem.getHeight(), readResultItem.getPage(), readResultItem.getWidth(),
readResultItem.getAngle(), DimensionUnit.fromString(readResultItem.getUnit().toString()));
}
private static List<Element> setReferenceElements(List<ReadResult> readResults, List<String> elements) {
List<Element> elementList = new ArrayList<>();
elements.forEach(elementString -> {
String[] indices = COMPILE.matcher(elementString).replaceAll(" ").trim().split(" ");
int readResultIndex, lineIndex;
if (indices.length >= 1) {
readResultIndex = Integer.parseInt(indices[0]);
lineIndex = Integer.parseInt(indices[1]);
} else {
throw LOGGER.logExceptionAsError(new RuntimeException("Reference Elements not found"));
}
if (indices.length == 3) {
int wordIndex = Integer.parseInt(indices[2]);
TextWord textWord = readResults.get(readResultIndex).getLines().get(lineIndex).getWords()
.get(wordIndex);
WordElement wordElement = new WordElement(textWord.getText(), toBoundingBox(textWord.getBoundingBox()));
elementList.add(wordElement);
} else {
TextLine textLine = readResults.get(readResultIndex).getLines().get(lineIndex);
LineElement lineElement = new LineElement(textLine.getText(), toBoundingBox(textLine.getBoundingBox()));
elementList.add(lineElement);
}
});
return elementList;
}
private static BoundingBox toBoundingBox(List<Float> boundingBox) {
List<Point> pointList = new ArrayList<>();
for (int i = 0; i < boundingBox.size(); i += 2) {
Point point = new Point(boundingBox.get(i), boundingBox.get(i + 1));
pointList.add(point);
}
return new BoundingBox(pointList);
}
private static List<ReceiptItem> toReceiptItems(
List<com.azure.ai.formrecognizer.implementation.models.FieldValue> fieldValue, List<ReadResult> readResults, boolean includeTextDetails) {
List<ReceiptItem> receiptItemList = new ArrayList<>();
fieldValue.forEach(fieldValue1 -> {
ReceiptItem receiptItem = new ReceiptItem();
fieldValue1.getValueObject().forEach((key, fieldValue2) -> {
switch (key) {
case "Quantity":
receiptItem.setQuantity(setFieldValue(fieldValue2, readResults, includeTextDetails));
break;
case "Name":
receiptItem.setName(setFieldValue(fieldValue2, readResults, includeTextDetails));
break;
case "TotalPrice":
receiptItem.setTotalPrice(setFieldValue(fieldValue2, readResults, includeTextDetails));
break;
default:
break;
}
});
receiptItemList.add(receiptItem);
});
return receiptItemList;
}
private static IntegerValue toFieldValueInteger(com.azure.ai.formrecognizer.implementation.models.FieldValue
serviceIntegerValue) {
if (serviceIntegerValue.getValueNumber() != null) {
return new IntegerValue(serviceIntegerValue.getText(), toBoundingBox(serviceIntegerValue.getBoundingBox()),
serviceIntegerValue.getValueInteger());
}
return new IntegerValue(serviceIntegerValue.getText(), toBoundingBox(serviceIntegerValue.getBoundingBox()), 0);
}
private static StringValue toFieldValueString(com.azure.ai.formrecognizer.implementation.models.FieldValue
serviceStringValue) {
return new StringValue(serviceStringValue.getText(), toBoundingBox(serviceStringValue.getBoundingBox()),
serviceStringValue.getValueString());
}
private static FloatValue toFieldValueNumber(com.azure.ai.formrecognizer.implementation.models.FieldValue
serviceFloatValue) {
return new FloatValue(serviceFloatValue.getText(), toBoundingBox(serviceFloatValue.getBoundingBox()),
serviceFloatValue.getValueNumber());
}
} | class Transforms {
private static final ClientLogger LOGGER = new ClientLogger(Transforms.class);
private static final Pattern COMPILE = Pattern.compile("[^0-9]+");
private Transforms() {
}
/**
* Helper method to convert the {@link com.azure.ai.formrecognizer.implementation.models.AnalyzeOperationResult}
* service level receipt model to list of {@link ExtractedReceipt}.
*
* @param analyzeResult The result of the analyze receipt operation returned by the service.
* @param includeTextDetails When set to true, a list of references to the text elements is returned in the read result.
*
* @return A list of {@link ExtractedReceipt} to represent the list of extracted receipt information.
*/
/**
* Helper method that converts the incoming service field value to one of the strongly typed SDK level {@link FieldValue} with
* reference elements set when {@code includeTextDetails} is set to true.
*
* @param fieldValue The named field values returned by the service.
* @param readResults The result containing the list of element references when includeTextDetails is set to true.
* @param includeTextDetails When set to true, a list of references to the text elements is returned in the read result.
*
* @return The strongly typed {@link FieldValue} for the field input.
*/
private static FieldValue<?> setFieldValue(com.azure.ai.formrecognizer.implementation.models.FieldValue fieldValue,
List<ReadResult> readResults, boolean includeTextDetails) {
FieldValue<?> value;
switch (fieldValue.getType()) {
case PHONE_NUMBER:
value = toFieldValuePhoneNumber(fieldValue);
break;
case STRING:
value = toFieldValueString(fieldValue);
break;
case TIME:
value = toFieldValueTime(fieldValue);
break;
case DATE:
value = toFieldValueDate(fieldValue);
break;
case INTEGER:
value = toFieldValueInteger(fieldValue);
break;
case NUMBER:
value = toFieldValueNumber(fieldValue);
break;
case ARRAY:
case OBJECT:
default:
throw LOGGER.logExceptionAsError(new RuntimeException("FieldValue Type not supported"));
}
if (includeTextDetails) {
value.setElements(setReferenceElements(readResults, fieldValue.getElements()));
}
return value;
}
/**
* Helper method that converts the service returned page information to SDK model {@link PageMetadata}.
*
* @param readResultItem A read result item returned from the service containing the page information for provided
* input.
*
* @return The {@link PageMetadata} for the receipt page.
*/
private static PageMetadata getPageInfo(ReadResult readResultItem) {
return new PageMetadata(readResultItem.getHeight(), readResultItem.getPage(), readResultItem.getWidth(),
readResultItem.getAngle(), DimensionUnit.fromString(readResultItem.getUnit().toString()));
}
/**
* Helper method to set the text reference elements on FieldValue/fields when {@code includeTextDetails} set to true.
*
* @param readResults The ReadResult containing the resolved references for text elements.
* @param elements When includeTextDetails is set to true, a list of references to the text
* elements constituting this field value.
*
* @return The updated {@link FieldValue} object with list if referenced elements.
*/
private static List<Element> setReferenceElements(List<ReadResult> readResults, List<String> elements) {
List<Element> elementList = new ArrayList<>();
elements.forEach(elementString -> {
String[] indices = COMPILE.matcher(elementString).replaceAll(" ").trim().split(" ");
int readResultIndex, lineIndex;
if (indices.length >= 1) {
readResultIndex = Integer.parseInt(indices[0]);
lineIndex = Integer.parseInt(indices[1]);
} else {
throw LOGGER.logExceptionAsError(new RuntimeException("Reference Elements not found"));
}
if (indices.length == 3) {
int wordIndex = Integer.parseInt(indices[2]);
TextWord textWord = readResults.get(readResultIndex).getLines().get(lineIndex).getWords()
.get(wordIndex);
WordElement wordElement = new WordElement(textWord.getText(), toBoundingBox(textWord.getBoundingBox()));
elementList.add(wordElement);
} else {
TextLine textLine = readResults.get(readResultIndex).getLines().get(lineIndex);
LineElement lineElement = new LineElement(textLine.getText(), toBoundingBox(textLine.getBoundingBox()));
elementList.add(lineElement);
}
});
return elementList;
}
/**
* Helper method to convert the service level modeled eight numbers representing the four points to SDK level
* {@link BoundingBox}.
*
* @param boundingBox A list of eight numbers representing the four points of a box.
*
* @return A {@link BoundingBox}.
*/
private static BoundingBox toBoundingBox(List<Float> boundingBox) {
BoundingBox boundingBox1;
if (boundingBox.size() == 8) {
Point topLeft = new Point(boundingBox.get(0), boundingBox.get(1));
Point topRight = new Point(boundingBox.get(2), boundingBox.get(3));
Point bottomLeft = new Point(boundingBox.get(4), boundingBox.get(5));
Point bottomRight = new Point(boundingBox.get(6), boundingBox.get(7));
boundingBox1 = new BoundingBox(topLeft, topRight, bottomLeft, bottomRight);
} else {
return null;
}
return boundingBox1;
}
/**
* Helper method to convert the service level {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
* to SDK level {@link ReceiptItem receipt items}.
*
* @param fieldValueItems The named field values returned by the service.
* @param readResults The result containing the list of element references when includeTextDetails is set to true.
* @param includeTextDetails When set to true, a list of references to the text elements is returned in the read result.
*
* @return A list of {@link ReceiptItem}.
*/
private static List<ReceiptItem> toReceiptItems(
List<com.azure.ai.formrecognizer.implementation.models.FieldValue> fieldValueItems, List<ReadResult> readResults, boolean includeTextDetails) {
List<ReceiptItem> receiptItemList = new ArrayList<>();
for (com.azure.ai.formrecognizer.implementation.models.FieldValue eachFieldValue : fieldValueItems) {
ReceiptItem receiptItem = new ReceiptItem();
for (ReceiptItemType key : ReceiptItemType.values()) {
com.azure.ai.formrecognizer.implementation.models.FieldValue item = eachFieldValue.getValueObject().get(key.toString());
if (QUANTITY.equals(key) && item != null) {
receiptItem.setQuantity(setFieldValue(item, readResults, includeTextDetails));
} else if (NAME.equals(key) && item != null) {
receiptItem.setName(setFieldValue(item, readResults, includeTextDetails));
} else if (PRICE.equals(key) && item != null) {
receiptItem.setPrice(setFieldValue(item, readResults, includeTextDetails));
} else if (TOTAL_PRICE.equals(key) && item != null) {
receiptItem.setTotalPrice(setFieldValue(item, readResults, includeTextDetails));
}
}
receiptItemList.add(receiptItem);
}
return receiptItemList;
}
/**
* Helper method to convert the service returned {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
* to a SDK level {@link IntegerValue}
*
* @param serviceIntegerValue The integer value returned by the service in
* {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
*
* @return The {@link IntegerValue}.
*/
private static IntegerValue toFieldValueInteger(com.azure.ai.formrecognizer.implementation.models.FieldValue
serviceIntegerValue) {
if (serviceIntegerValue.getValueNumber() != null) {
return new IntegerValue(serviceIntegerValue.getText(), toBoundingBox(serviceIntegerValue.getBoundingBox()),
serviceIntegerValue.getValueInteger(), serviceIntegerValue.getPage());
}
return new IntegerValue(serviceIntegerValue.getText(), toBoundingBox(serviceIntegerValue.getBoundingBox()), null, serviceIntegerValue.getPage());
}
/**
* Helper method to convert the service returned {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
* to a SDK level {@link StringValue}.
*
* @param serviceStringValue The string value returned by the service in
* {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
*
* @return The {@link StringValue}.
*/
private static StringValue toFieldValueString(com.azure.ai.formrecognizer.implementation.models.FieldValue
serviceStringValue) {
return new StringValue(serviceStringValue.getText(), toBoundingBox(serviceStringValue.getBoundingBox()),
serviceStringValue.getValueString(), serviceStringValue.getPage());
}
/**
* Helper method to convert the service returned {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
* to a SDK level {@link FloatValue}.
*
* @param serviceFloatValue The float value returned by the service in
* {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
*
* @return The {@link FloatValue}.
*/
private static FloatValue toFieldValueNumber(com.azure.ai.formrecognizer.implementation.models.FieldValue
serviceFloatValue) {
if (serviceFloatValue.getValueNumber() != null) {
return new FloatValue(serviceFloatValue.getText(), toBoundingBox(serviceFloatValue.getBoundingBox()),
serviceFloatValue.getValueNumber(), serviceFloatValue.getPage());
}
return new FloatValue(serviceFloatValue.getText(), toBoundingBox(serviceFloatValue.getBoundingBox()), null, serviceFloatValue.getPage());
}
/**
* Helper method to convert the service returned {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
* to a SDK level {@link StringValue}.
*
* @param serviceDateValue The string value returned by the service in
* {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
*
* @return The {@link StringValue}.
*/
private static StringValue toFieldValuePhoneNumber(com.azure.ai.formrecognizer.implementation.models.FieldValue
serviceDateValue) {
return new StringValue(serviceDateValue.getText(), toBoundingBox(serviceDateValue.getBoundingBox()),
serviceDateValue.getValuePhoneNumber(), serviceDateValue.getPage());
}
/**
* Helper method to convert the service returned {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
* to a SDK level {@link DateValue}.
*
* @param serviceDateValue The string value returned by the service in
* {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
*
* @return The {@link StringValue}.
*/
private static DateValue toFieldValueDate(com.azure.ai.formrecognizer.implementation.models.FieldValue
serviceDateValue) {
return new DateValue(serviceDateValue.getText(), toBoundingBox(serviceDateValue.getBoundingBox()),
serviceDateValue.getValueDate(), serviceDateValue.getPage());
}
/**
* Helper method to convert the service returned {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
* to a SDK level {@link TimeValue}.
*
* @param serviceDateValue The string value returned by the service in
* {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
*
* @return The {@link TimeValue}.
*
*/
private static TimeValue toFieldValueTime(com.azure.ai.formrecognizer.implementation.models.FieldValue
serviceDateValue) {
return new TimeValue(serviceDateValue.getText(), toBoundingBox(serviceDateValue.getBoundingBox()),
serviceDateValue.getValueTime(), serviceDateValue.getPage());
}
} |
Instead of silently skipping over unknown keys, if the known set of keys are limited and new keys are not added frequently, this should throw an exception when the key is unknown. And when a new key is added, it should be accompanied by a service version update too right? | static IterableStream<ExtractedReceipt> toReceipt(AnalyzeResult analyzeResult, boolean includeTextDetails) {
List<ReadResult> readResults = analyzeResult.getReadResults();
List<DocumentResult> documentResult = analyzeResult.getDocumentResults();
List<ExtractedReceipt> extractedReceiptList = new ArrayList<>();
for (int i = 0; i < readResults.size(); i++) {
ReadResult readResultItem = readResults.get(i);
PageMetadata pageMetadata = getPageInfo(readResultItem);
PageRange pageRange = null;
DocumentResult documentResultItem = documentResult.get(i);
List<Integer> receiptPageRange = documentResultItem.getPageRange();
if (receiptPageRange.size() == 2) {
pageRange = new PageRange(receiptPageRange.get(0), receiptPageRange.get(1));
}
ExtractedReceipt extractedReceiptItem = new ExtractedReceipt(pageMetadata, pageRange);
documentResultItem.getFields().forEach((key, fieldValue) -> {
switch (key) {
case "ReceiptType":
extractedReceiptItem.setReceiptType(new ReceiptType(fieldValue.getValueString(),
fieldValue.getConfidence()));
break;
case "MerchantName":
extractedReceiptItem.setMerchantName(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "MerchantAddress":
extractedReceiptItem.setMerchantAddress(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "MerchantPhoneNumber":
extractedReceiptItem.setMerchantPhoneNumber(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "Subtotal":
extractedReceiptItem.setSubtotal(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "Tax":
extractedReceiptItem.setTax(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "Tip":
extractedReceiptItem.setTip(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "Total":
extractedReceiptItem.setTotal(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "TransactionDate":
extractedReceiptItem.setTransactionDate(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "TransactionTime":
extractedReceiptItem.setTransactionTime(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "Items":
extractedReceiptItem.setReceiptItems(toReceiptItems(fieldValue.getValueArray(), readResults, includeTextDetails));
break;
default:
break;
}
});
extractedReceiptList.add(extractedReceiptItem);
}
return new IterableStream<>(extractedReceiptList);
} | break; | static IterableStream<ExtractedReceipt> toReceipt(AnalyzeResult analyzeResult, boolean includeTextDetails) {
List<ReadResult> readResults = analyzeResult.getReadResults();
List<DocumentResult> documentResult = analyzeResult.getDocumentResults();
List<ExtractedReceipt> extractedReceiptList = new ArrayList<>();
for (int i = 0; i < readResults.size(); i++) {
ReadResult readResultItem = readResults.get(i);
PageMetadata pageMetadata = getPageInfo(readResultItem);
PageRange pageRange = null;
DocumentResult documentResultItem = documentResult.get(i);
List<Integer> receiptPageRange = documentResultItem.getPageRange();
if (receiptPageRange.size() == 2) {
pageRange = new PageRange(receiptPageRange.get(0), receiptPageRange.get(1));
}
ExtractedReceipt extractedReceiptItem = new ExtractedReceipt(pageMetadata, pageRange);
Map<String, FieldValue<?>> extractedFieldMap = new HashMap<>();
documentResultItem.getFields().forEach((key, fieldValue) -> {
switch (key) {
case "ReceiptType":
extractedReceiptItem.setReceiptType(new ReceiptType(fieldValue.getValueString(),
fieldValue.getConfidence()));
break;
case "MerchantName":
extractedReceiptItem.setMerchantName(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "MerchantAddress":
extractedReceiptItem.setMerchantAddress(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "MerchantPhoneNumber":
extractedReceiptItem.setMerchantPhoneNumber(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "Subtotal":
extractedReceiptItem.setSubtotal(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "Tax":
extractedReceiptItem.setTax(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "Tip":
extractedReceiptItem.setTip(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "Total":
extractedReceiptItem.setTotal(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "TransactionDate":
extractedReceiptItem.setTransactionDate(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "TransactionTime":
extractedReceiptItem.setTransactionTime(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "Items":
extractedReceiptItem.setReceiptItems(toReceiptItems(fieldValue.getValueArray(), readResults, includeTextDetails));
break;
default:
extractedFieldMap.putIfAbsent(key, setFieldValue(fieldValue, readResults, includeTextDetails));
break;
}
});
extractedReceiptItem.setExtractedFields(extractedFieldMap);
extractedReceiptList.add(extractedReceiptItem);
}
return new IterableStream<>(extractedReceiptList);
} | class Transforms {
private static final ClientLogger LOGGER = new ClientLogger(Transforms.class);
private static final Pattern COMPILE = Pattern.compile("[^0-9]+");
private Transforms() {
}
/**
* Helper method to convert the {@link com.azure.ai.formrecognizer.implementation.models.AnalyzeOperationResult}
* service level receipt model to list of {@link ExtractedReceipt}.
*
* @param analyzeResult The result of the analyze receipt operation returned by the service.
* @param includeTextDetails When set to true, a list of references to the text elements is returned in the read result.
*
* @return A list of {@link ExtractedReceipt} to represent the list of extracted receipt information.
*/
/**
* Helper method that converts the incoming service field value to one of the strongly typed SDK level {@link FieldValue} with
* reference elements set when {@code includeTextDetails} is set to true.
*
* @param fieldValue The named field values returned by the service.
* @param readResults The result containing the list of element references when includeTextDetails is set to true.
* @param includeTextDetails When set to true, a list of references to the text elements is returned in the read result.
*
* @return The strongly typed {@link FieldValue} for the field input.
*/
private static FieldValue<?> setFieldValue(com.azure.ai.formrecognizer.implementation.models.FieldValue fieldValue,
List<ReadResult> readResults, boolean includeTextDetails) {
FieldValue<?> value;
switch (fieldValue.getType()) {
case PHONE_NUMBER:
case STRING:
case TIME:
case DATE:
value = toFieldValueString(fieldValue);
break;
case INTEGER:
value = toFieldValueInteger(fieldValue);
break;
case NUMBER:
value = toFieldValueNumber(fieldValue);
break;
case ARRAY:
case OBJECT:
default:
throw LOGGER.logExceptionAsError(new RuntimeException("FieldValue Type not supported"));
}
if (includeTextDetails) {
value.setElements(setReferenceElements(readResults, fieldValue.getElements()));
}
return value;
}
/**
* Helper method that converts the service returned page information to SDK model {@link PageMetadata}.
*
* @param readResultItem A read result item returned from the service containing the page information for provided
* input.
*
* @return The {@link PageMetadata} for the receipt page.
*/
private static PageMetadata getPageInfo(ReadResult readResultItem) {
return new PageMetadata(TextLanguage.fromString(readResultItem.getLanguage().toString()),
readResultItem.getHeight(), readResultItem.getPage(), readResultItem.getWidth(),
readResultItem.getAngle(), DimensionUnit.fromString(readResultItem.getUnit().toString()));
}
/**
* Helper method to set the text reference elements on FieldValue/fields when {@code includeTextDetails} set to true.
*
* @param readResults The ReadResult containing the resolved references for text elements.
* @param elements When includeTextDetails is set to true, a list of references to the text
* elements constituting this field value.
*
* @return The updated {@link FieldValue} object with list if referenced elements.
*/
private static List<Element> setReferenceElements(List<ReadResult> readResults, List<String> elements) {
List<Element> elementList = new ArrayList<>();
elements.forEach(elementString -> {
String[] indices = COMPILE.matcher(elementString).replaceAll(" ").trim().split(" ");
int readResultIndex, lineIndex;
if (indices.length >= 1) {
readResultIndex = Integer.parseInt(indices[0]);
lineIndex = Integer.parseInt(indices[1]);
} else {
throw LOGGER.logExceptionAsError(new RuntimeException("Reference Elements not found"));
}
if (indices.length == 3) {
int wordIndex = Integer.parseInt(indices[2]);
TextWord textWord = readResults.get(readResultIndex).getLines().get(lineIndex).getWords()
.get(wordIndex);
WordElement wordElement = new WordElement(textWord.getText(), toBoundingBox(textWord.getBoundingBox()));
elementList.add(wordElement);
} else {
TextLine textLine = readResults.get(readResultIndex).getLines().get(lineIndex);
LineElement lineElement = new LineElement(textLine.getText(), toBoundingBox(textLine.getBoundingBox()));
elementList.add(lineElement);
}
});
return elementList;
}
/**
* Helper method to convert the service level modeled eight numbers representing the four points to SDK level
* {@link BoundingBox}.
*
* @param boundingBox A list of eight numbers representing the four points of a box.
*
* @return A {@link BoundingBox}.
*/
private static BoundingBox toBoundingBox(List<Float> boundingBox) {
List<Point> pointList = new ArrayList<>();
for (int i = 0; i < boundingBox.size(); i += 2) {
Point point = new Point(boundingBox.get(i), boundingBox.get(i + 1));
pointList.add(point);
}
return new BoundingBox(pointList);
}
/**
* Helper method to convert the service level {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
* to SDK level {@link ReceiptItem receipt items}.
*
* @param fieldValue The named field values returned by the service.
* @param readResults The result containing the list of element references when includeTextDetails is set to true.
* @param includeTextDetails When set to true, a list of references to the text elements is returned in the read result.
*
* @return A list of {@link ReceiptItem}.
*/
private static List<ReceiptItem> toReceiptItems(
List<com.azure.ai.formrecognizer.implementation.models.FieldValue> fieldValue, List<ReadResult> readResults, boolean includeTextDetails) {
List<ReceiptItem> receiptItemList = new ArrayList<>();
fieldValue.forEach(fieldValue1 -> {
ReceiptItem receiptItem = new ReceiptItem();
fieldValue1.getValueObject().forEach((key, fieldValue2) -> {
switch (key) {
case "Quantity":
receiptItem.setQuantity(setFieldValue(fieldValue2, readResults, includeTextDetails));
break;
case "Name":
receiptItem.setName(setFieldValue(fieldValue2, readResults, includeTextDetails));
break;
case "TotalPrice":
receiptItem.setTotalPrice(setFieldValue(fieldValue2, readResults, includeTextDetails));
break;
default:
break;
}
});
receiptItemList.add(receiptItem);
});
return receiptItemList;
}
/**
* Helper method to convert the service returned {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
* to a SDK level {@link IntegerValue}
*
* @param serviceIntegerValue The integer value returned by the service in
* {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
*
* @return The {@link IntegerValue}.
*/
private static IntegerValue toFieldValueInteger(com.azure.ai.formrecognizer.implementation.models.FieldValue
serviceIntegerValue) {
if (serviceIntegerValue.getValueNumber() != null) {
return new IntegerValue(serviceIntegerValue.getText(), toBoundingBox(serviceIntegerValue.getBoundingBox()),
serviceIntegerValue.getValueInteger(), serviceIntegerValue.getPage());
}
return new IntegerValue(serviceIntegerValue.getText(), toBoundingBox(serviceIntegerValue.getBoundingBox()), 0, serviceIntegerValue.getPage());
}
/**
* Helper method to convert the service returned {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
* to a SDK level {@link StringValue}.
*
* @param serviceStringValue The string value returned by the service in
* {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
*
* @return The {@link StringValue}.
*/
private static StringValue toFieldValueString(com.azure.ai.formrecognizer.implementation.models.FieldValue
serviceStringValue) {
return new StringValue(serviceStringValue.getText(), toBoundingBox(serviceStringValue.getBoundingBox()),
serviceStringValue.getValueString(), serviceStringValue.getPage());
}
/**
* Helper method to convert the service returned {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
* to a SDK level {@link FloatValue}.
*
* @param serviceFloatValue The float value returned by the service in
* {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
*
* @return The {@link FloatValue}.
*/
private static FloatValue toFieldValueNumber(com.azure.ai.formrecognizer.implementation.models.FieldValue
serviceFloatValue) {
return new FloatValue(serviceFloatValue.getText(), toBoundingBox(serviceFloatValue.getBoundingBox()),
serviceFloatValue.getValueNumber(), serviceFloatValue.getPage());
}
} | class Transforms {
private static final ClientLogger LOGGER = new ClientLogger(Transforms.class);
private static final Pattern COMPILE = Pattern.compile("[^0-9]+");
private Transforms() {
}
/**
* Helper method to convert the {@link com.azure.ai.formrecognizer.implementation.models.AnalyzeOperationResult}
* service level receipt model to list of {@link ExtractedReceipt}.
*
* @param analyzeResult The result of the analyze receipt operation returned by the service.
* @param includeTextDetails When set to true, a list of references to the text elements is returned in the read result.
*
* @return A list of {@link ExtractedReceipt} to represent the list of extracted receipt information.
*/
/**
* Helper method that converts the incoming service field value to one of the strongly typed SDK level {@link FieldValue} with
* reference elements set when {@code includeTextDetails} is set to true.
*
* @param fieldValue The named field values returned by the service.
* @param readResults The result containing the list of element references when includeTextDetails is set to true.
* @param includeTextDetails When set to true, a list of references to the text elements is returned in the read result.
*
* @return The strongly typed {@link FieldValue} for the field input.
*/
private static FieldValue<?> setFieldValue(com.azure.ai.formrecognizer.implementation.models.FieldValue fieldValue,
List<ReadResult> readResults, boolean includeTextDetails) {
FieldValue<?> value;
switch (fieldValue.getType()) {
case PHONE_NUMBER:
value = toFieldValuePhoneNumber(fieldValue);
break;
case STRING:
value = toFieldValueString(fieldValue);
break;
case TIME:
value = toFieldValueTime(fieldValue);
break;
case DATE:
value = toFieldValueDate(fieldValue);
break;
case INTEGER:
value = toFieldValueInteger(fieldValue);
break;
case NUMBER:
value = toFieldValueNumber(fieldValue);
break;
case ARRAY:
case OBJECT:
default:
throw LOGGER.logExceptionAsError(new RuntimeException("FieldValue Type not supported"));
}
if (includeTextDetails) {
value.setElements(setReferenceElements(readResults, fieldValue.getElements()));
}
return value;
}
/**
* Helper method that converts the service returned page information to SDK model {@link PageMetadata}.
*
* @param readResultItem A read result item returned from the service containing the page information for provided
* input.
*
* @return The {@link PageMetadata} for the receipt page.
*/
private static PageMetadata getPageInfo(ReadResult readResultItem) {
return new PageMetadata(readResultItem.getHeight(), readResultItem.getPage(), readResultItem.getWidth(),
readResultItem.getAngle(), DimensionUnit.fromString(readResultItem.getUnit().toString()));
}
/**
* Helper method to set the text reference elements on FieldValue/fields when {@code includeTextDetails} set to true.
*
* @param readResults The ReadResult containing the resolved references for text elements.
* @param elements When includeTextDetails is set to true, a list of references to the text
* elements constituting this field value.
*
* @return The updated {@link FieldValue} object with list if referenced elements.
*/
private static List<Element> setReferenceElements(List<ReadResult> readResults, List<String> elements) {
List<Element> elementList = new ArrayList<>();
elements.forEach(elementString -> {
String[] indices = COMPILE.matcher(elementString).replaceAll(" ").trim().split(" ");
int readResultIndex, lineIndex;
if (indices.length >= 1) {
readResultIndex = Integer.parseInt(indices[0]);
lineIndex = Integer.parseInt(indices[1]);
} else {
throw LOGGER.logExceptionAsError(new RuntimeException("Reference Elements not found"));
}
if (indices.length == 3) {
int wordIndex = Integer.parseInt(indices[2]);
TextWord textWord = readResults.get(readResultIndex).getLines().get(lineIndex).getWords()
.get(wordIndex);
WordElement wordElement = new WordElement(textWord.getText(), toBoundingBox(textWord.getBoundingBox()));
elementList.add(wordElement);
} else {
TextLine textLine = readResults.get(readResultIndex).getLines().get(lineIndex);
LineElement lineElement = new LineElement(textLine.getText(), toBoundingBox(textLine.getBoundingBox()));
elementList.add(lineElement);
}
});
return elementList;
}
/**
* Helper method to convert the service level modeled eight numbers representing the four points to SDK level
* {@link BoundingBox}.
*
* @param boundingBox A list of eight numbers representing the four points of a box.
*
* @return A {@link BoundingBox}.
*/
private static BoundingBox toBoundingBox(List<Float> boundingBox) {
BoundingBox boundingBox1;
if (boundingBox.size() == 8) {
Point topLeft = new Point(boundingBox.get(0), boundingBox.get(1));
Point topRight = new Point(boundingBox.get(2), boundingBox.get(3));
Point bottomLeft = new Point(boundingBox.get(4), boundingBox.get(5));
Point bottomRight = new Point(boundingBox.get(6), boundingBox.get(7));
boundingBox1 = new BoundingBox(topLeft, topRight, bottomLeft, bottomRight);
} else {
return null;
}
return boundingBox1;
}
/**
* Helper method to convert the service level {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
* to SDK level {@link ReceiptItem receipt items}.
*
* @param fieldValueItems The named field values returned by the service.
* @param readResults The result containing the list of element references when includeTextDetails is set to true.
* @param includeTextDetails When set to true, a list of references to the text elements is returned in the read result.
*
* @return A list of {@link ReceiptItem}.
*/
private static List<ReceiptItem> toReceiptItems(
List<com.azure.ai.formrecognizer.implementation.models.FieldValue> fieldValueItems, List<ReadResult> readResults, boolean includeTextDetails) {
List<ReceiptItem> receiptItemList = new ArrayList<>();
for (com.azure.ai.formrecognizer.implementation.models.FieldValue eachFieldValue : fieldValueItems) {
ReceiptItem receiptItem = new ReceiptItem();
for (ReceiptItemType key : ReceiptItemType.values()) {
com.azure.ai.formrecognizer.implementation.models.FieldValue item = eachFieldValue.getValueObject().get(key.toString());
if (QUANTITY.equals(key) && item != null) {
receiptItem.setQuantity(setFieldValue(item, readResults, includeTextDetails));
} else if (NAME.equals(key) && item != null) {
receiptItem.setName(setFieldValue(item, readResults, includeTextDetails));
} else if (PRICE.equals(key) && item != null) {
receiptItem.setPrice(setFieldValue(item, readResults, includeTextDetails));
} else if (TOTAL_PRICE.equals(key) && item != null) {
receiptItem.setTotalPrice(setFieldValue(item, readResults, includeTextDetails));
}
}
receiptItemList.add(receiptItem);
}
return receiptItemList;
}
/**
* Helper method to convert the service returned {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
* to a SDK level {@link IntegerValue}
*
* @param serviceIntegerValue The integer value returned by the service in
* {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
*
* @return The {@link IntegerValue}.
*/
private static IntegerValue toFieldValueInteger(com.azure.ai.formrecognizer.implementation.models.FieldValue
serviceIntegerValue) {
if (serviceIntegerValue.getValueNumber() != null) {
return new IntegerValue(serviceIntegerValue.getText(), toBoundingBox(serviceIntegerValue.getBoundingBox()),
serviceIntegerValue.getValueInteger(), serviceIntegerValue.getPage());
}
return new IntegerValue(serviceIntegerValue.getText(), toBoundingBox(serviceIntegerValue.getBoundingBox()), null, serviceIntegerValue.getPage());
}
/**
* Helper method to convert the service returned {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
* to a SDK level {@link StringValue}.
*
* @param serviceStringValue The string value returned by the service in
* {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
*
* @return The {@link StringValue}.
*/
private static StringValue toFieldValueString(com.azure.ai.formrecognizer.implementation.models.FieldValue
serviceStringValue) {
return new StringValue(serviceStringValue.getText(), toBoundingBox(serviceStringValue.getBoundingBox()),
serviceStringValue.getValueString(), serviceStringValue.getPage());
}
/**
* Helper method to convert the service returned {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
* to a SDK level {@link FloatValue}.
*
* @param serviceFloatValue The float value returned by the service in
* {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
*
* @return The {@link FloatValue}.
*/
private static FloatValue toFieldValueNumber(com.azure.ai.formrecognizer.implementation.models.FieldValue
serviceFloatValue) {
if (serviceFloatValue.getValueNumber() != null) {
return new FloatValue(serviceFloatValue.getText(), toBoundingBox(serviceFloatValue.getBoundingBox()),
serviceFloatValue.getValueNumber(), serviceFloatValue.getPage());
}
return new FloatValue(serviceFloatValue.getText(), toBoundingBox(serviceFloatValue.getBoundingBox()), null, serviceFloatValue.getPage());
}
/**
* Helper method to convert the service returned {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
* to a SDK level {@link StringValue}.
*
* @param serviceDateValue The string value returned by the service in
* {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
*
* @return The {@link StringValue}.
*/
private static StringValue toFieldValuePhoneNumber(com.azure.ai.formrecognizer.implementation.models.FieldValue
serviceDateValue) {
return new StringValue(serviceDateValue.getText(), toBoundingBox(serviceDateValue.getBoundingBox()),
serviceDateValue.getValuePhoneNumber(), serviceDateValue.getPage());
}
/**
* Helper method to convert the service returned {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
* to a SDK level {@link DateValue}.
*
* @param serviceDateValue The string value returned by the service in
* {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
*
* @return The {@link StringValue}.
*/
private static DateValue toFieldValueDate(com.azure.ai.formrecognizer.implementation.models.FieldValue
serviceDateValue) {
return new DateValue(serviceDateValue.getText(), toBoundingBox(serviceDateValue.getBoundingBox()),
serviceDateValue.getValueDate(), serviceDateValue.getPage());
}
/**
* Helper method to convert the service returned {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
* to a SDK level {@link TimeValue}.
*
* @param serviceDateValue The string value returned by the service in
* {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
*
* @return The {@link TimeValue}.
*
*/
private static TimeValue toFieldValueTime(com.azure.ai.formrecognizer.implementation.models.FieldValue
serviceDateValue) {
return new TimeValue(serviceDateValue.getText(), toBoundingBox(serviceDateValue.getBoundingBox()),
serviceDateValue.getValueTime(), serviceDateValue.getPage());
}
} |
outside of scope of this PR. we can come back to this later. I fixed double serialization for point read item operation but not the udf, etc. | public Mono<CosmosAsyncUserDefinedFunctionResponse> replace(CosmosUserDefinedFunctionProperties udfSettings) {
return container.getDatabase()
.getDocClientWrapper()
.replaceUserDefinedFunction(new UserDefinedFunction(ModelBridgeInternal.toJsonFromJsonSerializable(udfSettings)), null)
.map(response -> ModelBridgeInternal.createCosmosAsyncUserDefinedFunctionResponse(response, container))
.single();
} | .replaceUserDefinedFunction(new UserDefinedFunction(ModelBridgeInternal.toJsonFromJsonSerializable(udfSettings)), null) | public Mono<CosmosAsyncUserDefinedFunctionResponse> replace(CosmosUserDefinedFunctionProperties udfSettings) {
return container.getDatabase()
.getDocClientWrapper()
.replaceUserDefinedFunction(new UserDefinedFunction(ModelBridgeInternal.toJsonFromJsonSerializable(udfSettings)), null)
.map(response -> ModelBridgeInternal.createCosmosAsyncUserDefinedFunctionResponse(response, container))
.single();
} | class CosmosAsyncUserDefinedFunction {
@SuppressWarnings("EnforceFinalFields")
private final CosmosAsyncContainer container;
private String id;
CosmosAsyncUserDefinedFunction(String id, CosmosAsyncContainer container) {
this.id = id;
this.container = container;
}
/**
* Get the id of the {@link CosmosAsyncUserDefinedFunction}
*
* @return the id of the {@link CosmosAsyncUserDefinedFunction}
*/
public String getId() {
return id;
}
/**
* Set the id of the {@link CosmosAsyncUserDefinedFunction}
*
* @param id the id of the {@link CosmosAsyncUserDefinedFunction}
* @return the same {@link CosmosAsyncUserDefinedFunction} that had the id set
*/
CosmosAsyncUserDefinedFunction setId(String id) {
this.id = id;
return this;
}
/**
* Read a user defined function.
* <p>
* After subscription the operation will be performed.
* The {@link Mono} upon successful completion will contain a single resource response for the read user defined
* function.
* In case of failure the {@link Mono} will error.
*
* @return an {@link Mono} containing the single resource response for the read user defined function or an error.
*/
public Mono<CosmosAsyncUserDefinedFunctionResponse> read() {
return container.getDatabase().getDocClientWrapper().readUserDefinedFunction(getLink(), null)
.map(response -> ModelBridgeInternal.createCosmosAsyncUserDefinedFunctionResponse(response, container)).single();
}
/**
* Replaces a cosmos user defined function.
* <p>
* After subscription the operation will be performed.
* The {@link Mono} upon successful completion will contain a single resource response with the replaced user
* defined function.
* In case of failure the {@link Mono} will error.
*
* @param udfSettings the cosmos user defined function properties.
* @return an {@link Mono} containing the single resource response with the replaced cosmos user defined function
* or an error.
*/
/**
* Deletes a cosmos user defined function.
* <p>
* After subscription the operation will be performed.
* The {@link Mono} upon successful completion will contain a single resource response for the deleted user
* defined function.
* In case of failure the {@link Mono} will error.
*
* @return an {@link Mono} containing the single resource response for the deleted cosmos user defined function or
* an error.
*/
public Mono<CosmosAsyncUserDefinedFunctionResponse> delete() {
return container.getDatabase()
.getDocClientWrapper()
.deleteUserDefinedFunction(this.getLink(), null)
.map(response -> ModelBridgeInternal.createCosmosAsyncUserDefinedFunctionResponse(response, container))
.single();
}
String getURIPathSegment() {
return Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT;
}
String getParentLink() {
return container.getLink();
}
String getLink() {
StringBuilder builder = new StringBuilder();
builder.append(getParentLink());
builder.append("/");
builder.append(getURIPathSegment());
builder.append("/");
builder.append(getId());
return builder.toString();
}
} | class CosmosAsyncUserDefinedFunction {
@SuppressWarnings("EnforceFinalFields")
private final CosmosAsyncContainer container;
private String id;
CosmosAsyncUserDefinedFunction(String id, CosmosAsyncContainer container) {
this.id = id;
this.container = container;
}
/**
* Get the id of the {@link CosmosAsyncUserDefinedFunction}
*
* @return the id of the {@link CosmosAsyncUserDefinedFunction}
*/
public String getId() {
return id;
}
/**
* Set the id of the {@link CosmosAsyncUserDefinedFunction}
*
* @param id the id of the {@link CosmosAsyncUserDefinedFunction}
* @return the same {@link CosmosAsyncUserDefinedFunction} that had the id set
*/
CosmosAsyncUserDefinedFunction setId(String id) {
this.id = id;
return this;
}
/**
* Read a user defined function.
* <p>
* After subscription the operation will be performed.
* The {@link Mono} upon successful completion will contain a single resource response for the read user defined
* function.
* In case of failure the {@link Mono} will error.
*
* @return an {@link Mono} containing the single resource response for the read user defined function or an error.
*/
public Mono<CosmosAsyncUserDefinedFunctionResponse> read() {
return container.getDatabase().getDocClientWrapper().readUserDefinedFunction(getLink(), null)
.map(response -> ModelBridgeInternal.createCosmosAsyncUserDefinedFunctionResponse(response, container)).single();
}
/**
* Replaces a cosmos user defined function.
* <p>
* After subscription the operation will be performed.
* The {@link Mono} upon successful completion will contain a single resource response with the replaced user
* defined function.
* In case of failure the {@link Mono} will error.
*
* @param udfSettings the cosmos user defined function properties.
* @return an {@link Mono} containing the single resource response with the replaced cosmos user defined function
* or an error.
*/
/**
* Deletes a cosmos user defined function.
* <p>
* After subscription the operation will be performed.
* The {@link Mono} upon successful completion will contain a single resource response for the deleted user
* defined function.
* In case of failure the {@link Mono} will error.
*
* @return an {@link Mono} containing the single resource response for the deleted cosmos user defined function or
* an error.
*/
public Mono<CosmosAsyncUserDefinedFunctionResponse> delete() {
    // Delete via the document client; single() asserts exactly one wire
    // response before it is mapped into the public response model.
    return container.getDatabase().getDocClientWrapper()
        .deleteUserDefinedFunction(getLink(), null)
        .single()
        .map(response -> ModelBridgeInternal.createCosmosAsyncUserDefinedFunctionResponse(response, container));
}
String getURIPathSegment() {
// URI segment identifying the UDF resource type under its parent container.
return Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT;
}
String getParentLink() {
// The owning container's self link; UDF links are rooted under it.
return container.getLink();
}
String getLink() {
    // Builds "<parentLink>/<udfSegment>/<id>". The explicit StringBuilder
    // chain was needless ceremony: a single concatenation expression compiles
    // to the equivalent builder code and reads far better.
    return getParentLink() + "/" + getURIPathSegment() + "/" + getId();
}
} |
The service will only accept files of 20MB, so anything larger is an exception, but nothing is stopping me from sending a 2GB file by accident. | private static ByteBuffer toByteArray(InputStream in) throws IOException {
ByteArrayOutputStream os = new ByteArrayOutputStream();
byte[] buffer = new byte[1024];
int len;
while ((len = in.read(buffer)) != -1) {
os.write(buffer, 0, len);
}
return ByteBuffer.wrap(os.toByteArray());
} | } | private static ByteBuffer toByteArray(InputStream in) {
try {
ByteArrayOutputStream os = new ByteArrayOutputStream();
byte[] buffer = new byte[1024];
int len;
while ((len = in.read(buffer)) != -1) {
os.write(buffer, 0, len);
}
return ByteBuffer.wrap(os.toByteArray());
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e));
}
} | class Utility {
private static final ClientLogger LOGGER = new ClientLogger(Utility.class);
/**
* A utility method for converting the input stream to Flux of ByteBuffer.
*
* @param data The input data which needs to convert to ByteBuffer.
*
* @return {@link ByteBuffer} which contains the input data.
* @throws RuntimeException When I/O error occurs.
*/
public static Flux<ByteBuffer> convertStreamToByteBuffer(InputStream data) {
// Eagerly drains the entire stream into one in-memory ByteBuffer and emits it.
// NOTE(review): the read is unbounded - a very large stream is copied fully
// into the heap before any size validation can happen; consider capping it.
try {
return Flux.just(toByteArray(data));
} catch (IOException e) {
// Wrap the checked I/O failure as an unchecked exception and log it.
throw LOGGER.logExceptionAsError(new RuntimeException(e));
}
}
} | class Utility {
private static final ClientLogger LOGGER = new ClientLogger(Utility.class);
private Utility() {
// Private constructor: static-only utility class, never instantiated.
}
/**
* A utility method for converting the input stream to Flux of ByteBuffer.
*
* @param data The input data which needs to convert to ByteBuffer.
*
* @return {@link ByteBuffer} which contains the input data.
* @throws RuntimeException When I/O error occurs.
*/
public static Flux<ByteBuffer> convertStreamToByteBuffer(InputStream data) {
// toByteArray reads the whole stream eagerly; the Flux emits one ByteBuffer.
// doOnError only logs - the error still propagates to the subscriber.
return Flux.just(toByteArray(data))
.doOnError(error -> LOGGER.warning("Failed to convert stream to byte array - {}", error));
}
} |
Updating this implementation in #9690 | private static ByteBuffer toByteArray(InputStream in) throws IOException {
ByteArrayOutputStream os = new ByteArrayOutputStream();
byte[] buffer = new byte[1024];
int len;
while ((len = in.read(buffer)) != -1) {
os.write(buffer, 0, len);
}
return ByteBuffer.wrap(os.toByteArray());
} | } | private static ByteBuffer toByteArray(InputStream in) {
try {
ByteArrayOutputStream os = new ByteArrayOutputStream();
byte[] buffer = new byte[1024];
int len;
while ((len = in.read(buffer)) != -1) {
os.write(buffer, 0, len);
}
return ByteBuffer.wrap(os.toByteArray());
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e));
}
} | class Utility {
private static final ClientLogger LOGGER = new ClientLogger(Utility.class);
/**
* A utility method for converting the input stream to Flux of ByteBuffer.
*
* @param data The input data which needs to convert to ByteBuffer.
*
* @return {@link ByteBuffer} which contains the input data.
* @throws RuntimeException When I/O error occurs.
*/
public static Flux<ByteBuffer> convertStreamToByteBuffer(InputStream data) {
// Eagerly drains the entire stream into one in-memory ByteBuffer and emits it.
// NOTE(review): the read is unbounded - a very large stream is copied fully
// into the heap before any size validation can happen; consider capping it.
try {
return Flux.just(toByteArray(data));
} catch (IOException e) {
// Wrap the checked I/O failure as an unchecked exception and log it.
throw LOGGER.logExceptionAsError(new RuntimeException(e));
}
}
} | class Utility {
private static final ClientLogger LOGGER = new ClientLogger(Utility.class);
private Utility() {
// Private constructor: static-only utility class, never instantiated.
}
/**
* A utility method for converting the input stream to Flux of ByteBuffer.
*
* @param data The input data which needs to convert to ByteBuffer.
*
* @return {@link ByteBuffer} which contains the input data.
* @throws RuntimeException When I/O error occurs.
*/
public static Flux<ByteBuffer> convertStreamToByteBuffer(InputStream data) {
// toByteArray reads the whole stream eagerly; the Flux emits one ByteBuffer.
// doOnError only logs - the error still propagates to the subscriber.
return Flux.just(toByteArray(data))
.doOnError(error -> LOGGER.warning("Failed to convert stream to byte array - {}", error));
}
} |
The leftover keys would be put in a `Map<String, FieldValue> extractedFields` to still be able to capture them and not ignore them. >if the known set of keys is limited and new keys are not added frequently. The service currently wants to have the ability to add fields in between service updates. These would be made available to the user on the `Map of FieldValue (extractedFields)` and would later become new fields like merchantName, merchantAddress, etc. in future client releases. | static IterableStream<ExtractedReceipt> toReceipt(AnalyzeResult analyzeResult, boolean includeTextDetails) {
List<ReadResult> readResults = analyzeResult.getReadResults();
List<DocumentResult> documentResult = analyzeResult.getDocumentResults();
List<ExtractedReceipt> extractedReceiptList = new ArrayList<>();
for (int i = 0; i < readResults.size(); i++) {
ReadResult readResultItem = readResults.get(i);
PageMetadata pageMetadata = getPageInfo(readResultItem);
PageRange pageRange = null;
DocumentResult documentResultItem = documentResult.get(i);
List<Integer> receiptPageRange = documentResultItem.getPageRange();
if (receiptPageRange.size() == 2) {
pageRange = new PageRange(receiptPageRange.get(0), receiptPageRange.get(1));
}
ExtractedReceipt extractedReceiptItem = new ExtractedReceipt(pageMetadata, pageRange);
documentResultItem.getFields().forEach((key, fieldValue) -> {
switch (key) {
case "ReceiptType":
extractedReceiptItem.setReceiptType(new ReceiptType(fieldValue.getValueString(),
fieldValue.getConfidence()));
break;
case "MerchantName":
extractedReceiptItem.setMerchantName(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "MerchantAddress":
extractedReceiptItem.setMerchantAddress(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "MerchantPhoneNumber":
extractedReceiptItem.setMerchantPhoneNumber(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "Subtotal":
extractedReceiptItem.setSubtotal(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "Tax":
extractedReceiptItem.setTax(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "Tip":
extractedReceiptItem.setTip(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "Total":
extractedReceiptItem.setTotal(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "TransactionDate":
extractedReceiptItem.setTransactionDate(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "TransactionTime":
extractedReceiptItem.setTransactionTime(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "Items":
extractedReceiptItem.setReceiptItems(toReceiptItems(fieldValue.getValueArray(), readResults, includeTextDetails));
break;
default:
break;
}
});
extractedReceiptList.add(extractedReceiptItem);
}
return new IterableStream<>(extractedReceiptList);
} | break; | static IterableStream<ExtractedReceipt> toReceipt(AnalyzeResult analyzeResult, boolean includeTextDetails) {
List<ReadResult> readResults = analyzeResult.getReadResults();
List<DocumentResult> documentResult = analyzeResult.getDocumentResults();
List<ExtractedReceipt> extractedReceiptList = new ArrayList<>();
for (int i = 0; i < readResults.size(); i++) {
ReadResult readResultItem = readResults.get(i);
PageMetadata pageMetadata = getPageInfo(readResultItem);
PageRange pageRange = null;
DocumentResult documentResultItem = documentResult.get(i);
List<Integer> receiptPageRange = documentResultItem.getPageRange();
if (receiptPageRange.size() == 2) {
pageRange = new PageRange(receiptPageRange.get(0), receiptPageRange.get(1));
}
ExtractedReceipt extractedReceiptItem = new ExtractedReceipt(pageMetadata, pageRange);
Map<String, FieldValue<?>> extractedFieldMap = new HashMap<>();
documentResultItem.getFields().forEach((key, fieldValue) -> {
switch (key) {
case "ReceiptType":
extractedReceiptItem.setReceiptType(new ReceiptType(fieldValue.getValueString(),
fieldValue.getConfidence()));
break;
case "MerchantName":
extractedReceiptItem.setMerchantName(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "MerchantAddress":
extractedReceiptItem.setMerchantAddress(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "MerchantPhoneNumber":
extractedReceiptItem.setMerchantPhoneNumber(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "Subtotal":
extractedReceiptItem.setSubtotal(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "Tax":
extractedReceiptItem.setTax(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "Tip":
extractedReceiptItem.setTip(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "Total":
extractedReceiptItem.setTotal(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "TransactionDate":
extractedReceiptItem.setTransactionDate(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "TransactionTime":
extractedReceiptItem.setTransactionTime(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "Items":
extractedReceiptItem.setReceiptItems(toReceiptItems(fieldValue.getValueArray(), readResults, includeTextDetails));
break;
default:
extractedFieldMap.putIfAbsent(key, setFieldValue(fieldValue, readResults, includeTextDetails));
break;
}
});
extractedReceiptItem.setExtractedFields(extractedFieldMap);
extractedReceiptList.add(extractedReceiptItem);
}
return new IterableStream<>(extractedReceiptList);
} | class Transforms {
private static final ClientLogger LOGGER = new ClientLogger(Transforms.class);
private static final Pattern COMPILE = Pattern.compile("[^0-9]+");
private Transforms() {
// Private constructor: static helper class for service-to-SDK model mapping.
}
/**
* Helper method to convert the {@link com.azure.ai.formrecognizer.implementation.models.AnalyzeOperationResult}
* service level receipt model to list of {@link ExtractedReceipt}.
*
* @param analyzeResult The result of the analyze receipt operation returned by the service.
* @param includeTextDetails When set to true, a list of references to the text elements is returned in the read result.
*
* @return A list of {@link ExtractedReceipt} to represent the list of extracted receipt information.
*/
/**
* Helper method that converts the incoming service field value to one of the strongly typed SDK level {@link FieldValue} with
* reference elements set when {@code includeTextDetails} is set to true.
*
* @param fieldValue The named field values returned by the service.
* @param readResults The result containing the list of element references when includeTextDetails is set to true.
* @param includeTextDetails When set to true, a list of references to the text elements is returned in the read result.
*
* @return The strongly typed {@link FieldValue} for the field input.
*/
private static FieldValue<?> setFieldValue(com.azure.ai.formrecognizer.implementation.models.FieldValue fieldValue,
List<ReadResult> readResults, boolean includeTextDetails) {
FieldValue<?> value;
switch (fieldValue.getType()) {
// Phone numbers, times and dates are currently surfaced via their string
// representation; only integers and numbers get dedicated conversions.
case PHONE_NUMBER:
case STRING:
case TIME:
case DATE:
value = toFieldValueString(fieldValue);
break;
case INTEGER:
value = toFieldValueInteger(fieldValue);
break;
case NUMBER:
value = toFieldValueNumber(fieldValue);
break;
// Nested arrays/objects are decomposed by dedicated callers, not here.
case ARRAY:
case OBJECT:
default:
throw LOGGER.logExceptionAsError(new RuntimeException("FieldValue Type not supported"));
}
if (includeTextDetails) {
// Resolve the element reference strings into concrete word/line elements.
value.setElements(setReferenceElements(readResults, fieldValue.getElements()));
}
return value;
}
/**
* Helper method that converts the service returned page information to SDK model {@link PageMetadata}.
*
* @param readResultItem A read result item returned from the service containing the page information for provided
* input.
*
* @return The {@link PageMetadata} for the receipt page.
*/
private static PageMetadata getPageInfo(ReadResult readResultItem) {
// Copies page language/geometry from the service read result into the SDK's
// PageMetadata model, converting the enum-like values by their string names.
return new PageMetadata(TextLanguage.fromString(readResultItem.getLanguage().toString()),
readResultItem.getHeight(), readResultItem.getPage(), readResultItem.getWidth(),
readResultItem.getAngle(), DimensionUnit.fromString(readResultItem.getUnit().toString()));
}
/**
* Helper method to set the text reference elements on FieldValue/fields when {@code includeTextDetails} set to true.
*
* @param readResults The ReadResult containing the resolved references for text elements.
* @param elements When includeTextDetails is set to true, a list of references to the text
* elements constituting this field value.
*
* @return The updated {@link FieldValue} object with list if referenced elements.
*/
private static List<Element> setReferenceElements(List<ReadResult> readResults, List<String> elements) {
    List<Element> elementList = new ArrayList<>();
    elements.forEach(elementString -> {
        // Keep only the numeric path components of the reference string.
        String[] indices = COMPILE.matcher(elementString).replaceAll(" ").trim().split(" ");
        // Fix: a valid reference needs BOTH a readResult index and a line
        // index. The previous check (length >= 1) accepted a single-element
        // array and then read indices[1], throwing
        // ArrayIndexOutOfBoundsException instead of the intended error.
        if (indices.length < 2) {
            throw LOGGER.logExceptionAsError(new RuntimeException("Reference Elements not found"));
        }
        int readResultIndex = Integer.parseInt(indices[0]);
        int lineIndex = Integer.parseInt(indices[1]);
        if (indices.length == 3) {
            // Three indices -> a word-level reference.
            int wordIndex = Integer.parseInt(indices[2]);
            TextWord textWord = readResults.get(readResultIndex).getLines().get(lineIndex).getWords()
                .get(wordIndex);
            elementList.add(new WordElement(textWord.getText(), toBoundingBox(textWord.getBoundingBox())));
        } else {
            // Two indices -> a line-level reference.
            TextLine textLine = readResults.get(readResultIndex).getLines().get(lineIndex);
            elementList.add(new LineElement(textLine.getText(), toBoundingBox(textLine.getBoundingBox())));
        }
    });
    return elementList;
}
/**
* Helper method to convert the service level modeled eight numbers representing the four points to SDK level
* {@link BoundingBox}.
*
* @param boundingBox A list of eight numbers representing the four points of a box.
*
* @return A {@link BoundingBox}.
*/
private static BoundingBox toBoundingBox(List<Float> boundingBox) {
// Pairs consecutive floats into (x, y) points.
// NOTE(review): assumes an even number of coordinates (the service sends 8);
// an odd-sized list would throw IndexOutOfBoundsException at get(i + 1).
List<Point> pointList = new ArrayList<>();
for (int i = 0; i < boundingBox.size(); i += 2) {
Point point = new Point(boundingBox.get(i), boundingBox.get(i + 1));
pointList.add(point);
}
return new BoundingBox(pointList);
}
/**
* Helper method to convert the service level {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
* to SDK level {@link ReceiptItem receipt items}.
*
* @param fieldValue The named field values returned by the service.
* @param readResults The result containing the list of element references when includeTextDetails is set to true.
* @param includeTextDetails When set to true, a list of references to the text elements is returned in the read result.
*
* @return A list of {@link ReceiptItem}.
*/
private static List<ReceiptItem> toReceiptItems(
List<com.azure.ai.formrecognizer.implementation.models.FieldValue> fieldValue, List<ReadResult> readResults, boolean includeTextDetails) {
List<ReceiptItem> receiptItemList = new ArrayList<>();
fieldValue.forEach(fieldValue1 -> {
ReceiptItem receiptItem = new ReceiptItem();
// Each array entry is an object whose named members map onto ReceiptItem.
fieldValue1.getValueObject().forEach((key, fieldValue2) -> {
switch (key) {
case "Quantity":
receiptItem.setQuantity(setFieldValue(fieldValue2, readResults, includeTextDetails));
break;
case "Name":
receiptItem.setName(setFieldValue(fieldValue2, readResults, includeTextDetails));
break;
case "TotalPrice":
receiptItem.setTotalPrice(setFieldValue(fieldValue2, readResults, includeTextDetails));
break;
default:
// Unrecognized item members are silently ignored.
break;
}
});
receiptItemList.add(receiptItem);
});
return receiptItemList;
}
/**
* Helper method to convert the service returned {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
* to a SDK level {@link IntegerValue}
*
* @param serviceIntegerValue The integer value returned by the service in
* {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
*
* @return The {@link IntegerValue}.
*/
private static IntegerValue toFieldValueInteger(com.azure.ai.formrecognizer.implementation.models.FieldValue
serviceIntegerValue) {
// NOTE(review): the guard inspects getValueNumber() but the value used is
// getValueInteger() - confirm which field the service populates for INTEGER.
if (serviceIntegerValue.getValueNumber() != null) {
return new IntegerValue(serviceIntegerValue.getText(), toBoundingBox(serviceIntegerValue.getBoundingBox()),
serviceIntegerValue.getValueInteger(), serviceIntegerValue.getPage());
}
// Fall back to 0 when the service did not supply a numeric value.
return new IntegerValue(serviceIntegerValue.getText(), toBoundingBox(serviceIntegerValue.getBoundingBox()), 0, serviceIntegerValue.getPage());
}
/**
* Helper method to convert the service returned {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
* to a SDK level {@link StringValue}.
*
* @param serviceStringValue The string value returned by the service in
* {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
*
* @return The {@link StringValue}.
*/
private static StringValue toFieldValueString(com.azure.ai.formrecognizer.implementation.models.FieldValue
serviceStringValue) {
// Direct mapping: text, bounding box, string value and page are copied across.
return new StringValue(serviceStringValue.getText(), toBoundingBox(serviceStringValue.getBoundingBox()),
serviceStringValue.getValueString(), serviceStringValue.getPage());
}
/**
* Helper method to convert the service returned {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
* to a SDK level {@link FloatValue}.
*
* @param serviceFloatValue The float value returned by the service in
* {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
*
* @return The {@link FloatValue}.
*/
private static FloatValue toFieldValueNumber(com.azure.ai.formrecognizer.implementation.models.FieldValue
serviceFloatValue) {
// No null guard here: getValueNumber() may be null and is passed through
// as-is - presumably FloatValue tolerates a null boxed value; verify.
return new FloatValue(serviceFloatValue.getText(), toBoundingBox(serviceFloatValue.getBoundingBox()),
serviceFloatValue.getValueNumber(), serviceFloatValue.getPage());
}
} | class Transforms {
private static final ClientLogger LOGGER = new ClientLogger(Transforms.class);
private static final Pattern COMPILE = Pattern.compile("[^0-9]+");
private Transforms() {
// Private constructor: static helper class for service-to-SDK model mapping.
}
/**
* Helper method to convert the {@link com.azure.ai.formrecognizer.implementation.models.AnalyzeOperationResult}
* service level receipt model to list of {@link ExtractedReceipt}.
*
* @param analyzeResult The result of the analyze receipt operation returned by the service.
* @param includeTextDetails When set to true, a list of references to the text elements is returned in the read result.
*
* @return A list of {@link ExtractedReceipt} to represent the list of extracted receipt information.
*/
/**
* Helper method that converts the incoming service field value to one of the strongly typed SDK level {@link FieldValue} with
* reference elements set when {@code includeTextDetails} is set to true.
*
* @param fieldValue The named field values returned by the service.
* @param readResults The result containing the list of element references when includeTextDetails is set to true.
* @param includeTextDetails When set to true, a list of references to the text elements is returned in the read result.
*
* @return The strongly typed {@link FieldValue} for the field input.
*/
private static FieldValue<?> setFieldValue(com.azure.ai.formrecognizer.implementation.models.FieldValue fieldValue,
List<ReadResult> readResults, boolean includeTextDetails) {
FieldValue<?> value;
// Dispatch on the wire type to the matching strongly typed conversion.
switch (fieldValue.getType()) {
case PHONE_NUMBER:
value = toFieldValuePhoneNumber(fieldValue);
break;
case STRING:
value = toFieldValueString(fieldValue);
break;
case TIME:
value = toFieldValueTime(fieldValue);
break;
case DATE:
value = toFieldValueDate(fieldValue);
break;
case INTEGER:
value = toFieldValueInteger(fieldValue);
break;
case NUMBER:
value = toFieldValueNumber(fieldValue);
break;
// Nested arrays/objects are decomposed by dedicated callers, not here.
case ARRAY:
case OBJECT:
default:
throw LOGGER.logExceptionAsError(new RuntimeException("FieldValue Type not supported"));
}
if (includeTextDetails) {
// Resolve the element reference strings into concrete word/line elements.
value.setElements(setReferenceElements(readResults, fieldValue.getElements()));
}
return value;
}
/**
* Helper method that converts the service returned page information to SDK model {@link PageMetadata}.
*
* @param readResultItem A read result item returned from the service containing the page information for provided
* input.
*
* @return The {@link PageMetadata} for the receipt page.
*/
private static PageMetadata getPageInfo(ReadResult readResultItem) {
// Copies page geometry (height, page number, width, angle, unit) from the
// service read result into the SDK's PageMetadata model.
return new PageMetadata(readResultItem.getHeight(), readResultItem.getPage(), readResultItem.getWidth(),
readResultItem.getAngle(), DimensionUnit.fromString(readResultItem.getUnit().toString()));
}
/**
* Helper method to set the text reference elements on FieldValue/fields when {@code includeTextDetails} set to true.
*
* @param readResults The ReadResult containing the resolved references for text elements.
* @param elements When includeTextDetails is set to true, a list of references to the text
* elements constituting this field value.
*
* @return The updated {@link FieldValue} object with list if referenced elements.
*/
private static List<Element> setReferenceElements(List<ReadResult> readResults, List<String> elements) {
    List<Element> elementList = new ArrayList<>();
    elements.forEach(elementString -> {
        // Keep only the numeric path components of the reference string.
        String[] indices = COMPILE.matcher(elementString).replaceAll(" ").trim().split(" ");
        // Fix: a valid reference needs BOTH a readResult index and a line
        // index. The previous check (length >= 1) accepted a single-element
        // array and then read indices[1], throwing
        // ArrayIndexOutOfBoundsException instead of the intended error.
        if (indices.length < 2) {
            throw LOGGER.logExceptionAsError(new RuntimeException("Reference Elements not found"));
        }
        int readResultIndex = Integer.parseInt(indices[0]);
        int lineIndex = Integer.parseInt(indices[1]);
        if (indices.length == 3) {
            // Three indices -> a word-level reference.
            int wordIndex = Integer.parseInt(indices[2]);
            TextWord textWord = readResults.get(readResultIndex).getLines().get(lineIndex).getWords()
                .get(wordIndex);
            elementList.add(new WordElement(textWord.getText(), toBoundingBox(textWord.getBoundingBox())));
        } else {
            // Two indices -> a line-level reference.
            TextLine textLine = readResults.get(readResultIndex).getLines().get(lineIndex);
            elementList.add(new LineElement(textLine.getText(), toBoundingBox(textLine.getBoundingBox())));
        }
    });
    return elementList;
}
/**
* Helper method to convert the service level modeled eight numbers representing the four points to SDK level
* {@link BoundingBox}.
*
* @param boundingBox A list of eight numbers representing the four points of a box.
*
* @return A {@link BoundingBox}.
*/
private static BoundingBox toBoundingBox(List<Float> boundingBox) {
    // The service encodes a box as exactly eight numbers (four points);
    // anything else is treated as malformed and yields null, as before.
    if (boundingBox.size() != 8) {
        return null;
    }
    Point topLeft = new Point(boundingBox.get(0), boundingBox.get(1));
    Point topRight = new Point(boundingBox.get(2), boundingBox.get(3));
    Point bottomLeft = new Point(boundingBox.get(4), boundingBox.get(5));
    Point bottomRight = new Point(boundingBox.get(6), boundingBox.get(7));
    return new BoundingBox(topLeft, topRight, bottomLeft, bottomRight);
}
/**
* Helper method to convert the service level {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
* to SDK level {@link ReceiptItem receipt items}.
*
* @param fieldValueItems The named field values returned by the service.
* @param readResults The result containing the list of element references when includeTextDetails is set to true.
* @param includeTextDetails When set to true, a list of references to the text elements is returned in the read result.
*
* @return A list of {@link ReceiptItem}.
*/
private static List<ReceiptItem> toReceiptItems(
    List<com.azure.ai.formrecognizer.implementation.models.FieldValue> fieldValueItems, List<ReadResult> readResults, boolean includeTextDetails) {
    List<ReceiptItem> receiptItemList = new ArrayList<>();
    for (com.azure.ai.formrecognizer.implementation.models.FieldValue eachFieldValue : fieldValueItems) {
        ReceiptItem receiptItem = new ReceiptItem();
        for (ReceiptItemType key : ReceiptItemType.values()) {
            com.azure.ai.formrecognizer.implementation.models.FieldValue item =
                eachFieldValue.getValueObject().get(key.toString());
            if (item == null) {
                // This receipt item has no value for the current member.
                continue;
            }
            // Convert only the member that matched, exactly as before.
            if (QUANTITY.equals(key)) {
                receiptItem.setQuantity(setFieldValue(item, readResults, includeTextDetails));
            } else if (NAME.equals(key)) {
                receiptItem.setName(setFieldValue(item, readResults, includeTextDetails));
            } else if (PRICE.equals(key)) {
                receiptItem.setPrice(setFieldValue(item, readResults, includeTextDetails));
            } else if (TOTAL_PRICE.equals(key)) {
                receiptItem.setTotalPrice(setFieldValue(item, readResults, includeTextDetails));
            }
        }
        receiptItemList.add(receiptItem);
    }
    return receiptItemList;
}
/**
* Helper method to convert the service returned {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
* to a SDK level {@link IntegerValue}
*
* @param serviceIntegerValue The integer value returned by the service in
* {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
*
* @return The {@link IntegerValue}.
*/
private static IntegerValue toFieldValueInteger(com.azure.ai.formrecognizer.implementation.models.FieldValue
serviceIntegerValue) {
// NOTE(review): the guard inspects getValueNumber() but the value used is
// getValueInteger() - confirm which field the service populates for INTEGER.
if (serviceIntegerValue.getValueNumber() != null) {
return new IntegerValue(serviceIntegerValue.getText(), toBoundingBox(serviceIntegerValue.getBoundingBox()),
serviceIntegerValue.getValueInteger(), serviceIntegerValue.getPage());
}
// No numeric value supplied by the service: surface null rather than a default.
return new IntegerValue(serviceIntegerValue.getText(), toBoundingBox(serviceIntegerValue.getBoundingBox()), null, serviceIntegerValue.getPage());
}
/**
* Helper method to convert the service returned {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
* to a SDK level {@link StringValue}.
*
* @param serviceStringValue The string value returned by the service in
* {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
*
* @return The {@link StringValue}.
*/
private static StringValue toFieldValueString(com.azure.ai.formrecognizer.implementation.models.FieldValue
serviceStringValue) {
// Direct mapping: text, bounding box, string value and page are copied across.
return new StringValue(serviceStringValue.getText(), toBoundingBox(serviceStringValue.getBoundingBox()),
serviceStringValue.getValueString(), serviceStringValue.getPage());
}
/**
* Helper method to convert the service returned {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
* to a SDK level {@link FloatValue}.
*
* @param serviceFloatValue The float value returned by the service in
* {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
*
* @return The {@link FloatValue}.
*/
private static FloatValue toFieldValueNumber(com.azure.ai.formrecognizer.implementation.models.FieldValue
    serviceFloatValue) {
    // Both branches of the previous null check built the same FloatValue:
    // the non-null branch passed getValueNumber() and the other passed null,
    // which is exactly what passing getValueNumber() unconditionally does.
    // The redundant branch is collapsed into a single return.
    return new FloatValue(serviceFloatValue.getText(), toBoundingBox(serviceFloatValue.getBoundingBox()),
        serviceFloatValue.getValueNumber(), serviceFloatValue.getPage());
}
/**
* Helper method to convert the service returned {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
* to a SDK level {@link StringValue}.
*
* @param serviceDateValue The string value returned by the service in
* {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
*
* @return The {@link StringValue}.
*/
private static StringValue toFieldValuePhoneNumber(com.azure.ai.formrecognizer.implementation.models.FieldValue
    servicePhoneNumberValue) {
    // Phone numbers are surfaced as a StringValue carrying the parsed number.
    // (Internal parameter renamed from the copy-pasted "serviceDateValue".)
    return new StringValue(servicePhoneNumberValue.getText(),
        toBoundingBox(servicePhoneNumberValue.getBoundingBox()),
        servicePhoneNumberValue.getValuePhoneNumber(), servicePhoneNumberValue.getPage());
}
/**
* Helper method to convert the service returned {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
* to a SDK level {@link DateValue}.
*
* @param serviceDateValue The string value returned by the service in
* {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
*
* @return The {@link StringValue}.
*/
private static DateValue toFieldValueDate(com.azure.ai.formrecognizer.implementation.models.FieldValue
serviceDateValue) {
// Maps the service DATE field onto the SDK DateValue model.
return new DateValue(serviceDateValue.getText(), toBoundingBox(serviceDateValue.getBoundingBox()),
serviceDateValue.getValueDate(), serviceDateValue.getPage());
}
/**
* Helper method to convert the service returned {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
* to a SDK level {@link TimeValue}.
*
* @param serviceDateValue The string value returned by the service in
* {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
*
* @return The {@link TimeValue}.
*
*/
private static TimeValue toFieldValueTime(com.azure.ai.formrecognizer.implementation.models.FieldValue
serviceDateValue) {
// Maps the service TIME field onto the SDK TimeValue model.
return new TimeValue(serviceDateValue.getText(), toBoundingBox(serviceDateValue.getBoundingBox()),
serviceDateValue.getValueTime(), serviceDateValue.getPage());
}
} |
Can the keys be converted to expandable enums? | static IterableStream<ExtractedReceipt> toReceipt(AnalyzeResult analyzeResult, boolean includeTextDetails) {
List<ReadResult> readResults = analyzeResult.getReadResults();
List<DocumentResult> documentResult = analyzeResult.getDocumentResults();
List<ExtractedReceipt> extractedReceiptList = new ArrayList<>();
for (int i = 0; i < readResults.size(); i++) {
ReadResult readResultItem = readResults.get(i);
PageMetadata pageMetadata = getPageInfo(readResultItem);
ExtractedReceipt extractedReceiptItem = new ExtractedReceipt(pageMetadata);
DocumentResult documentResultItem = documentResult.get(i);
documentResultItem.getFields().forEach((key, fieldValue) -> {
switch (key) {
case "ReceiptType":
extractedReceiptItem.setReceiptType(new ReceiptType(fieldValue.getValueString(),
fieldValue.getConfidence()));
break;
case "MerchantName":
extractedReceiptItem.setMerchantName(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "MerchantAddress":
extractedReceiptItem.setMerchantAddress(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "MerchantPhoneNumber":
extractedReceiptItem.setMerchantPhoneNumber(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "Subtotal":
extractedReceiptItem.setSubtotal(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "Tax":
extractedReceiptItem.setTax(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "Tip":
extractedReceiptItem.setTip(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "Total":
extractedReceiptItem.setTotal(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "TransactionDate":
extractedReceiptItem.setTransactionDate(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "TransactionTime":
extractedReceiptItem.setTransactionTime(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "Items":
extractedReceiptItem.setReceiptItems(toReceiptItems(fieldValue.getValueArray(), readResults, includeTextDetails));
break;
default:
break;
}
});
extractedReceiptList.add(extractedReceiptItem);
}
return new IterableStream<>(extractedReceiptList);
} | break; | static IterableStream<ExtractedReceipt> toReceipt(AnalyzeResult analyzeResult, boolean includeTextDetails) {
List<ReadResult> readResults = analyzeResult.getReadResults();
List<DocumentResult> documentResult = analyzeResult.getDocumentResults();
List<ExtractedReceipt> extractedReceiptList = new ArrayList<>();
for (int i = 0; i < readResults.size(); i++) {
ReadResult readResultItem = readResults.get(i);
PageMetadata pageMetadata = getPageInfo(readResultItem);
PageRange pageRange = null;
DocumentResult documentResultItem = documentResult.get(i);
List<Integer> receiptPageRange = documentResultItem.getPageRange();
if (receiptPageRange.size() == 2) {
pageRange = new PageRange(receiptPageRange.get(0), receiptPageRange.get(1));
}
ExtractedReceipt extractedReceiptItem = new ExtractedReceipt(pageMetadata, pageRange);
Map<String, FieldValue<?>> extractedFieldMap = new HashMap<>();
documentResultItem.getFields().forEach((key, fieldValue) -> {
switch (key) {
case "ReceiptType":
extractedReceiptItem.setReceiptType(new ReceiptType(fieldValue.getValueString(),
fieldValue.getConfidence()));
break;
case "MerchantName":
extractedReceiptItem.setMerchantName(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "MerchantAddress":
extractedReceiptItem.setMerchantAddress(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "MerchantPhoneNumber":
extractedReceiptItem.setMerchantPhoneNumber(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "Subtotal":
extractedReceiptItem.setSubtotal(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "Tax":
extractedReceiptItem.setTax(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "Tip":
extractedReceiptItem.setTip(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "Total":
extractedReceiptItem.setTotal(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "TransactionDate":
extractedReceiptItem.setTransactionDate(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "TransactionTime":
extractedReceiptItem.setTransactionTime(setFieldValue(fieldValue, readResults, includeTextDetails));
break;
case "Items":
extractedReceiptItem.setReceiptItems(toReceiptItems(fieldValue.getValueArray(), readResults, includeTextDetails));
break;
default:
extractedFieldMap.putIfAbsent(key, setFieldValue(fieldValue, readResults, includeTextDetails));
break;
}
});
extractedReceiptItem.setExtractedFields(extractedFieldMap);
extractedReceiptList.add(extractedReceiptItem);
}
return new IterableStream<>(extractedReceiptList);
} | class Transforms {
private static final ClientLogger LOGGER = new ClientLogger(Transforms.class);
private static final Pattern COMPILE = Pattern.compile("[^0-9]+");
private Transforms() {
}
private static FieldValue<?> setFieldValue(com.azure.ai.formrecognizer.implementation.models.FieldValue fieldValue,
List<ReadResult> readResults, boolean includeTextDetails) {
FieldValue<?> value;
switch (fieldValue.getType()) {
case PHONE_NUMBER:
case STRING:
case TIME:
case DATE:
value = toFieldValueString(fieldValue);
break;
case INTEGER:
value = toFieldValueInteger(fieldValue);
break;
case NUMBER:
value = toFieldValueNumber(fieldValue);
break;
case ARRAY:
case OBJECT:
default:
throw LOGGER.logExceptionAsError(new RuntimeException("FieldValue Type not supported"));
}
if (includeTextDetails) {
value.setElements(setReferenceElements(readResults, fieldValue.getElements()));
}
return value;
}
private static PageMetadata getPageInfo(ReadResult readResultItem) {
return new PageMetadata(TextLanguage.fromString(readResultItem.getLanguage().toString()),
readResultItem.getHeight(), readResultItem.getPage(), readResultItem.getWidth(),
readResultItem.getAngle(), DimensionUnit.fromString(readResultItem.getUnit().toString()));
}
private static List<Element> setReferenceElements(List<ReadResult> readResults, List<String> elements) {
List<Element> elementList = new ArrayList<>();
elements.forEach(elementString -> {
String[] indices = COMPILE.matcher(elementString).replaceAll(" ").trim().split(" ");
int readResultIndex, lineIndex;
if (indices.length >= 1) {
readResultIndex = Integer.parseInt(indices[0]);
lineIndex = Integer.parseInt(indices[1]);
} else {
throw LOGGER.logExceptionAsError(new RuntimeException("Reference Elements not found"));
}
if (indices.length == 3) {
int wordIndex = Integer.parseInt(indices[2]);
TextWord textWord = readResults.get(readResultIndex).getLines().get(lineIndex).getWords()
.get(wordIndex);
WordElement wordElement = new WordElement(textWord.getText(), toBoundingBox(textWord.getBoundingBox()));
elementList.add(wordElement);
} else {
TextLine textLine = readResults.get(readResultIndex).getLines().get(lineIndex);
LineElement lineElement = new LineElement(textLine.getText(), toBoundingBox(textLine.getBoundingBox()));
elementList.add(lineElement);
}
});
return elementList;
}
private static BoundingBox toBoundingBox(List<Float> boundingBox) {
List<Point> pointList = new ArrayList<>();
for (int i = 0; i < boundingBox.size(); i += 2) {
Point point = new Point(boundingBox.get(i), boundingBox.get(i + 1));
pointList.add(point);
}
return new BoundingBox(pointList);
}
private static List<ReceiptItem> toReceiptItems(
List<com.azure.ai.formrecognizer.implementation.models.FieldValue> fieldValue, List<ReadResult> readResults, boolean includeTextDetails) {
List<ReceiptItem> receiptItemList = new ArrayList<>();
fieldValue.forEach(fieldValue1 -> {
ReceiptItem receiptItem = new ReceiptItem();
fieldValue1.getValueObject().forEach((key, fieldValue2) -> {
switch (key) {
case "Quantity":
receiptItem.setQuantity(setFieldValue(fieldValue2, readResults, includeTextDetails));
break;
case "Name":
receiptItem.setName(setFieldValue(fieldValue2, readResults, includeTextDetails));
break;
case "TotalPrice":
receiptItem.setTotalPrice(setFieldValue(fieldValue2, readResults, includeTextDetails));
break;
default:
break;
}
});
receiptItemList.add(receiptItem);
});
return receiptItemList;
}
private static IntegerValue toFieldValueInteger(com.azure.ai.formrecognizer.implementation.models.FieldValue
serviceIntegerValue) {
if (serviceIntegerValue.getValueNumber() != null) {
return new IntegerValue(serviceIntegerValue.getText(), toBoundingBox(serviceIntegerValue.getBoundingBox()),
serviceIntegerValue.getValueInteger());
}
return new IntegerValue(serviceIntegerValue.getText(), toBoundingBox(serviceIntegerValue.getBoundingBox()), 0);
}
private static StringValue toFieldValueString(com.azure.ai.formrecognizer.implementation.models.FieldValue
serviceStringValue) {
return new StringValue(serviceStringValue.getText(), toBoundingBox(serviceStringValue.getBoundingBox()),
serviceStringValue.getValueString());
}
private static FloatValue toFieldValueNumber(com.azure.ai.formrecognizer.implementation.models.FieldValue
serviceFloatValue) {
return new FloatValue(serviceFloatValue.getText(), toBoundingBox(serviceFloatValue.getBoundingBox()),
serviceFloatValue.getValueNumber());
}
} | class Transforms {
private static final ClientLogger LOGGER = new ClientLogger(Transforms.class);
private static final Pattern COMPILE = Pattern.compile("[^0-9]+");
private Transforms() {
}
/**
* Helper method to convert the {@link com.azure.ai.formrecognizer.implementation.models.AnalyzeOperationResult}
* service level receipt model to list of {@link ExtractedReceipt}.
*
* @param analyzeResult The result of the analyze receipt operation returned by the service.
* @param includeTextDetails When set to true, a list of references to the text elements is returned in the read result.
*
* @return A list of {@link ExtractedReceipt} to represent the list of extracted receipt information.
*/
/**
* Helper method that converts the incoming service field value to one of the strongly typed SDK level {@link FieldValue} with
* reference elements set when {@code includeTextDetails} is set to true.
*
* @param fieldValue The named field values returned by the service.
* @param readResults The result containing the list of element references when includeTextDetails is set to true.
* @param includeTextDetails When set to true, a list of references to the text elements is returned in the read result.
*
* @return The strongly typed {@link FieldValue} for the field input.
*/
private static FieldValue<?> setFieldValue(com.azure.ai.formrecognizer.implementation.models.FieldValue fieldValue,
List<ReadResult> readResults, boolean includeTextDetails) {
FieldValue<?> value;
switch (fieldValue.getType()) {
case PHONE_NUMBER:
value = toFieldValuePhoneNumber(fieldValue);
break;
case STRING:
value = toFieldValueString(fieldValue);
break;
case TIME:
value = toFieldValueTime(fieldValue);
break;
case DATE:
value = toFieldValueDate(fieldValue);
break;
case INTEGER:
value = toFieldValueInteger(fieldValue);
break;
case NUMBER:
value = toFieldValueNumber(fieldValue);
break;
case ARRAY:
case OBJECT:
default:
throw LOGGER.logExceptionAsError(new RuntimeException("FieldValue Type not supported"));
}
if (includeTextDetails) {
value.setElements(setReferenceElements(readResults, fieldValue.getElements()));
}
return value;
}
/**
* Helper method that converts the service returned page information to SDK model {@link PageMetadata}.
*
* @param readResultItem A read result item returned from the service containing the page information for provided
* input.
*
* @return The {@link PageMetadata} for the receipt page.
*/
private static PageMetadata getPageInfo(ReadResult readResultItem) {
return new PageMetadata(readResultItem.getHeight(), readResultItem.getPage(), readResultItem.getWidth(),
readResultItem.getAngle(), DimensionUnit.fromString(readResultItem.getUnit().toString()));
}
/**
* Helper method to set the text reference elements on FieldValue/fields when {@code includeTextDetails} set to true.
*
* @param readResults The ReadResult containing the resolved references for text elements.
* @param elements When includeTextDetails is set to true, a list of references to the text
* elements constituting this field value.
*
* @return The updated {@link FieldValue} object with list if referenced elements.
*/
private static List<Element> setReferenceElements(List<ReadResult> readResults, List<String> elements) {
List<Element> elementList = new ArrayList<>();
elements.forEach(elementString -> {
String[] indices = COMPILE.matcher(elementString).replaceAll(" ").trim().split(" ");
int readResultIndex, lineIndex;
if (indices.length >= 1) {
readResultIndex = Integer.parseInt(indices[0]);
lineIndex = Integer.parseInt(indices[1]);
} else {
throw LOGGER.logExceptionAsError(new RuntimeException("Reference Elements not found"));
}
if (indices.length == 3) {
int wordIndex = Integer.parseInt(indices[2]);
TextWord textWord = readResults.get(readResultIndex).getLines().get(lineIndex).getWords()
.get(wordIndex);
WordElement wordElement = new WordElement(textWord.getText(), toBoundingBox(textWord.getBoundingBox()));
elementList.add(wordElement);
} else {
TextLine textLine = readResults.get(readResultIndex).getLines().get(lineIndex);
LineElement lineElement = new LineElement(textLine.getText(), toBoundingBox(textLine.getBoundingBox()));
elementList.add(lineElement);
}
});
return elementList;
}
/**
* Helper method to convert the service level modeled eight numbers representing the four points to SDK level
* {@link BoundingBox}.
*
* @param boundingBox A list of eight numbers representing the four points of a box.
*
* @return A {@link BoundingBox}.
*/
private static BoundingBox toBoundingBox(List<Float> boundingBox) {
BoundingBox boundingBox1;
if (boundingBox.size() == 8) {
Point topLeft = new Point(boundingBox.get(0), boundingBox.get(1));
Point topRight = new Point(boundingBox.get(2), boundingBox.get(3));
Point bottomLeft = new Point(boundingBox.get(4), boundingBox.get(5));
Point bottomRight = new Point(boundingBox.get(6), boundingBox.get(7));
boundingBox1 = new BoundingBox(topLeft, topRight, bottomLeft, bottomRight);
} else {
return null;
}
return boundingBox1;
}
/**
* Helper method to convert the service level {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
* to SDK level {@link ReceiptItem receipt items}.
*
* @param fieldValueItems The named field values returned by the service.
* @param readResults The result containing the list of element references when includeTextDetails is set to true.
* @param includeTextDetails When set to true, a list of references to the text elements is returned in the read result.
*
* @return A list of {@link ReceiptItem}.
*/
private static List<ReceiptItem> toReceiptItems(
List<com.azure.ai.formrecognizer.implementation.models.FieldValue> fieldValueItems, List<ReadResult> readResults, boolean includeTextDetails) {
List<ReceiptItem> receiptItemList = new ArrayList<>();
for (com.azure.ai.formrecognizer.implementation.models.FieldValue eachFieldValue : fieldValueItems) {
ReceiptItem receiptItem = new ReceiptItem();
for (ReceiptItemType key : ReceiptItemType.values()) {
com.azure.ai.formrecognizer.implementation.models.FieldValue item = eachFieldValue.getValueObject().get(key.toString());
if (QUANTITY.equals(key) && item != null) {
receiptItem.setQuantity(setFieldValue(item, readResults, includeTextDetails));
} else if (NAME.equals(key) && item != null) {
receiptItem.setName(setFieldValue(item, readResults, includeTextDetails));
} else if (PRICE.equals(key) && item != null) {
receiptItem.setPrice(setFieldValue(item, readResults, includeTextDetails));
} else if (TOTAL_PRICE.equals(key) && item != null) {
receiptItem.setTotalPrice(setFieldValue(item, readResults, includeTextDetails));
}
}
receiptItemList.add(receiptItem);
}
return receiptItemList;
}
/**
* Helper method to convert the service returned {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
* to a SDK level {@link IntegerValue}
*
* @param serviceIntegerValue The integer value returned by the service in
* {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
*
* @return The {@link IntegerValue}.
*/
private static IntegerValue toFieldValueInteger(com.azure.ai.formrecognizer.implementation.models.FieldValue
serviceIntegerValue) {
if (serviceIntegerValue.getValueNumber() != null) {
return new IntegerValue(serviceIntegerValue.getText(), toBoundingBox(serviceIntegerValue.getBoundingBox()),
serviceIntegerValue.getValueInteger(), serviceIntegerValue.getPage());
}
return new IntegerValue(serviceIntegerValue.getText(), toBoundingBox(serviceIntegerValue.getBoundingBox()), null, serviceIntegerValue.getPage());
}
/**
* Helper method to convert the service returned {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
* to a SDK level {@link StringValue}.
*
* @param serviceStringValue The string value returned by the service in
* {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
*
* @return The {@link StringValue}.
*/
private static StringValue toFieldValueString(com.azure.ai.formrecognizer.implementation.models.FieldValue
serviceStringValue) {
return new StringValue(serviceStringValue.getText(), toBoundingBox(serviceStringValue.getBoundingBox()),
serviceStringValue.getValueString(), serviceStringValue.getPage());
}
/**
* Helper method to convert the service returned {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
* to a SDK level {@link FloatValue}.
*
* @param serviceFloatValue The float value returned by the service in
* {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
*
* @return The {@link FloatValue}.
*/
private static FloatValue toFieldValueNumber(com.azure.ai.formrecognizer.implementation.models.FieldValue
serviceFloatValue) {
if (serviceFloatValue.getValueNumber() != null) {
return new FloatValue(serviceFloatValue.getText(), toBoundingBox(serviceFloatValue.getBoundingBox()),
serviceFloatValue.getValueNumber(), serviceFloatValue.getPage());
}
return new FloatValue(serviceFloatValue.getText(), toBoundingBox(serviceFloatValue.getBoundingBox()), null, serviceFloatValue.getPage());
}
/**
* Helper method to convert the service returned {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
* to a SDK level {@link StringValue}.
*
* @param serviceDateValue The string value returned by the service in
* {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
*
* @return The {@link StringValue}.
*/
private static StringValue toFieldValuePhoneNumber(com.azure.ai.formrecognizer.implementation.models.FieldValue
serviceDateValue) {
return new StringValue(serviceDateValue.getText(), toBoundingBox(serviceDateValue.getBoundingBox()),
serviceDateValue.getValuePhoneNumber(), serviceDateValue.getPage());
}
/**
* Helper method to convert the service returned {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
* to a SDK level {@link DateValue}.
*
* @param serviceDateValue The string value returned by the service in
* {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
*
* @return The {@link StringValue}.
*/
private static DateValue toFieldValueDate(com.azure.ai.formrecognizer.implementation.models.FieldValue
serviceDateValue) {
return new DateValue(serviceDateValue.getText(), toBoundingBox(serviceDateValue.getBoundingBox()),
serviceDateValue.getValueDate(), serviceDateValue.getPage());
}
/**
* Helper method to convert the service returned {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
* to a SDK level {@link TimeValue}.
*
* @param serviceDateValue The string value returned by the service in
* {@link com.azure.ai.formrecognizer.implementation.models.FieldValue
*
* @return The {@link TimeValue}.
*
*/
private static TimeValue toFieldValueTime(com.azure.ai.formrecognizer.implementation.models.FieldValue
serviceDateValue) {
return new TimeValue(serviceDateValue.getText(), toBoundingBox(serviceDateValue.getBoundingBox()),
serviceDateValue.getValueTime(), serviceDateValue.getPage());
}
} |
ditto, here and other places for the new methods, the implementation is the same as parent class why are they needed in every child class? | public String getString(String propertyName) {
return super.getString(propertyName);
} | return super.getString(propertyName); | public String getString(String propertyName) {
return super.getString(propertyName);
} | class Offer extends Resource {
/**
* Initialize an new instance of the Offer object.
*
* @param offerThroughput the throughput value for this offer.
*/
public Offer(int offerThroughput) {
super();
this.setOfferVersion(Constants.Properties.OFFER_VERSION_V2);
this.setOfferType("");
ObjectNode content = Utils.getSimpleObjectMapper().createObjectNode();
content.put(Constants.Properties.OFFER_THROUGHPUT, offerThroughput);
this.setContent(content);
}
/**
* Initialize an offer object from json string.
*
* @param jsonString the json string that represents the offer.
*/
public Offer(String jsonString) {
super(jsonString);
}
/**
* Gets the self-link of a resource to which the resource offer applies.
*
* @return the resource link.
*/
public String getResourceLink() {
return super.getString(Constants.Properties.RESOURCE_LINK);
}
/**
* Sets the self-link of a resource to which the resource offer applies.
*
* @param resourceLink the resource link.
*/
void setResourceLink(String resourceLink) {
BridgeInternal.setProperty(this, Constants.Properties.RESOURCE_LINK, resourceLink);
}
/**
* Sets the target resource id of a resource to which this offer applies.
*
* @return the resource id.
*/
public String getOfferResourceId() {
return super.getString(Constants.Properties.OFFER_RESOURCE_ID);
}
/**
* Sets the target resource id of a resource to which this offer applies.
*
* @param resourceId the resource id.
*/
void setOfferResourceId(String resourceId) {
BridgeInternal.setProperty(this, Constants.Properties.OFFER_RESOURCE_ID, resourceId);
}
/**
* Gets the OfferType for the resource offer.
*
* @return the offer type.
*/
public String getOfferType() {
return super.getString(Constants.Properties.OFFER_TYPE);
}
/**
* Sets the OfferType for the resource offer.
*
* @param offerType the offer type.
*/
public void setOfferType(String offerType) {
BridgeInternal.setProperty(this, Constants.Properties.OFFER_TYPE, offerType);
}
/**
* Gets the version of the current offer.
*
* @return the offer version.
*/
public String getOfferVersion() {
return super.getString(Constants.Properties.OFFER_VERSION);
}
/**
* Sets the offer version.
*
* @param offerVersion the version of the offer.
*/
public void setOfferVersion(String offerVersion) {
BridgeInternal.setProperty(this, Constants.Properties.OFFER_VERSION, offerVersion);
}
/**
* Gets the offer throughput for this offer.
*
* @return the offer throughput.
*/
public int getThroughput() {
return this.getContent().get(Constants.Properties.OFFER_THROUGHPUT).asInt();
}
/**
* Sets the offer throughput for this offer.
*
* @param throughput the throughput of this offer.
*/
public void setThroughput(int throughput) {
this.getContent().put(Constants.Properties.OFFER_THROUGHPUT, throughput);
}
private ObjectNode getContent() {
return BridgeInternal.getObject(this, Constants.Properties.OFFER_CONTENT);
}
private void setContent(ObjectNode offerContent) {
BridgeInternal.setProperty(this, Constants.Properties.OFFER_CONTENT, offerContent);
}
@Override
@Override
public Integer getInt(String propertyName) {
return super.getInt(propertyName);
}
} | class Offer extends Resource {
/**
* Initialize an new instance of the Offer object.
*
* @param offerThroughput the throughput value for this offer.
*/
public Offer(int offerThroughput) {
super();
this.setOfferVersion(Constants.Properties.OFFER_VERSION_V2);
this.setOfferType("");
ObjectNode content = Utils.getSimpleObjectMapper().createObjectNode();
content.put(Constants.Properties.OFFER_THROUGHPUT, offerThroughput);
this.setContent(content);
}
/**
* Initialize an offer object from json string.
*
* @param jsonString the json string that represents the offer.
*/
public Offer(String jsonString) {
super(jsonString);
}
/**
* Gets the self-link of a resource to which the resource offer applies.
*
* @return the resource link.
*/
public String getResourceLink() {
return super.getString(Constants.Properties.RESOURCE_LINK);
}
/**
* Sets the self-link of a resource to which the resource offer applies.
*
* @param resourceLink the resource link.
*/
void setResourceLink(String resourceLink) {
BridgeInternal.setProperty(this, Constants.Properties.RESOURCE_LINK, resourceLink);
}
/**
* Sets the target resource id of a resource to which this offer applies.
*
* @return the resource id.
*/
public String getOfferResourceId() {
return super.getString(Constants.Properties.OFFER_RESOURCE_ID);
}
/**
* Sets the target resource id of a resource to which this offer applies.
*
* @param resourceId the resource id.
*/
void setOfferResourceId(String resourceId) {
BridgeInternal.setProperty(this, Constants.Properties.OFFER_RESOURCE_ID, resourceId);
}
/**
* Gets the OfferType for the resource offer.
*
* @return the offer type.
*/
public String getOfferType() {
return super.getString(Constants.Properties.OFFER_TYPE);
}
/**
* Sets the OfferType for the resource offer.
*
* @param offerType the offer type.
*/
public void setOfferType(String offerType) {
BridgeInternal.setProperty(this, Constants.Properties.OFFER_TYPE, offerType);
}
/**
* Gets the version of the current offer.
*
* @return the offer version.
*/
public String getOfferVersion() {
return super.getString(Constants.Properties.OFFER_VERSION);
}
/**
* Sets the offer version.
*
* @param offerVersion the version of the offer.
*/
public void setOfferVersion(String offerVersion) {
BridgeInternal.setProperty(this, Constants.Properties.OFFER_VERSION, offerVersion);
}
/**
* Gets the offer throughput for this offer.
*
* @return the offer throughput.
*/
public int getThroughput() {
return this.getContent().get(Constants.Properties.OFFER_THROUGHPUT).asInt();
}
/**
* Sets the offer throughput for this offer.
*
* @param throughput the throughput of this offer.
*/
public void setThroughput(int throughput) {
this.getContent().put(Constants.Properties.OFFER_THROUGHPUT, throughput);
}
private ObjectNode getContent() {
return BridgeInternal.getObject(this, Constants.Properties.OFFER_CONTENT);
}
private void setContent(ObjectNode offerContent) {
BridgeInternal.setProperty(this, Constants.Properties.OFFER_CONTENT, offerContent);
}
@Override
@Override
public Integer getInt(String propertyName) {
return super.getInt(propertyName);
}
} |
ditto | public String toJson() {
return super.toJson();
} | return super.toJson(); | public String toJson() {
return super.toJson();
} | class PartitionKeyRange extends Resource {
public static final String MINIMUM_INCLUSIVE_EFFECTIVE_PARTITION_KEY = "";
public static final String MAXIMUM_EXCLUSIVE_EFFECTIVE_PARTITION_KEY = "FF";
public static final String MASTER_PARTITION_KEY_RANGE_ID = "M";
/**
* Constructor.
*
* @param objectNode the {@link ObjectNode} that represent the
* {@link JsonSerializable}
*/
public PartitionKeyRange(ObjectNode objectNode) {
super(objectNode);
}
/**
* Initialize a partition key range object.
*/
public PartitionKeyRange() {
super();
}
/**
* Initialize a partition key range object from json string.
*
* @param jsonString
* the json string that represents the partition key range
* object.
*/
public PartitionKeyRange(String jsonString) {
super(jsonString);
}
/**
* Set id of partition key range
* @param id the name of the resource.
* @return the partition key range
*/
public PartitionKeyRange setId(String id) {
super.setId(id);
return this;
}
public PartitionKeyRange(String id, String minInclusive, String maxExclusive) {
super();
this.setId(id);
this.setMinInclusive(minInclusive);
this.setMaxExclusive(maxExclusive);
}
public PartitionKeyRange(String id, String minInclusive, String maxExclusive, List<String> parents) {
super();
this.setId(id);
this.setMinInclusive(minInclusive);
this.setMaxExclusive(maxExclusive);
this.setParents(parents);
}
public String getMinInclusive() {
return super.getString("minInclusive");
}
public void setMinInclusive(String minInclusive) {
BridgeInternal.setProperty(this, "minInclusive", minInclusive);
}
public String getMaxExclusive() {
return super.getString("maxExclusive");
}
public void setMaxExclusive(String maxExclusive) {
BridgeInternal.setProperty(this, "maxExclusive", maxExclusive);
}
public Range<String> toRange() {
return new Range<String>(this.getMinInclusive(), this.getMaxExclusive(), true, false);
}
@Override
public boolean equals(Object obj) {
if (!(obj instanceof PartitionKeyRange)) {
return false;
}
PartitionKeyRange otherRange = (PartitionKeyRange) obj;
return this.getId().compareTo(otherRange.getId()) == 0
&& this.getMinInclusive().compareTo(otherRange.getMinInclusive()) == 0
&& this.getMaxExclusive().compareTo(otherRange.getMaxExclusive()) == 0;
}
@Override
public int hashCode() {
int hash = 0;
hash = (hash * 397) ^ this.getId().hashCode();
hash = (hash * 397) ^ this.getMinInclusive().hashCode();
hash = (hash * 397) ^ this.getMaxExclusive().hashCode();
return hash;
}
public void setParents(List<String> parents) {
BridgeInternal.setProperty(this, Constants.Properties.PARENTS, parents);
}
/**
* Used internally to indicate the ID of the parent range
* @return a list partition key range ID
*/
public List<String> getParents() { return this.getList(Constants.Properties.PARENTS, String.class); }
@Override
} | class PartitionKeyRange extends Resource {
public static final String MINIMUM_INCLUSIVE_EFFECTIVE_PARTITION_KEY = "";
public static final String MAXIMUM_EXCLUSIVE_EFFECTIVE_PARTITION_KEY = "FF";
public static final String MASTER_PARTITION_KEY_RANGE_ID = "M";
/**
* Constructor.
*
* @param objectNode the {@link ObjectNode} that represent the
* {@link JsonSerializable}
*/
public PartitionKeyRange(ObjectNode objectNode) {
super(objectNode);
}
/**
* Initialize a partition key range object.
*/
public PartitionKeyRange() {
super();
}
/**
* Initialize a partition key range object from json string.
*
* @param jsonString
* the json string that represents the partition key range
* object.
*/
public PartitionKeyRange(String jsonString) {
super(jsonString);
}
/**
* Set id of partition key range
* @param id the name of the resource.
* @return the partition key range
*/
public PartitionKeyRange setId(String id) {
super.setId(id);
return this;
}
public PartitionKeyRange(String id, String minInclusive, String maxExclusive) {
super();
this.setId(id);
this.setMinInclusive(minInclusive);
this.setMaxExclusive(maxExclusive);
}
public PartitionKeyRange(String id, String minInclusive, String maxExclusive, List<String> parents) {
super();
this.setId(id);
this.setMinInclusive(minInclusive);
this.setMaxExclusive(maxExclusive);
this.setParents(parents);
}
public String getMinInclusive() {
return super.getString("minInclusive");
}
public void setMinInclusive(String minInclusive) {
BridgeInternal.setProperty(this, "minInclusive", minInclusive);
}
public String getMaxExclusive() {
return super.getString("maxExclusive");
}
public void setMaxExclusive(String maxExclusive) {
BridgeInternal.setProperty(this, "maxExclusive", maxExclusive);
}
public Range<String> toRange() {
return new Range<String>(this.getMinInclusive(), this.getMaxExclusive(), true, false);
}
@Override
public boolean equals(Object obj) {
if (!(obj instanceof PartitionKeyRange)) {
return false;
}
PartitionKeyRange otherRange = (PartitionKeyRange) obj;
return this.getId().compareTo(otherRange.getId()) == 0
&& this.getMinInclusive().compareTo(otherRange.getMinInclusive()) == 0
&& this.getMaxExclusive().compareTo(otherRange.getMaxExclusive()) == 0;
}
@Override
public int hashCode() {
int hash = 0;
hash = (hash * 397) ^ this.getId().hashCode();
hash = (hash * 397) ^ this.getMinInclusive().hashCode();
hash = (hash * 397) ^ this.getMaxExclusive().hashCode();
return hash;
}
public void setParents(List<String> parents) {
BridgeInternal.setProperty(this, Constants.Properties.PARENTS, parents);
}
/**
* Used internally to indicate the ID of the parent range
* @return a list partition key range ID
*/
public List<String> getParents() { return this.getList(Constants.Properties.PARENTS, String.class); }
@Override
} |
ditto | public Object get(String propertyName) {
return super.get(propertyName);
} | return super.get(propertyName); | public Object get(String propertyName) {
return super.get(propertyName);
} | class Address extends Resource {
/**
* Constructor.
*
* @param objectNode the {@link ObjectNode} that represent the
* {@link JsonSerializable}
*/
public Address(ObjectNode objectNode) {
super(objectNode);
}
/**
* Initialize an offer object.
*/
public Address() {
super();
}
/**
* Initialize an address object from json string.
*
* @param jsonString the json string that represents the address.
*/
public Address(String jsonString) {
super(jsonString);
}
public boolean isPrimary() {
return Boolean.TRUE.equals(super.getBoolean(Constants.Properties.IS_PRIMARY));
}
void setIsPrimary(boolean isPrimary) {
BridgeInternal.setProperty(this, Constants.Properties.IS_PRIMARY, isPrimary);
}
public String getProtocolScheme() {
return super.getString(Constants.Properties.PROTOCOL);
}
void setProtocol(String protocol) {
BridgeInternal.setProperty(this, Constants.Properties.PROTOCOL, protocol);
}
public String getLogicalUri() {
return super.getString(Constants.Properties.LOGICAL_URI);
}
void setLogicalUri(String logicalUri) {
BridgeInternal.setProperty(this, Constants.Properties.LOGICAL_URI, logicalUri);
}
public String getPhyicalUri() {
return super.getString(Constants.Properties.PHYISCAL_URI);
}
void setPhysicalUri(String phyicalUri) {
BridgeInternal.setProperty(this, Constants.Properties.PHYISCAL_URI, phyicalUri);
}
public String getPartitionIndex() {
return super.getString(Constants.Properties.PARTITION_INDEX);
}
void setPartitionIndex(String partitionIndex) {
BridgeInternal.setProperty(this, Constants.Properties.PARTITION_INDEX, partitionIndex);
}
public String getParitionKeyRangeId() {
return super.getString(Constants.Properties.PARTITION_KEY_RANGE_ID);
}
public void setPartitionKeyRangeId(String partitionKeyRangeId) {
BridgeInternal.setProperty(this, Constants.Properties.PARTITION_KEY_RANGE_ID, partitionKeyRangeId);
}
@Override
} | class Address extends Resource {
/**
* Constructor.
*
* @param objectNode the {@link ObjectNode} that represent the
* {@link JsonSerializable}
*/
public Address(ObjectNode objectNode) {
super(objectNode);
}
/**
* Initialize an offer object.
*/
public Address() {
super();
}
/**
* Initialize an address object from json string.
*
* @param jsonString the json string that represents the address.
*/
public Address(String jsonString) {
super(jsonString);
}
public boolean isPrimary() {
return Boolean.TRUE.equals(super.getBoolean(Constants.Properties.IS_PRIMARY));
}
void setIsPrimary(boolean isPrimary) {
BridgeInternal.setProperty(this, Constants.Properties.IS_PRIMARY, isPrimary);
}
public String getProtocolScheme() {
return super.getString(Constants.Properties.PROTOCOL);
}
void setProtocol(String protocol) {
BridgeInternal.setProperty(this, Constants.Properties.PROTOCOL, protocol);
}
public String getLogicalUri() {
return super.getString(Constants.Properties.LOGICAL_URI);
}
void setLogicalUri(String logicalUri) {
BridgeInternal.setProperty(this, Constants.Properties.LOGICAL_URI, logicalUri);
}
public String getPhyicalUri() {
return super.getString(Constants.Properties.PHYISCAL_URI);
}
void setPhysicalUri(String phyicalUri) {
BridgeInternal.setProperty(this, Constants.Properties.PHYISCAL_URI, phyicalUri);
}
public String getPartitionIndex() {
return super.getString(Constants.Properties.PARTITION_INDEX);
}
void setPartitionIndex(String partitionIndex) {
BridgeInternal.setProperty(this, Constants.Properties.PARTITION_INDEX, partitionIndex);
}
public String getParitionKeyRangeId() {
return super.getString(Constants.Properties.PARTITION_KEY_RANGE_ID);
}
public void setPartitionKeyRangeId(String partitionKeyRangeId) {
BridgeInternal.setProperty(this, Constants.Properties.PARTITION_KEY_RANGE_ID, partitionKeyRangeId);
}
@Override
} |
discussed offline. | public Object get(String propertyName) {
return super.get(propertyName);
} | return super.get(propertyName); | public Object get(String propertyName) {
return super.get(propertyName);
} | class DatabaseAccount extends Resource {
private ConsistencyPolicy consistencyPolicy;
private long maxMediaStorageUsageInMB;
private long mediaStorageUsageInMB;
private ReplicationPolicy replicationPolicy;
private ReplicationPolicy systemReplicationPolicy;
private Map<String, Object> queryEngineConfiguration;
/**
* Constructor.
*
* @param objectNode the {@link ObjectNode} that represent the
* {@link JsonSerializable}
*/
DatabaseAccount(ObjectNode objectNode) {
super(objectNode);
}
/**
* Constructor.
*/
public DatabaseAccount() {
BridgeInternal.setResourceSelfLink(this, "");
}
/**
* Initialize a database account object from json string.
*
* @param jsonString the json string that represents the database account.
*/
public DatabaseAccount(String jsonString) {
super(jsonString);
}
/**
* Get the databases link of the databaseAccount.
*
* @return the databases link.
*/
String getDatabasesLink() {
return super.getString(Constants.Properties.DATABASES_LINK);
}
/**
* Set the databases of the databaseAccount.
*
* @param databasesLink the databases link.
*/
void setDatabasesLink(String databasesLink) {
BridgeInternal.setProperty(this, Constants.Properties.DATABASES_LINK, databasesLink);
}
/**
* Get the medialink of the databaseAccount.
*
* @return the media link.
*/
String getMediaLink() {
return super.getString(Constants.Properties.MEDIA_LINK);
}
/**
* Set the medialink of the databaseAccount.
*
* @param medialink the media link.
*/
void setMediaLink(String medialink) {
BridgeInternal.setProperty(this, Constants.Properties.MEDIA_LINK, medialink);
}
/**
* Get the addresseslink of the databaseAccount.
*
* @return the addresses link.
*/
String getAddressesLink() {
return super.getString(Constants.Properties.ADDRESS_LINK);
}
/**
* Set the addresseslink of the databaseAccount.
*
* @param addresseslink the addresses link.
*/
void setAddressesLink(String addresseslink) {
BridgeInternal.setProperty(this, Constants.Properties.ADDRESS_LINK, addresseslink);
}
/**
* Attachment content (media) storage quota in MBs Retrieved from gateway.
*
* @return the max media storage usage in MB.
*/
long getMaxMediaStorageUsageInMB() {
return this.maxMediaStorageUsageInMB;
}
public void setMaxMediaStorageUsageInMB(long value) {
this.maxMediaStorageUsageInMB = value;
}
/**
* Current attachment content (media) usage in MBs.
* <p>
* Retrieved from gateway. Value is returned from cached information updated
* periodically and is not guaranteed to be real time.
*
* @return the media storage usage in MB.
*/
long getMediaStorageUsageInMB() {
return this.mediaStorageUsageInMB;
}
public void setMediaStorageUsageInMB(long value) {
this.mediaStorageUsageInMB = value;
}
/**
* Gets the ConsistencyPolicy properties.
*
* @return the consistency policy.
*/
public ConsistencyPolicy getConsistencyPolicy() {
if (this.consistencyPolicy == null) {
this.consistencyPolicy = super.getObject(Constants.Properties.USER_CONSISTENCY_POLICY,
ConsistencyPolicy.class);
if (this.consistencyPolicy == null) {
this.consistencyPolicy = new ConsistencyPolicy();
}
}
return this.consistencyPolicy;
}
/**
* Gets the ReplicationPolicy properties.
*
* @return the replication policy.
*/
public ReplicationPolicy getReplicationPolicy() {
if (this.replicationPolicy == null) {
this.replicationPolicy = super.getObject(Constants.Properties.USER_REPLICATION_POLICY,
ReplicationPolicy.class);
if (this.replicationPolicy == null) {
this.replicationPolicy = new ReplicationPolicy();
}
}
return this.replicationPolicy;
}
/**
* Gets the SystemReplicationPolicy properties.
*
* @return the system replication policy.
*/
public ReplicationPolicy getSystemReplicationPolicy() {
if (this.systemReplicationPolicy == null) {
this.systemReplicationPolicy = super.getObject(Constants.Properties.SYSTEM_REPLICATION_POLICY,
ReplicationPolicy.class);
if (this.systemReplicationPolicy == null) {
this.systemReplicationPolicy = new ReplicationPolicy();
}
}
return this.systemReplicationPolicy;
}
/**
* Gets the QueryEngineConfiguration properties.
*
* @return the query engine configuration.
*/
public Map<String, Object> getQueryEngineConfiguration() {
if (this.queryEngineConfiguration == null) {
String queryEngineConfigurationJsonString = super.getObject(Constants.Properties.QUERY_ENGINE_CONFIGURATION,
String.class);
if (StringUtils.isNotEmpty(queryEngineConfigurationJsonString)) {
TypeReference<HashMap<String, Object>> typeRef = new TypeReference<HashMap<String, Object>>() {
};
try {
this.queryEngineConfiguration = Utils.getSimpleObjectMapper()
.readValue(queryEngineConfigurationJsonString, typeRef);
} catch (IOException e) {
throw new IllegalArgumentException(e);
}
if (this.queryEngineConfiguration == null) {
this.queryEngineConfiguration = new HashMap<>();
}
}
}
return this.queryEngineConfiguration;
}
/**
* Gets the list of writable locations for this database account.
*
* @return the list of writable locations.
*/
public Iterable<DatabaseAccountLocation> getWritableLocations() {
return super.getCollection(Constants.Properties.WRITABLE_LOCATIONS, DatabaseAccountLocation.class);
}
/**
* Sets the list of writable locations for this database account.
* <p>
* The list of writable locations are returned by the service.
*
* @param locations the list of writable locations.
*/
public void setWritableLocations(Iterable<DatabaseAccountLocation> locations) {
BridgeInternal.setProperty(this, Constants.Properties.WRITABLE_LOCATIONS, locations);
}
/**
* Gets the list of readable locations for this database account.
*
* @return the list of readable locations.
*/
public Iterable<DatabaseAccountLocation> getReadableLocations() {
return super.getCollection(Constants.Properties.READABLE_LOCATIONS, DatabaseAccountLocation.class);
}
/**
* Sets the list of readable locations for this database account.
* <p>
* The list of readable locations are returned by the service.
*
* @param locations the list of readable locations.
*/
public void setReadableLocations(Iterable<DatabaseAccountLocation> locations) {
BridgeInternal.setProperty(this, Constants.Properties.READABLE_LOCATIONS, locations);
}
/**
* Gets if enable multiple write locations is set.
*
* @return the true if multiple write locations are set
*/
public boolean getEnableMultipleWriteLocations() {
return ObjectUtils.defaultIfNull(super.getBoolean(Constants.Properties.ENABLE_MULTIPLE_WRITE_LOCATIONS), false);
}
public void setEnableMultipleWriteLocations(boolean value) {
BridgeInternal.setProperty(this, Constants.Properties.ENABLE_MULTIPLE_WRITE_LOCATIONS, value);
}
protected void populatePropertyBag() {
super.populatePropertyBag();
if (this.consistencyPolicy != null) {
ModelBridgeInternal.populatePropertyBagJsonSerializable(this.consistencyPolicy);
BridgeInternal.setProperty(this, Constants.Properties.USER_CONSISTENCY_POLICY, this.consistencyPolicy);
}
}
@Override
public String toJson() {
this.populatePropertyBag();
return super.toJson();
}
@Override
} | class DatabaseAccount extends Resource {
private ConsistencyPolicy consistencyPolicy;
private long maxMediaStorageUsageInMB;
private long mediaStorageUsageInMB;
private ReplicationPolicy replicationPolicy;
private ReplicationPolicy systemReplicationPolicy;
private Map<String, Object> queryEngineConfiguration;
/**
* Constructor.
*
* @param objectNode the {@link ObjectNode} that represent the
* {@link JsonSerializable}
*/
DatabaseAccount(ObjectNode objectNode) {
super(objectNode);
}
/**
* Constructor.
*/
public DatabaseAccount() {
BridgeInternal.setResourceSelfLink(this, "");
}
/**
* Initialize a database account object from json string.
*
* @param jsonString the json string that represents the database account.
*/
public DatabaseAccount(String jsonString) {
super(jsonString);
}
/**
* Get the databases link of the databaseAccount.
*
* @return the databases link.
*/
String getDatabasesLink() {
return super.getString(Constants.Properties.DATABASES_LINK);
}
/**
* Set the databases of the databaseAccount.
*
* @param databasesLink the databases link.
*/
void setDatabasesLink(String databasesLink) {
BridgeInternal.setProperty(this, Constants.Properties.DATABASES_LINK, databasesLink);
}
/**
* Get the medialink of the databaseAccount.
*
* @return the media link.
*/
String getMediaLink() {
return super.getString(Constants.Properties.MEDIA_LINK);
}
/**
* Set the medialink of the databaseAccount.
*
* @param medialink the media link.
*/
void setMediaLink(String medialink) {
BridgeInternal.setProperty(this, Constants.Properties.MEDIA_LINK, medialink);
}
/**
* Get the addresseslink of the databaseAccount.
*
* @return the addresses link.
*/
String getAddressesLink() {
return super.getString(Constants.Properties.ADDRESS_LINK);
}
/**
* Set the addresseslink of the databaseAccount.
*
* @param addresseslink the addresses link.
*/
void setAddressesLink(String addresseslink) {
BridgeInternal.setProperty(this, Constants.Properties.ADDRESS_LINK, addresseslink);
}
/**
* Attachment content (media) storage quota in MBs Retrieved from gateway.
*
* @return the max media storage usage in MB.
*/
long getMaxMediaStorageUsageInMB() {
return this.maxMediaStorageUsageInMB;
}
public void setMaxMediaStorageUsageInMB(long value) {
this.maxMediaStorageUsageInMB = value;
}
/**
* Current attachment content (media) usage in MBs.
* <p>
* Retrieved from gateway. Value is returned from cached information updated
* periodically and is not guaranteed to be real time.
*
* @return the media storage usage in MB.
*/
long getMediaStorageUsageInMB() {
return this.mediaStorageUsageInMB;
}
public void setMediaStorageUsageInMB(long value) {
this.mediaStorageUsageInMB = value;
}
/**
* Gets the ConsistencyPolicy properties.
*
* @return the consistency policy.
*/
public ConsistencyPolicy getConsistencyPolicy() {
if (this.consistencyPolicy == null) {
this.consistencyPolicy = super.getObject(Constants.Properties.USER_CONSISTENCY_POLICY,
ConsistencyPolicy.class);
if (this.consistencyPolicy == null) {
this.consistencyPolicy = new ConsistencyPolicy();
}
}
return this.consistencyPolicy;
}
/**
* Gets the ReplicationPolicy properties.
*
* @return the replication policy.
*/
public ReplicationPolicy getReplicationPolicy() {
if (this.replicationPolicy == null) {
this.replicationPolicy = super.getObject(Constants.Properties.USER_REPLICATION_POLICY,
ReplicationPolicy.class);
if (this.replicationPolicy == null) {
this.replicationPolicy = new ReplicationPolicy();
}
}
return this.replicationPolicy;
}
/**
* Gets the SystemReplicationPolicy properties.
*
* @return the system replication policy.
*/
public ReplicationPolicy getSystemReplicationPolicy() {
if (this.systemReplicationPolicy == null) {
this.systemReplicationPolicy = super.getObject(Constants.Properties.SYSTEM_REPLICATION_POLICY,
ReplicationPolicy.class);
if (this.systemReplicationPolicy == null) {
this.systemReplicationPolicy = new ReplicationPolicy();
}
}
return this.systemReplicationPolicy;
}
/**
* Gets the QueryEngineConfiguration properties.
*
* @return the query engine configuration.
*/
public Map<String, Object> getQueryEngineConfiguration() {
if (this.queryEngineConfiguration == null) {
String queryEngineConfigurationJsonString = super.getObject(Constants.Properties.QUERY_ENGINE_CONFIGURATION,
String.class);
if (StringUtils.isNotEmpty(queryEngineConfigurationJsonString)) {
TypeReference<HashMap<String, Object>> typeRef = new TypeReference<HashMap<String, Object>>() {
};
try {
this.queryEngineConfiguration = Utils.getSimpleObjectMapper()
.readValue(queryEngineConfigurationJsonString, typeRef);
} catch (IOException e) {
throw new IllegalArgumentException(e);
}
if (this.queryEngineConfiguration == null) {
this.queryEngineConfiguration = new HashMap<>();
}
}
}
return this.queryEngineConfiguration;
}
/**
* Gets the list of writable locations for this database account.
*
* @return the list of writable locations.
*/
public Iterable<DatabaseAccountLocation> getWritableLocations() {
return super.getCollection(Constants.Properties.WRITABLE_LOCATIONS, DatabaseAccountLocation.class);
}
/**
* Sets the list of writable locations for this database account.
* <p>
* The list of writable locations are returned by the service.
*
* @param locations the list of writable locations.
*/
public void setWritableLocations(Iterable<DatabaseAccountLocation> locations) {
BridgeInternal.setProperty(this, Constants.Properties.WRITABLE_LOCATIONS, locations);
}
/**
* Gets the list of readable locations for this database account.
*
* @return the list of readable locations.
*/
public Iterable<DatabaseAccountLocation> getReadableLocations() {
return super.getCollection(Constants.Properties.READABLE_LOCATIONS, DatabaseAccountLocation.class);
}
/**
* Sets the list of readable locations for this database account.
* <p>
* The list of readable locations are returned by the service.
*
* @param locations the list of readable locations.
*/
public void setReadableLocations(Iterable<DatabaseAccountLocation> locations) {
BridgeInternal.setProperty(this, Constants.Properties.READABLE_LOCATIONS, locations);
}
/**
* Gets if enable multiple write locations is set.
*
* @return the true if multiple write locations are set
*/
public boolean getEnableMultipleWriteLocations() {
return ObjectUtils.defaultIfNull(super.getBoolean(Constants.Properties.ENABLE_MULTIPLE_WRITE_LOCATIONS), false);
}
public void setEnableMultipleWriteLocations(boolean value) {
BridgeInternal.setProperty(this, Constants.Properties.ENABLE_MULTIPLE_WRITE_LOCATIONS, value);
}
protected void populatePropertyBag() {
super.populatePropertyBag();
if (this.consistencyPolicy != null) {
ModelBridgeInternal.populatePropertyBagJsonSerializable(this.consistencyPolicy);
BridgeInternal.setProperty(this, Constants.Properties.USER_CONSISTENCY_POLICY, this.consistencyPolicy);
}
}
@Override
public String toJson() {
this.populatePropertyBag();
return super.toJson();
}
@Override
} |
(Could be out of scope of this PR) Potential double serialization. Instead of converting toJson and again back from Json, we could just use the propertybag to get UDF by using Resource(ObjectNode) overload. | public Mono<CosmosAsyncUserDefinedFunctionResponse> replace(CosmosUserDefinedFunctionProperties udfSettings) {
return container.getDatabase()
.getDocClientWrapper()
.replaceUserDefinedFunction(new UserDefinedFunction(ModelBridgeInternal.toJsonFromJsonSerializable(udfSettings)), null)
.map(response -> ModelBridgeInternal.createCosmosAsyncUserDefinedFunctionResponse(response, container))
.single();
} | .replaceUserDefinedFunction(new UserDefinedFunction(ModelBridgeInternal.toJsonFromJsonSerializable(udfSettings)), null) | public Mono<CosmosAsyncUserDefinedFunctionResponse> replace(CosmosUserDefinedFunctionProperties udfSettings) {
return container.getDatabase()
.getDocClientWrapper()
.replaceUserDefinedFunction(new UserDefinedFunction(ModelBridgeInternal.toJsonFromJsonSerializable(udfSettings)), null)
.map(response -> ModelBridgeInternal.createCosmosAsyncUserDefinedFunctionResponse(response, container))
.single();
} | class CosmosAsyncUserDefinedFunction {
@SuppressWarnings("EnforceFinalFields")
private final CosmosAsyncContainer container;
private String id;
CosmosAsyncUserDefinedFunction(String id, CosmosAsyncContainer container) {
this.id = id;
this.container = container;
}
/**
* Get the id of the {@link CosmosAsyncUserDefinedFunction}
*
* @return the id of the {@link CosmosAsyncUserDefinedFunction}
*/
public String getId() {
return id;
}
/**
* Set the id of the {@link CosmosAsyncUserDefinedFunction}
*
* @param id the id of the {@link CosmosAsyncUserDefinedFunction}
* @return the same {@link CosmosAsyncUserDefinedFunction} that had the id set
*/
CosmosAsyncUserDefinedFunction setId(String id) {
this.id = id;
return this;
}
/**
* Read a user defined function.
* <p>
* After subscription the operation will be performed.
* The {@link Mono} upon successful completion will contain a single resource response for the read user defined
* function.
* In case of failure the {@link Mono} will error.
*
* @return an {@link Mono} containing the single resource response for the read user defined function or an error.
*/
public Mono<CosmosAsyncUserDefinedFunctionResponse> read() {
return container.getDatabase().getDocClientWrapper().readUserDefinedFunction(getLink(), null)
.map(response -> ModelBridgeInternal.createCosmosAsyncUserDefinedFunctionResponse(response, container)).single();
}
/**
* Replaces a cosmos user defined function.
* <p>
* After subscription the operation will be performed.
* The {@link Mono} upon successful completion will contain a single resource response with the replaced user
* defined function.
* In case of failure the {@link Mono} will error.
*
* @param udfSettings the cosmos user defined function properties.
* @return an {@link Mono} containing the single resource response with the replaced cosmos user defined function
* or an error.
*/
/**
* Deletes a cosmos user defined function.
* <p>
* After subscription the operation will be performed.
* The {@link Mono} upon successful completion will contain a single resource response for the deleted user
* defined function.
* In case of failure the {@link Mono} will error.
*
* @return an {@link Mono} containing the single resource response for the deleted cosmos user defined function or
* an error.
*/
public Mono<CosmosAsyncUserDefinedFunctionResponse> delete() {
return container.getDatabase()
.getDocClientWrapper()
.deleteUserDefinedFunction(this.getLink(), null)
.map(response -> ModelBridgeInternal.createCosmosAsyncUserDefinedFunctionResponse(response, container))
.single();
}
String getURIPathSegment() {
return Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT;
}
String getParentLink() {
return container.getLink();
}
String getLink() {
StringBuilder builder = new StringBuilder();
builder.append(getParentLink());
builder.append("/");
builder.append(getURIPathSegment());
builder.append("/");
builder.append(getId());
return builder.toString();
}
} | class CosmosAsyncUserDefinedFunction {
@SuppressWarnings("EnforceFinalFields")
private final CosmosAsyncContainer container;
private String id;
CosmosAsyncUserDefinedFunction(String id, CosmosAsyncContainer container) {
this.id = id;
this.container = container;
}
/**
* Get the id of the {@link CosmosAsyncUserDefinedFunction}
*
* @return the id of the {@link CosmosAsyncUserDefinedFunction}
*/
public String getId() {
return id;
}
/**
* Set the id of the {@link CosmosAsyncUserDefinedFunction}
*
* @param id the id of the {@link CosmosAsyncUserDefinedFunction}
* @return the same {@link CosmosAsyncUserDefinedFunction} that had the id set
*/
CosmosAsyncUserDefinedFunction setId(String id) {
this.id = id;
return this;
}
/**
* Read a user defined function.
* <p>
* After subscription the operation will be performed.
* The {@link Mono} upon successful completion will contain a single resource response for the read user defined
* function.
* In case of failure the {@link Mono} will error.
*
* @return an {@link Mono} containing the single resource response for the read user defined function or an error.
*/
public Mono<CosmosAsyncUserDefinedFunctionResponse> read() {
return container.getDatabase().getDocClientWrapper().readUserDefinedFunction(getLink(), null)
.map(response -> ModelBridgeInternal.createCosmosAsyncUserDefinedFunctionResponse(response, container)).single();
}
/**
* Replaces a cosmos user defined function.
* <p>
* After subscription the operation will be performed.
* The {@link Mono} upon successful completion will contain a single resource response with the replaced user
* defined function.
* In case of failure the {@link Mono} will error.
*
* @param udfSettings the cosmos user defined function properties.
* @return an {@link Mono} containing the single resource response with the replaced cosmos user defined function
* or an error.
*/
/**
* Deletes a cosmos user defined function.
* <p>
* After subscription the operation will be performed.
* The {@link Mono} upon successful completion will contain a single resource response for the deleted user
* defined function.
* In case of failure the {@link Mono} will error.
*
* @return an {@link Mono} containing the single resource response for the deleted cosmos user defined function or
* an error.
*/
public Mono<CosmosAsyncUserDefinedFunctionResponse> delete() {
return container.getDatabase()
.getDocClientWrapper()
.deleteUserDefinedFunction(this.getLink(), null)
.map(response -> ModelBridgeInternal.createCosmosAsyncUserDefinedFunctionResponse(response, container))
.single();
}
String getURIPathSegment() {
return Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT;
}
String getParentLink() {
return container.getLink();
}
String getLink() {
StringBuilder builder = new StringBuilder();
builder.append(getParentLink());
builder.append("/");
builder.append(getURIPathSegment());
builder.append("/");
builder.append(getId());
return builder.toString();
}
} |
nit: use `final` | return isAuthorized(RECEIVE_BY_SEQUENCE_NUMBER_OPERATION).thenMany(createRequestResponse.flatMap(channel -> {
final Message message = createManagementMessage(RECEIVE_BY_SEQUENCE_NUMBER_OPERATION,
channel.getReceiveLinkName());
HashMap<String, Object> requestBodyMap = new HashMap<>();
requestBodyMap.put(SEQUENCE_NUMBERS, Arrays.stream(fromSequenceNumbers)
.boxed().toArray(Long[]::new));
requestBodyMap.put(RECEIVER_SETTLE_MODE,
UnsignedInteger.valueOf(receiveMode == ReceiveMode.RECEIVE_AND_DELETE ? 0 : 1));
if (!Objects.isNull(sessionId)) {
requestBodyMap.put(ManagementConstants.REQUEST_RESPONSE_SESSION_ID, sessionId);
}
message.setBody(new AmqpValue(requestBodyMap));
return channel.sendWithAck(message);
}).flatMapMany(amqpMessage -> {
final List<ServiceBusReceivedMessage> messageList =
messageSerializer.deserializeList(amqpMessage, ServiceBusReceivedMessage.class);
if (messageList.size() > 0) {
final ServiceBusReceivedMessage receivedMessage = messageList.get(messageList.size() - 1);
logger.info("Setting last peeked sequence number: {}", receivedMessage.getSequenceNumber());
if (receivedMessage.getSequenceNumber() > 0) {
this.lastPeekedSequenceNumber.set(receivedMessage.getSequenceNumber());
}
}
return Flux.fromIterable(messageList);
})); | HashMap<String, Object> requestBodyMap = new HashMap<>(); | return isAuthorized(RECEIVE_BY_SEQUENCE_NUMBER_OPERATION).thenMany(createRequestResponse.flatMap(channel -> {
final Message message = createManagementMessage(RECEIVE_BY_SEQUENCE_NUMBER_OPERATION,
channel.getReceiveLinkName());
HashMap<String, Object> requestBodyMap = new HashMap<>();
requestBodyMap.put(SEQUENCE_NUMBERS, Arrays.stream(fromSequenceNumbers)
.boxed().toArray(Long[]::new));
requestBodyMap.put(RECEIVER_SETTLE_MODE,
UnsignedInteger.valueOf(receiveMode == ReceiveMode.RECEIVE_AND_DELETE ? 0 : 1));
if (!Objects.isNull(sessionId)) {
requestBodyMap.put(ManagementConstants.REQUEST_RESPONSE_SESSION_ID, sessionId);
}
message.setBody(new AmqpValue(requestBodyMap));
return channel.sendWithAck(message);
} | class ManagementChannel implements ServiceBusManagementNode {
private final Scheduler scheduler;
private final MessageSerializer messageSerializer;
private final TokenManager tokenManager;
private final Duration operationTimeout;
private final Mono<RequestResponseChannel> createRequestResponse;
private final String fullyQualifiedNamespace;
private final ClientLogger logger;
private final String entityPath;
private final AtomicLong lastPeekedSequenceNumber = new AtomicLong();
private volatile boolean isDisposed;
ManagementChannel(Mono<RequestResponseChannel> createRequestResponse, String fullyQualifiedNamespace,
String entityPath, TokenManager tokenManager, MessageSerializer messageSerializer, Scheduler scheduler,
Duration operationTimeout) {
this.createRequestResponse = createRequestResponse;
this.fullyQualifiedNamespace = fullyQualifiedNamespace;
this.logger = new ClientLogger(String.format("%s<%s>", ManagementChannel.class, entityPath));
this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null.");
this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null.");
this.scheduler = Objects.requireNonNull(scheduler, "'scheduler' cannot be null.");
this.tokenManager = Objects.requireNonNull(tokenManager, "'tokenManager' cannot be null.");
this.operationTimeout = operationTimeout;
}
@Override
public Mono<Void> updateDisposition(UUID lockToken, DispositionStatus dispositionStatus, String deadLetterReason,
String deadLetterErrorDescription, Map<String, Object> propertiesToModify) {
return isAuthorized(UPDATE_DISPOSITION_OPERATION).then(createRequestResponse.flatMap(channel -> {
final Message message = createDispositionMessage(new UUID[] {lockToken}, dispositionStatus,
null, null, null, channel.getReceiveLinkName());
return channel.sendWithAck(message);
}).flatMap(response -> {
final int statusCode = RequestResponseUtils.getResponseStatusCode(response);
final AmqpResponseCode responseCode = AmqpResponseCode.fromValue(statusCode);
if (responseCode == AmqpResponseCode.OK) {
return Mono.empty();
} else {
return Mono.error(ExceptionUtil.amqpResponseCodeToException(statusCode, "", getErrorContext()));
}
}));
}
/**
* {@inheritDoc}
*/
@Override
public Mono<Instant> renewMessageLock(UUID lockToken) {
return renewMessageLock(new UUID[]{lockToken})
.next()
.publishOn(scheduler);
}
/**
* {@inheritDoc}
*/
@Override
public Mono<ServiceBusReceivedMessage> peek(long fromSequenceNumber) {
return peek(fromSequenceNumber, 1, null)
.last()
.publishOn(scheduler);
}
/**
* {@inheritDoc}
*/
@Override
public Flux<ServiceBusReceivedMessage> peekBatch(int maxMessages) {
return peek(this.lastPeekedSequenceNumber.get() + 1, maxMessages, null)
.publishOn(scheduler);
}
/**
* {@inheritDoc}
*/
@Override
public Flux<ServiceBusReceivedMessage> peekBatch(int maxMessages, long fromSequenceNumber) {
return peek(fromSequenceNumber, maxMessages, null)
.publishOn(scheduler);
}
/**
* {@inheritDoc}
*/
@Override
public Mono<ServiceBusReceivedMessage> receiveDeferredMessage(ReceiveMode receiveMode, long sequenceNumber) {
return receiveDeferredMessageBatch(receiveMode, null, sequenceNumber)
.next()
.publishOn(scheduler);
}
/**
* {@inheritDoc}
*/
@Override
public Flux<ServiceBusReceivedMessage> receiveDeferredMessageBatch(ReceiveMode receiveMode,
long... sequenceNumbers) {
return receiveDeferredMessageBatch(receiveMode, null, sequenceNumbers)
.publishOn(scheduler);
}
/**
* {@inheritDoc}
*/
@Override
public Mono<ServiceBusReceivedMessage> peek() {
return peek(lastPeekedSequenceNumber.get() + 1);
}
private Flux<ServiceBusReceivedMessage> peek(long fromSequenceNumber, int maxMessages, UUID sessionId) {
return isAuthorized(PEEK_OPERATION).thenMany(createRequestResponse.flatMap(channel -> {
final Message message = createManagementMessage(PEEK_OPERATION, channel.getReceiveLinkName());
HashMap<String, Object> requestBodyMap = new HashMap<>();
requestBodyMap.put(FROM_SEQUENCE_NUMBER, fromSequenceNumber);
requestBodyMap.put(MESSAGE_COUNT_KEY, maxMessages);
if (!Objects.isNull(sessionId)) {
requestBodyMap.put(ManagementConstants.REQUEST_RESPONSE_SESSION_ID, sessionId);
}
message.setBody(new AmqpValue(requestBodyMap));
return channel.sendWithAck(message);
}).flatMapMany(amqpMessage -> {
final List<ServiceBusReceivedMessage> messageList =
messageSerializer.deserializeList(amqpMessage, ServiceBusReceivedMessage.class);
if (messageList.size() > 0) {
final ServiceBusReceivedMessage receivedMessage = messageList.get(messageList.size() - 1);
logger.info("Setting last peeked sequence number: {}", receivedMessage.getSequenceNumber());
if (receivedMessage.getSequenceNumber() > 0) {
this.lastPeekedSequenceNumber.set(receivedMessage.getSequenceNumber());
}
}
return Flux.fromIterable(messageList);
}));
}
private Flux<ServiceBusReceivedMessage> receiveDeferredMessageBatch(ReceiveMode receiveMode, UUID sessionId,
long... fromSequenceNumbers) {
).flatMapMany(amqpMessage -> {
final List<ServiceBusReceivedMessage> messageList =
messageSerializer.deserializeList(amqpMessage, ServiceBusReceivedMessage.class);
if (messageList.size() > 0) {
final ServiceBusReceivedMessage receivedMessage = messageList.get(messageList.size() - 1);
logger.info("Setting last peeked sequence number: {}", receivedMessage.getSequenceNumber());
if (receivedMessage.getSequenceNumber() > 0) {
this.lastPeekedSequenceNumber.set(receivedMessage.getSequenceNumber());
}
}
return Flux.fromIterable(messageList);
}));
}
private Mono<Void> isAuthorized(String operation) {
return tokenManager.getAuthorizationResults().next().flatMap(response -> {
if (response != AmqpResponseCode.ACCEPTED) {
return Mono.error(new AmqpException(false, String.format(
"User does not have authorization to perform operation [%s] on entity [%s]", operation, entityPath),
getErrorContext()));
} else {
return Mono.empty();
}
});
}
private Message createDispositionMessage(UUID[] lockTokens, DispositionStatus dispositionStatus,
String deadLetterReason, String deadLetterErrorDescription, Map<String, Object> propertiesToModify,
String linkName) {
logger.verbose("Update disposition of deliveries '{}' to '{}' on entity '{}', session '{}'",
Arrays.toString(lockTokens), dispositionStatus, entityPath, "n/a");
final Message message = createManagementMessage(UPDATE_DISPOSITION_OPERATION, linkName);
final Map<String, Object> requestBody = new HashMap<>();
requestBody.put(LOCK_TOKENS_KEY, lockTokens);
requestBody.put(ManagementConstants.DISPOSITION_STATUS_KEY, dispositionStatus.getValue());
if (deadLetterReason != null) {
requestBody.put(ManagementConstants.DEADLETTER_REASON_KEY, deadLetterReason);
}
if (deadLetterErrorDescription != null) {
requestBody.put(ManagementConstants.DEADLETTER_DESCRIPTION_KEY, deadLetterErrorDescription);
}
if (propertiesToModify != null && propertiesToModify.size() > 0) {
requestBody.put(ManagementConstants.PROPERTIES_TO_MODIFY_KEY, propertiesToModify);
}
message.setBody(new AmqpValue(requestBody));
return message;
}
private Flux<Instant> renewMessageLock(UUID[] renewLockList) {
return isAuthorized(PEEK_OPERATION).thenMany(createRequestResponse.flatMap(channel -> {
Message requestMessage = createManagementMessage(RENEW_LOCK_OPERATION,
channel.getReceiveLinkName());
requestMessage.setBody(new AmqpValue(Collections.singletonMap(LOCK_TOKENS_KEY, renewLockList)));
return channel.sendWithAck(requestMessage);
}).flatMapMany(responseMessage -> {
int statusCode = RequestResponseUtils.getResponseStatusCode(responseMessage);
if (statusCode != AmqpResponseCode.OK.getValue()) {
return Mono.error(ExceptionUtil.amqpResponseCodeToException(statusCode, "Could not renew the lock.",
getErrorContext()));
}
return Flux.fromIterable(messageSerializer.deserializeList(responseMessage, Instant.class));
}));
}
/**
* Creates an AMQP message with the required application properties.
*
* @param operation Management operation to perform (ie. peek, update-disposition, etc.)
* @param linkName Name of receiver link associated with operation.
*
* @return An AMQP message with the required headers.
*/
private Message createManagementMessage(String operation, String linkName) {
final Duration serverTimeout = MessageUtils.adjustServerTimeout(operationTimeout);
final Map<String, Object> applicationProperties = new HashMap<>();
applicationProperties.put(MANAGEMENT_OPERATION_KEY, operation);
applicationProperties.put(SERVER_TIMEOUT, serverTimeout.toMillis());
if (linkName != null && !linkName.isEmpty()) {
applicationProperties.put(ASSOCIATED_LINK_NAME_KEY, linkName);
}
final Message message = Proton.message();
message.setApplicationProperties(new ApplicationProperties(applicationProperties));
return message;
}
private AmqpErrorContext getErrorContext() {
return new SessionErrorContext(fullyQualifiedNamespace, entityPath);
}
/**
* {@inheritDoc}
*/
@Override
public void close() {
if (isDisposed) {
return;
}
isDisposed = true;
tokenManager.close();
}
} | class ManagementChannel implements ServiceBusManagementNode {
private final Scheduler scheduler;
private final MessageSerializer messageSerializer;
private final TokenManager tokenManager;
private final Duration operationTimeout;
private final Mono<RequestResponseChannel> createRequestResponse;
private final String fullyQualifiedNamespace;
private final ClientLogger logger;
private final String entityPath;
private final AtomicLong lastPeekedSequenceNumber = new AtomicLong();
private volatile boolean isDisposed;
ManagementChannel(Mono<RequestResponseChannel> createRequestResponse, String fullyQualifiedNamespace,
String entityPath, TokenManager tokenManager, MessageSerializer messageSerializer, Scheduler scheduler,
Duration operationTimeout) {
this.createRequestResponse = createRequestResponse;
this.fullyQualifiedNamespace = fullyQualifiedNamespace;
this.logger = new ClientLogger(String.format("%s<%s>", ManagementChannel.class, entityPath));
this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null.");
this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null.");
this.scheduler = Objects.requireNonNull(scheduler, "'scheduler' cannot be null.");
this.tokenManager = Objects.requireNonNull(tokenManager, "'tokenManager' cannot be null.");
this.operationTimeout = operationTimeout;
}
@Override
public Mono<Void> updateDisposition(UUID lockToken, DispositionStatus dispositionStatus, String deadLetterReason,
String deadLetterErrorDescription, Map<String, Object> propertiesToModify) {
return isAuthorized(UPDATE_DISPOSITION_OPERATION).then(createRequestResponse.flatMap(channel -> {
final Message message = createDispositionMessage(new UUID[] {lockToken}, dispositionStatus,
null, null, null, channel.getReceiveLinkName());
return channel.sendWithAck(message);
}).flatMap(response -> {
final int statusCode = RequestResponseUtils.getResponseStatusCode(response);
final AmqpResponseCode responseCode = AmqpResponseCode.fromValue(statusCode);
if (responseCode == AmqpResponseCode.OK) {
return Mono.empty();
} else {
return Mono.error(ExceptionUtil.amqpResponseCodeToException(statusCode, "", getErrorContext()));
}
}));
}
/**
* {@inheritDoc}
*/
@Override
public Mono<Instant> renewMessageLock(UUID lockToken) {
return renewMessageLock(new UUID[]{lockToken})
.next();
}
/**
* {@inheritDoc}
*/
@Override
public Mono<ServiceBusReceivedMessage> peek(long fromSequenceNumber) {
return peek(fromSequenceNumber, 1, null)
.last();
}
/**
* {@inheritDoc}
*/
@Override
public Flux<ServiceBusReceivedMessage> peekBatch(int maxMessages) {
return peek(this.lastPeekedSequenceNumber.get() + 1, maxMessages, null);
}
/**
* {@inheritDoc}
*/
@Override
public Flux<ServiceBusReceivedMessage> peekBatch(int maxMessages, long fromSequenceNumber) {
return peek(fromSequenceNumber, maxMessages, null);
}
/**
* {@inheritDoc}
*/
@Override
public Mono<ServiceBusReceivedMessage> receiveDeferredMessage(ReceiveMode receiveMode, long sequenceNumber) {
return receiveDeferredMessageBatch(receiveMode, null, sequenceNumber)
.next();
}
/**
* {@inheritDoc}
*/
@Override
public Flux<ServiceBusReceivedMessage> receiveDeferredMessageBatch(ReceiveMode receiveMode,
long... sequenceNumbers) {
return receiveDeferredMessageBatch(receiveMode, null, sequenceNumbers);
}
/**
* {@inheritDoc}
*/
@Override
public Mono<ServiceBusReceivedMessage> peek() {
return peek(lastPeekedSequenceNumber.get() + 1);
}
private Flux<ServiceBusReceivedMessage> peek(long fromSequenceNumber, int maxMessages, UUID sessionId) {
return isAuthorized(PEEK_OPERATION).thenMany(createRequestResponse.flatMap(channel -> {
final Message message = createManagementMessage(PEEK_OPERATION, channel.getReceiveLinkName());
final HashMap<String, Object> requestBodyMap = new HashMap<>();
requestBodyMap.put(FROM_SEQUENCE_NUMBER, fromSequenceNumber);
requestBodyMap.put(MESSAGE_COUNT_KEY, maxMessages);
if (!Objects.isNull(sessionId)) {
requestBodyMap.put(ManagementConstants.REQUEST_RESPONSE_SESSION_ID, sessionId);
}
message.setBody(new AmqpValue(requestBodyMap));
return channel.sendWithAck(message);
}).flatMapMany(amqpMessage -> {
final List<ServiceBusReceivedMessage> messageList =
messageSerializer.deserializeList(amqpMessage, ServiceBusReceivedMessage.class);
if (messageList.size() > 0) {
final ServiceBusReceivedMessage receivedMessage = messageList.get(messageList.size() - 1);
logger.info("Setting last peeked sequence number: {}", receivedMessage.getSequenceNumber());
if (receivedMessage.getSequenceNumber() > 0) {
this.lastPeekedSequenceNumber.set(receivedMessage.getSequenceNumber());
}
}
return Flux.fromIterable(messageList);
}));
}
private Flux<ServiceBusReceivedMessage> receiveDeferredMessageBatch(ReceiveMode receiveMode, UUID sessionId,
long... fromSequenceNumbers) {
).flatMapMany(amqpMessage -> {
final List<ServiceBusReceivedMessage> messageList =
messageSerializer.deserializeList(amqpMessage, ServiceBusReceivedMessage.class);
if (messageList.size() > 0) {
final ServiceBusReceivedMessage receivedMessage = messageList.get(messageList.size() - 1);
logger.info("Setting last peeked sequence number: {}", receivedMessage.getSequenceNumber());
if (receivedMessage.getSequenceNumber() > 0) {
this.lastPeekedSequenceNumber.set(receivedMessage.getSequenceNumber());
}
}
return Flux.fromIterable(messageList);
}));
}
private Mono<Void> isAuthorized(String operation) {
return tokenManager.getAuthorizationResults().next().flatMap(response -> {
if (response != AmqpResponseCode.ACCEPTED) {
return Mono.error(new AmqpException(false, String.format(
"User does not have authorization to perform operation [%s] on entity [%s]", operation, entityPath),
getErrorContext()));
} else {
return Mono.empty();
}
});
}
private Message createDispositionMessage(UUID[] lockTokens, DispositionStatus dispositionStatus,
String deadLetterReason, String deadLetterErrorDescription, Map<String, Object> propertiesToModify,
String linkName) {
logger.verbose("Update disposition of deliveries '{}' to '{}' on entity '{}', session '{}'",
Arrays.toString(lockTokens), dispositionStatus, entityPath, "n/a");
final Message message = createManagementMessage(UPDATE_DISPOSITION_OPERATION, linkName);
final Map<String, Object> requestBody = new HashMap<>();
requestBody.put(LOCK_TOKENS_KEY, lockTokens);
requestBody.put(ManagementConstants.DISPOSITION_STATUS_KEY, dispositionStatus.getValue());
if (deadLetterReason != null) {
requestBody.put(ManagementConstants.DEADLETTER_REASON_KEY, deadLetterReason);
}
if (deadLetterErrorDescription != null) {
requestBody.put(ManagementConstants.DEADLETTER_DESCRIPTION_KEY, deadLetterErrorDescription);
}
if (propertiesToModify != null && propertiesToModify.size() > 0) {
requestBody.put(ManagementConstants.PROPERTIES_TO_MODIFY_KEY, propertiesToModify);
}
message.setBody(new AmqpValue(requestBody));
return message;
}
private Flux<Instant> renewMessageLock(UUID[] renewLockList) {
return isAuthorized(PEEK_OPERATION).thenMany(createRequestResponse.flatMap(channel -> {
Message requestMessage = createManagementMessage(RENEW_LOCK_OPERATION,
channel.getReceiveLinkName());
requestMessage.setBody(new AmqpValue(Collections.singletonMap(LOCK_TOKENS_KEY, renewLockList)));
return channel.sendWithAck(requestMessage);
}).flatMapMany(responseMessage -> {
int statusCode = RequestResponseUtils.getResponseStatusCode(responseMessage);
if (statusCode != AmqpResponseCode.OK.getValue()) {
return Mono.error(ExceptionUtil.amqpResponseCodeToException(statusCode, "Could not renew the lock.",
getErrorContext()));
}
return Flux.fromIterable(messageSerializer.deserializeList(responseMessage, Instant.class));
}));
}
/**
* Creates an AMQP message with the required application properties.
*
* @param operation Management operation to perform (ie. peek, update-disposition, etc.)
* @param linkName Name of receiver link associated with operation.
*
* @return An AMQP message with the required headers.
*/
private Message createManagementMessage(String operation, String linkName) {
final Duration serverTimeout = MessageUtils.adjustServerTimeout(operationTimeout);
final Map<String, Object> applicationProperties = new HashMap<>();
applicationProperties.put(MANAGEMENT_OPERATION_KEY, operation);
applicationProperties.put(SERVER_TIMEOUT, serverTimeout.toMillis());
if (linkName != null && !linkName.isEmpty()) {
applicationProperties.put(ASSOCIATED_LINK_NAME_KEY, linkName);
}
final Message message = Proton.message();
message.setApplicationProperties(new ApplicationProperties(applicationProperties));
return message;
}
private AmqpErrorContext getErrorContext() {
return new SessionErrorContext(fullyQualifiedNamespace, entityPath);
}
/**
* {@inheritDoc}
*/
@Override
public void close() {
if (isDisposed) {
return;
}
isDisposed = true;
tokenManager.close();
}
} |
Should we have a timeout? | public Mono<AccessToken> authenticateWithAzureCli(TokenRequestContext request) {
String azCommand = "az account get-access-token --output json --resource ";
StringBuilder command = new StringBuilder();
command.append(azCommand);
String scopes = ScopeUtil.scopesToResource(request.getScopes());
try {
ScopeUtil.validateScope(scopes);
} catch (IllegalArgumentException ex) {
return Mono.error(logger.logExceptionAsError(ex));
}
command.append(scopes);
AccessToken token = null;
BufferedReader reader = null;
try {
String starter;
String switcher;
if (isWindowsPlatform()) {
starter = WINDOWS_STARTER;
switcher = WINDOWS_SWITCHER;
} else {
starter = LINUX_MAC_STARTER;
switcher = LINUX_MAC_SWITCHER;
}
ProcessBuilder builder = new ProcessBuilder(starter, switcher, command.toString());
String workingDirectory = getSafeWorkingDirectory();
if (workingDirectory != null) {
builder.directory(new File(workingDirectory));
} else {
throw logger.logExceptionAsError(new IllegalStateException("A Safe Working directory could not be"
+ " found to execute CLI command from."));
}
builder.redirectErrorStream(true);
Process process = builder.start();
reader = new BufferedReader(new InputStreamReader(process.getInputStream(), "UTF-8"));
String line;
StringBuilder output = new StringBuilder();
while (true) {
line = reader.readLine();
if (line == null) {
break;
}
if (line.startsWith(WINDOWS_PROCESS_ERROR_MESSAGE) || line.matches(LINUX_MAC_PROCESS_ERROR_MESSAGE)) {
throw logger.logExceptionAsError(
new ClientAuthenticationException("Azure CLI not installed", null));
}
output.append(line);
}
String processOutput = output.toString();
process.waitFor();
if (process.exitValue() != 0) {
if (processOutput.length() > 0) {
String redactedOutput = redactInfo("\"accessToken\": \"(.*?)(\"|$)", processOutput);
throw logger.logExceptionAsError(new ClientAuthenticationException(redactedOutput, null));
} else {
throw logger.logExceptionAsError(
new ClientAuthenticationException("Failed to invoke Azure CLI ", null));
}
}
Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(processOutput, Map.class,
SerializerEncoding.JSON);
String accessToken = objectMap.get("accessToken");
String time = objectMap.get("expiresOn");
String timeToSecond = time.substring(0, time.indexOf("."));
String timeJoinedWithT = String.join("T", timeToSecond.split(" "));
OffsetDateTime expiresOn = LocalDateTime.parse(timeJoinedWithT, DateTimeFormatter.ISO_LOCAL_DATE_TIME)
.atZone(ZoneId.systemDefault())
.toOffsetDateTime().withOffsetSameInstant(ZoneOffset.UTC);
token = new IdentityToken(accessToken, expiresOn, options);
} catch (IOException e) {
throw logger.logExceptionAsError(new IllegalStateException(e));
} catch (InterruptedException e) {
throw logger.logExceptionAsError(new IllegalStateException(e));
} catch (RuntimeException e) {
return Mono.error(logger.logExceptionAsError(e));
} finally {
try {
if (reader != null) {
reader.close();
}
} catch (IOException ex) {
return Mono.error(logger.logExceptionAsError(new IllegalStateException(ex)));
}
}
return Mono.just(token);
} | process.waitFor(); | public Mono<AccessToken> authenticateWithAzureCli(TokenRequestContext request) {
String azCommand = "az account get-access-token --output json --resource ";
StringBuilder command = new StringBuilder();
command.append(azCommand);
String scopes = ScopeUtil.scopesToResource(request.getScopes());
try {
ScopeUtil.validateScope(scopes);
} catch (IllegalArgumentException ex) {
return Mono.error(logger.logExceptionAsError(ex));
}
command.append(scopes);
AccessToken token = null;
BufferedReader reader = null;
try {
String starter;
String switcher;
if (isWindowsPlatform()) {
starter = WINDOWS_STARTER;
switcher = WINDOWS_SWITCHER;
} else {
starter = LINUX_MAC_STARTER;
switcher = LINUX_MAC_SWITCHER;
}
ProcessBuilder builder = new ProcessBuilder(starter, switcher, command.toString());
String workingDirectory = getSafeWorkingDirectory();
if (workingDirectory != null) {
builder.directory(new File(workingDirectory));
} else {
throw logger.logExceptionAsError(new IllegalStateException("A Safe Working directory could not be"
+ " found to execute CLI command from."));
}
builder.redirectErrorStream(true);
Process process = builder.start();
reader = new BufferedReader(new InputStreamReader(process.getInputStream(), "UTF-8"));
String line;
StringBuilder output = new StringBuilder();
while (true) {
line = reader.readLine();
if (line == null) {
break;
}
if (line.startsWith(WINDOWS_PROCESS_ERROR_MESSAGE) || line.matches(LINUX_MAC_PROCESS_ERROR_MESSAGE)) {
throw logger.logExceptionAsError(
new ClientAuthenticationException("Azure CLI not installed", null));
}
output.append(line);
}
String processOutput = output.toString();
process.waitFor(10, TimeUnit.SECONDS);
if (process.exitValue() != 0) {
if (processOutput.length() > 0) {
String redactedOutput = redactInfo("\"accessToken\": \"(.*?)(\"|$)", processOutput);
throw logger.logExceptionAsError(new ClientAuthenticationException(redactedOutput, null));
} else {
throw logger.logExceptionAsError(
new ClientAuthenticationException("Failed to invoke Azure CLI ", null));
}
}
Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(processOutput, Map.class,
SerializerEncoding.JSON);
String accessToken = objectMap.get("accessToken");
String time = objectMap.get("expiresOn");
String timeToSecond = time.substring(0, time.indexOf("."));
String timeJoinedWithT = String.join("T", timeToSecond.split(" "));
OffsetDateTime expiresOn = LocalDateTime.parse(timeJoinedWithT, DateTimeFormatter.ISO_LOCAL_DATE_TIME)
.atZone(ZoneId.systemDefault())
.toOffsetDateTime().withOffsetSameInstant(ZoneOffset.UTC);
token = new IdentityToken(accessToken, expiresOn, options);
} catch (IOException | InterruptedException e) {
throw logger.logExceptionAsError(new IllegalStateException(e));
} catch (RuntimeException e) {
return Mono.error(logger.logExceptionAsError(e));
} finally {
try {
if (reader != null) {
reader.close();
}
} catch (IOException ex) {
return Mono.error(logger.logExceptionAsError(new IllegalStateException(ex)));
}
}
return Mono.just(token);
} | class IdentityClient {
private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter();
private static final Random RANDOM = new Random();
private static final String WINDOWS_STARTER = "cmd.exe";
private static final String LINUX_MAC_STARTER = "/bin/sh";
private static final String WINDOWS_SWITCHER = "/c";
private static final String LINUX_MAC_SWITCHER = "-c";
private static final String WINDOWS_PROCESS_ERROR_MESSAGE = "'az' is not recognized";
private static final String LINUX_MAC_PROCESS_ERROR_MESSAGE = "(.*)az:(.*)not found";
private static final String DEFAULT_WINDOWS_SYSTEM_ROOT = System.getenv("SystemRoot");
private static final String DEFAULT_MAC_LINUX_PATH = "/bin/";
private final ClientLogger logger = new ClientLogger(IdentityClient.class);
private final IdentityClientOptions options;
private final PublicClientApplication publicClientApplication;
private final String tenantId;
private final String clientId;
private HttpPipelineAdapter httpPipelineAdapter;
/**
* Creates an IdentityClient with the given options.
*
* @param tenantId the tenant ID of the application.
* @param clientId the client ID of the application.
* @param options the options configuring the client.
*/
IdentityClient(String tenantId, String clientId, IdentityClientOptions options) {
if (tenantId == null) {
tenantId = "common";
}
if (options == null) {
options = new IdentityClientOptions();
}
this.tenantId = tenantId;
this.clientId = clientId;
this.options = options;
if (clientId == null) {
this.publicClientApplication = null;
} else {
String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/organizations/" + tenantId;
PublicClientApplication.Builder publicClientApplicationBuilder = PublicClientApplication.builder(clientId);
try {
publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl);
} catch (MalformedURLException e) {
throw logger.logExceptionAsWarning(new IllegalStateException(e));
}
HttpPipeline httpPipeline = options.getHttpPipeline();
if (httpPipeline != null) {
httpPipelineAdapter = new HttpPipelineAdapter(httpPipeline);
publicClientApplicationBuilder.httpClient(httpPipelineAdapter);
} else {
HttpClient httpClient = options.getHttpClient();
if (httpClient != null) {
httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(httpClient));
publicClientApplicationBuilder.httpClient(httpPipelineAdapter);
} else if (options.getProxyOptions() != null) {
publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
} else {
httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(HttpClient.createDefault()));
publicClientApplicationBuilder.httpClient(httpPipelineAdapter);
}
}
if (options.getExecutorService() != null) {
publicClientApplicationBuilder.executorService(options.getExecutorService());
}
this.publicClientApplication = publicClientApplicationBuilder.build();
}
}
/**
* Asynchronously acquire a token from Active Directory with Azure CLI.
*
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
/**
* Asynchronously acquire a token from Active Directory with a client secret.
*
* @param clientSecret the client secret of the application
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<AccessToken> authenticateWithClientSecret(String clientSecret, TokenRequestContext request) {
String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId;
try {
ConfidentialClientApplication.Builder applicationBuilder =
ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.createFromSecret(clientSecret))
.authority(authorityUrl);
if (httpPipelineAdapter != null) {
applicationBuilder.httpClient(httpPipelineAdapter);
} else if (options.getProxyOptions() != null) {
applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
}
if (options.getExecutorService() != null) {
applicationBuilder.executorService(options.getExecutorService());
}
ConfidentialClientApplication application = applicationBuilder.build();
return Mono.fromFuture(application.acquireToken(
ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
.build()))
.map(ar -> new MsalToken(ar, options));
} catch (MalformedURLException e) {
return Mono.error(e);
}
}
private HttpPipeline setupPipeline(HttpClient httpClient) {
List<HttpPipelinePolicy> policies = new ArrayList<>();
HttpLogOptions httpLogOptions = new HttpLogOptions();
HttpPolicyProviders.addBeforeRetryPolicies(policies);
policies.add(new RetryPolicy());
HttpPolicyProviders.addAfterRetryPolicies(policies);
policies.add(new HttpLoggingPolicy(httpLogOptions));
return new HttpPipelineBuilder().httpClient(httpClient)
.policies(policies.toArray(new HttpPipelinePolicy[0])).build();
}
/**
* Asynchronously acquire a token from Active Directory with a PKCS12 certificate.
*
* @param pfxCertificatePath the path to the PKCS12 certificate of the application
* @param pfxCertificatePassword the password protecting the PFX certificate
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<AccessToken> authenticateWithPfxCertificate(String pfxCertificatePath, String pfxCertificatePassword,
TokenRequestContext request) {
String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId;
return Mono.fromCallable(() -> {
ConfidentialClientApplication.Builder applicationBuilder =
ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.createFromCertificate(
new FileInputStream(pfxCertificatePath), pfxCertificatePassword))
.authority(authorityUrl);
if (httpPipelineAdapter != null) {
applicationBuilder.httpClient(httpPipelineAdapter);
} else if (options.getProxyOptions() != null) {
applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
}
if (options.getExecutorService() != null) {
applicationBuilder.executorService(options.getExecutorService());
}
return applicationBuilder.build();
}).flatMap(application -> Mono.fromFuture(application.acquireToken(
ClientCredentialParameters.builder(new HashSet<>(request.getScopes())).build())))
.map(ar -> new MsalToken(ar, options));
}
/**
* Asynchronously acquire a token from Active Directory with a PEM certificate.
*
* @param pemCertificatePath the path to the PEM certificate of the application
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<AccessToken> authenticateWithPemCertificate(String pemCertificatePath, TokenRequestContext request) {
String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId;
try {
byte[] pemCertificateBytes = Files.readAllBytes(Paths.get(pemCertificatePath));
ConfidentialClientApplication.Builder applicationBuilder =
ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.createFromCertificate(
CertificateUtil.privateKeyFromPem(pemCertificateBytes),
CertificateUtil.publicKeyFromPem(pemCertificateBytes)))
.authority(authorityUrl);
if (httpPipelineAdapter != null) {
applicationBuilder.httpClient(httpPipelineAdapter);
} else if (options.getProxyOptions() != null) {
applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
}
if (options.getExecutorService() != null) {
applicationBuilder.executorService(options.getExecutorService());
}
ConfidentialClientApplication application = applicationBuilder.build();
return Mono.fromFuture(application.acquireToken(
ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
.build()))
.map(ar -> new MsalToken(ar, options));
} catch (IOException e) {
return Mono.error(e);
}
}
/**
* Asynchronously acquire a token from Active Directory with a username and a password.
*
* @param request the details of the token request
* @param username the username of the user
* @param password the password of the user
* @return a Publisher that emits an AccessToken
*/
public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request,
String username, String password) {
return Mono.fromFuture(publicClientApplication.acquireToken(
UserNamePasswordParameters.builder(new HashSet<>(request.getScopes()), username, password.toCharArray())
.build()))
.map(ar -> new MsalToken(ar, options));
}
/**
* Asynchronously acquire a token from the currently logged in client.
*
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<MsalToken> authenticateWithUserRefreshToken(TokenRequestContext request, MsalToken msalToken) {
SilentParameters parameters;
if (msalToken.getAccount() != null) {
parameters = SilentParameters.builder(new HashSet<>(request.getScopes()), msalToken.getAccount()).build();
} else {
parameters = SilentParameters.builder(new HashSet<>(request.getScopes())).build();
}
return Mono.defer(() -> {
try {
return Mono.fromFuture(publicClientApplication.acquireTokenSilently(parameters))
.map(ar -> new MsalToken(ar, options));
} catch (MalformedURLException e) {
return Mono.error(e);
}
});
}
/**
* Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide
* a device code for login and the user must meet the challenge by authenticating in a browser on the current or a
* different device.
*
* @param request the details of the token request
* @param deviceCodeConsumer the user provided closure that will consume the device code challenge
* @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device
* code expires
*/
public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request,
Consumer<DeviceCodeInfo> deviceCodeConsumer) {
return Mono.fromFuture(() -> {
DeviceCodeFlowParameters parameters = DeviceCodeFlowParameters.builder(new HashSet<>(request.getScopes()),
dc -> deviceCodeConsumer.accept(new DeviceCodeInfo(dc.userCode(), dc.deviceCode(),
dc.verificationUri(), OffsetDateTime.now().plusSeconds(dc.expiresIn()), dc.message()))).build();
return publicClientApplication.acquireToken(parameters);
}).map(ar -> new MsalToken(ar, options));
}
/**
* Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow.
*
* @param request the details of the token request
* @param authorizationCode the oauth2 authorization code
* @param redirectUrl the redirectUrl where the authorization code is sent to
* @return a Publisher that emits an AccessToken
*/
public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode,
URI redirectUrl) {
return Mono.fromFuture(() -> publicClientApplication.acquireToken(
AuthorizationCodeParameters.builder(authorizationCode, redirectUrl)
.scopes(new HashSet<>(request.getScopes()))
.build()))
.map(ar -> new MsalToken(ar, options));
}
/**
* Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The
* credential will run a minimal local HttpServer at the given port, so {@code http:
* listed as a valid reply URL for the application.
*
* @param request the details of the token request
* @param port the port on which the HTTP server is listening
* @return a Publisher that emits an AccessToken
*/
public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, int port) {
String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId;
return AuthorizationCodeListener.create(port)
.flatMap(server -> {
URI redirectUri;
String browserUri;
try {
redirectUri = new URI(String.format("http:
browserUri =
String.format("%s/oauth2/v2.0/authorize?response_type=code&response_mode=query&prompt"
+ "=select_account&client_id=%s&redirect_uri=%s&state=%s&scope=%s",
authorityUrl,
clientId,
redirectUri.toString(),
UUID.randomUUID(),
String.join(" ", request.getScopes()));
} catch (URISyntaxException e) {
return server.dispose().then(Mono.error(e));
}
return server.listen()
.mergeWith(Mono.<String>fromRunnable(() -> {
try {
openUrl(browserUri);
} catch (IOException e) {
throw logger.logExceptionAsError(new IllegalStateException(e));
}
}).subscribeOn(Schedulers.newSingle("browser")))
.next()
.flatMap(code -> authenticateWithAuthorizationCode(request, code, redirectUri))
.onErrorResume(t -> server.dispose().then(Mono.error(t)))
.flatMap(msalToken -> server.dispose().then(Mono.just(msalToken)));
});
}
/**
* Asynchronously acquire a token from the App Service Managed Service Identity endpoint.
*
* @param msiEndpoint the endpoint to acquire token from
* @param msiSecret the secret to acquire token with
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String msiEndpoint, String msiSecret,
TokenRequestContext request) {
return Mono.fromCallable(() -> {
String resource = ScopeUtil.scopesToResource(request.getScopes());
HttpURLConnection connection = null;
StringBuilder payload = new StringBuilder();
payload.append("resource=");
payload.append(URLEncoder.encode(resource, "UTF-8"));
payload.append("&api-version=");
payload.append(URLEncoder.encode("2017-09-01", "UTF-8"));
if (clientId != null) {
payload.append("&clientid=");
payload.append(URLEncoder.encode(clientId, "UTF-8"));
}
try {
URL url = new URL(String.format("%s?%s", msiEndpoint, payload));
connection = (HttpURLConnection) url.openConnection();
connection.setRequestMethod("GET");
if (msiSecret != null) {
connection.setRequestProperty("Secret", msiSecret);
}
connection.setRequestProperty("Metadata", "true");
connection.connect();
Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name())
.useDelimiter("\\A");
String result = s.hasNext() ? s.next() : "";
MSIToken msiToken = SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON);
return new IdentityToken(msiToken.getToken(), msiToken.getExpiresAt(), options);
} finally {
if (connection != null) {
connection.disconnect();
}
}
});
}
/**
* Asynchronously acquire a token from the Virtual Machine IMDS endpoint.
*
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) {
String resource = ScopeUtil.scopesToResource(request.getScopes());
StringBuilder payload = new StringBuilder();
final int imdsUpgradeTimeInMs = 70 * 1000;
try {
payload.append("api-version=");
payload.append(URLEncoder.encode("2018-02-01", "UTF-8"));
payload.append("&resource=");
payload.append(URLEncoder.encode(resource, "UTF-8"));
if (clientId != null) {
payload.append("&client_id=");
payload.append(URLEncoder.encode(clientId, "UTF-8"));
}
} catch (IOException exception) {
return Mono.error(exception);
}
return checkIMDSAvailable().flatMap(available -> Mono.fromCallable(() -> {
int retry = 1;
while (retry <= options.getMaxRetry()) {
URL url = null;
HttpURLConnection connection = null;
try {
url =
new URL(String.format("http:
payload.toString()));
connection = (HttpURLConnection) url.openConnection();
connection.setRequestMethod("GET");
connection.setRequestProperty("Metadata", "true");
connection.connect();
Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name())
.useDelimiter("\\A");
String result = s.hasNext() ? s.next() : "";
MSIToken msiToken = SERIALIZER_ADAPTER.deserialize(result,
MSIToken.class, SerializerEncoding.JSON);
return new IdentityToken(msiToken.getToken(), msiToken.getExpiresAt(), options);
} catch (IOException exception) {
if (connection == null) {
throw logger.logExceptionAsError(new RuntimeException(
String.format("Could not connect to the url: %s.", url), exception));
}
int responseCode = connection.getResponseCode();
if (responseCode == 410
|| responseCode == 429
|| responseCode == 404
|| (responseCode >= 500 && responseCode <= 599)) {
int retryTimeoutInMs = options.getRetryTimeout()
.apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000;
retryTimeoutInMs =
(responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? imdsUpgradeTimeInMs
: retryTimeoutInMs;
retry++;
if (retry > options.getMaxRetry()) {
break;
} else {
sleep(retryTimeoutInMs);
}
} else {
throw logger.logExceptionAsError(new RuntimeException(
"Couldn't acquire access token from IMDS, verify your objectId, "
+ "clientId or msiResourceId", exception));
}
} finally {
if (connection != null) {
connection.disconnect();
}
}
}
throw logger.logExceptionAsError(new RuntimeException(
String.format("MSI: Failed to acquire tokens after retrying %s times",
options.getMaxRetry())));
}));
}
private Mono<Boolean> checkIMDSAvailable() {
StringBuilder payload = new StringBuilder();
try {
payload.append("api-version=");
payload.append(URLEncoder.encode("2018-02-01", "UTF-8"));
} catch (IOException exception) {
return Mono.error(exception);
}
return Mono.fromCallable(() -> {
HttpURLConnection connection = null;
URL url = new URL(String.format("http:
payload.toString()));
try {
connection = (HttpURLConnection) url.openConnection();
connection.setRequestMethod("GET");
connection.setConnectTimeout(500);
connection.connect();
} finally {
if (connection != null) {
connection.disconnect();
}
}
return true;
});
}
private static void sleep(int millis) {
try {
Thread.sleep(millis);
} catch (InterruptedException ex) {
throw new IllegalStateException(ex);
}
}
/**
 * Converts the SDK {@code ProxyOptions} into an equivalent {@link java.net.Proxy}.
 * SOCKS4 and SOCKS5 both map onto java.net's single SOCKS type; HTTP (and anything
 * unrecognized) maps to an HTTP proxy.
 */
private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) {
    final Type javaNetType;
    switch (options.getType()) {
        case SOCKS4:
        case SOCKS5:
            javaNetType = Type.SOCKS;
            break;
        case HTTP:
        default:
            javaNetType = Type.HTTP;
            break;
    }
    return new Proxy(javaNetType, options.getAddress());
}
/**
 * Returns a directory that is safe to use as a process working directory:
 * {@code %SystemRoot%\system32} on Windows (or null when SystemRoot is not set),
 * {@code /bin/} on other platforms.
 */
private String getSafeWorkingDirectory() {
    if (!isWindowsPlatform()) {
        return DEFAULT_MAC_LINUX_PATH;
    }
    return CoreUtils.isNullOrEmpty(DEFAULT_WINDOWS_SYSTEM_ROOT)
        ? null
        : DEFAULT_WINDOWS_SYSTEM_ROOT + "\\system32";
}
// Detects Windows via the "os.name" system property (e.g. "Windows 10").
private boolean isWindowsPlatform() {
return System.getProperty("os.name").contains("Windows");
}
// Replaces every match of {@code regex} in {@code input} with "****", presumably to keep
// sensitive values out of logs — assumes regex is a valid pattern (TODO confirm callers).
private String redactInfo(String regex, String input) {
return input.replaceAll(regex, "****");
}
/**
 * Opens {@code url} in the platform's default browser, dispatching on the "os.name"
 * system property. Logs an error instead of throwing when no launcher is known.
 *
 * @param url the URL to open.
 * @throws IOException if the launcher process cannot be started.
 */
void openUrl(String url) throws IOException {
    Runtime rt = Runtime.getRuntime();
    String os = System.getProperty("os.name").toLowerCase(Locale.ROOT);
    // Use the String[] overload so the URL is passed as a single argument instead of being
    // re-tokenized on whitespace by Runtime.exec(String) — query strings can contain spaces.
    if (os.contains("win")) {
        rt.exec(new String[]{"rundll32", "url.dll,FileProtocolHandler", url});
    } else if (os.contains("mac")) {
        rt.exec(new String[]{"open", url});
    } else if (os.contains("nix") || os.contains("nux")) {
        rt.exec(new String[]{"xdg-open", url});
    } else {
        logger.error("Browser could not be opened - please open {} in a browser on this device.", url);
    }
}
}
class IdentityClient {
// Shared Jackson adapter for deserializing MSI token responses; stateless, so safe as a static.
private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter();
// Used only for retry-backoff jitter; not security-sensitive.
private static final Random RANDOM = new Random();
private static final String WINDOWS_STARTER = "cmd.exe";
private static final String LINUX_MAC_STARTER = "/bin/sh";
private static final String WINDOWS_SWITCHER = "/c";
private static final String LINUX_MAC_SWITCHER = "-c";
private static final String WINDOWS_PROCESS_ERROR_MESSAGE = "'az' is not recognized";
private static final String LINUX_MAC_PROCESS_ERROR_MESSAGE = "(.*)az:(.*)not found";
private static final String DEFAULT_WINDOWS_SYSTEM_ROOT = System.getenv("SystemRoot");
private static final String DEFAULT_MAC_LINUX_PATH = "/bin/";
private final ClientLogger logger = new ClientLogger(IdentityClient.class);
private final IdentityClientOptions options;
// Null when clientId is null (see constructor); flows that need it must not be used then.
private final PublicClientApplication publicClientApplication;
private final String tenantId;
private final String clientId;
// Bridges azure-core's HttpPipeline into MSAL's HTTP client abstraction; stays null when a
// java.net.Proxy is configured instead (see constructor).
private HttpPipelineAdapter httpPipelineAdapter;
/**
 * Creates an IdentityClient with the given options.
 *
 * @param tenantId the tenant ID of the application; defaults to "common" when null.
 * @param clientId the client ID of the application; when null, no MSAL public client is built.
 * @param options the options configuring the client; defaults are used when null.
 */
IdentityClient(String tenantId, String clientId, IdentityClientOptions options) {
// Fall back to the multi-tenant "common" endpoint when no tenant is supplied.
if (tenantId == null) {
tenantId = "common";
}
if (options == null) {
options = new IdentityClientOptions();
}
this.tenantId = tenantId;
this.clientId = clientId;
this.options = options;
if (clientId == null) {
// Without a client id no MSAL public client can be created.
this.publicClientApplication = null;
} else {
// Strip trailing slashes before composing the authority URL.
String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/organizations/" + tenantId;
PublicClientApplication.Builder publicClientApplicationBuilder = PublicClientApplication.builder(clientId);
try {
publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl);
} catch (MalformedURLException e) {
throw logger.logExceptionAsWarning(new IllegalStateException(e));
}
// HTTP transport selection, in priority order: explicit pipeline, explicit HttpClient,
// proxy options, then a default pipeline.
HttpPipeline httpPipeline = options.getHttpPipeline();
if (httpPipeline != null) {
httpPipelineAdapter = new HttpPipelineAdapter(httpPipeline);
publicClientApplicationBuilder.httpClient(httpPipelineAdapter);
} else {
HttpClient httpClient = options.getHttpClient();
if (httpClient != null) {
httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(httpClient));
publicClientApplicationBuilder.httpClient(httpPipelineAdapter);
} else if (options.getProxyOptions() != null) {
publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
} else {
httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(HttpClient.createDefault()));
publicClientApplicationBuilder.httpClient(httpPipelineAdapter);
}
}
if (options.getExecutorService() != null) {
publicClientApplicationBuilder.executorService(options.getExecutorService());
}
this.publicClientApplication = publicClientApplicationBuilder.build();
}
}
/*
 * NOTE(review): dangling Javadoc — the method this documented ("Asynchronously acquire a
 * token from Active Directory with Azure CLI"; @param request the details of the token
 * request; @return a Publisher that emits an AccessToken) is not present in this file.
 * Remove this comment or restore the Azure CLI authentication method.
 */
/**
 * Asynchronously acquire a token from Active Directory with a client secret.
 *
 * @param clientSecret the client secret of the application
 * @param request the details of the token request
 * @return a Publisher that emits an AccessToken
 */
public Mono<AccessToken> authenticateWithClientSecret(String clientSecret, TokenRequestContext request) {
// Confidential client flow uses the tenant-specific authority, not /organizations/.
String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId;
try {
ConfidentialClientApplication.Builder applicationBuilder =
ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.createFromSecret(clientSecret))
.authority(authorityUrl);
// Reuse the pipeline adapter when one was built in the constructor; otherwise fall
// back to plain proxy configuration.
if (httpPipelineAdapter != null) {
applicationBuilder.httpClient(httpPipelineAdapter);
} else if (options.getProxyOptions() != null) {
applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
}
if (options.getExecutorService() != null) {
applicationBuilder.executorService(options.getExecutorService());
}
ConfidentialClientApplication application = applicationBuilder.build();
return Mono.fromFuture(application.acquireToken(
ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
.build()))
.map(ar -> new MsalToken(ar, options));
} catch (MalformedURLException e) {
// Surface a malformed authority through the Mono rather than throwing synchronously.
return Mono.error(e);
}
}
/**
 * Builds the default azure-core HTTP pipeline around the supplied client: the standard
 * before/after policies bracketing a retry policy, with request/response logging last.
 */
private HttpPipeline setupPipeline(HttpClient httpClient) {
    final List<HttpPipelinePolicy> policies = new ArrayList<>();
    HttpPolicyProviders.addBeforeRetryPolicies(policies);
    policies.add(new RetryPolicy());
    HttpPolicyProviders.addAfterRetryPolicies(policies);
    policies.add(new HttpLoggingPolicy(new HttpLogOptions()));
    return new HttpPipelineBuilder()
        .httpClient(httpClient)
        .policies(policies.toArray(new HttpPipelinePolicy[0]))
        .build();
}
/**
 * Asynchronously acquire a token from Active Directory with a PKCS12 certificate.
 *
 * @param pfxCertificatePath the path to the PKCS12 certificate of the application
 * @param pfxCertificatePassword the password protecting the PFX certificate
 * @param request the details of the token request
 * @return a Publisher that emits an AccessToken
 */
public Mono<AccessToken> authenticateWithPfxCertificate(String pfxCertificatePath, String pfxCertificatePassword,
    TokenRequestContext request) {
    String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId;
    return Mono.fromCallable(() -> {
        ConfidentialClientApplication.Builder applicationBuilder;
        // Fix: the original never closed this stream. createFromCertificate appears to read the
        // keystore eagerly, so closing right after the call is safe — TODO confirm against MSAL docs.
        try (FileInputStream pfxCertificate = new FileInputStream(pfxCertificatePath)) {
            applicationBuilder = ConfidentialClientApplication.builder(clientId,
                ClientCredentialFactory.createFromCertificate(pfxCertificate, pfxCertificatePassword))
                .authority(authorityUrl);
        }
        if (httpPipelineAdapter != null) {
            applicationBuilder.httpClient(httpPipelineAdapter);
        } else if (options.getProxyOptions() != null) {
            applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
        }
        if (options.getExecutorService() != null) {
            applicationBuilder.executorService(options.getExecutorService());
        }
        return applicationBuilder.build();
    }).flatMap(application -> Mono.fromFuture(application.acquireToken(
        ClientCredentialParameters.builder(new HashSet<>(request.getScopes())).build())))
        .map(ar -> new MsalToken(ar, options));
}
/**
 * Asynchronously acquire a token from Active Directory with a PEM certificate.
 *
 * @param pemCertificatePath the path to the PEM certificate of the application
 * @param request the details of the token request
 * @return a Publisher that emits an AccessToken
 */
public Mono<AccessToken> authenticateWithPemCertificate(String pemCertificatePath, TokenRequestContext request) {
String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId;
try {
// The PEM file is expected to contain both the private key and the public certificate.
byte[] pemCertificateBytes = Files.readAllBytes(Paths.get(pemCertificatePath));
ConfidentialClientApplication.Builder applicationBuilder =
ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.createFromCertificate(
CertificateUtil.privateKeyFromPem(pemCertificateBytes),
CertificateUtil.publicKeyFromPem(pemCertificateBytes)))
.authority(authorityUrl);
if (httpPipelineAdapter != null) {
applicationBuilder.httpClient(httpPipelineAdapter);
} else if (options.getProxyOptions() != null) {
applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
}
if (options.getExecutorService() != null) {
applicationBuilder.executorService(options.getExecutorService());
}
ConfidentialClientApplication application = applicationBuilder.build();
return Mono.fromFuture(application.acquireToken(
ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
.build()))
.map(ar -> new MsalToken(ar, options));
} catch (IOException e) {
// File-read or authority failures surface through the Mono.
return Mono.error(e);
}
}
/**
 * Asynchronously acquire a token from Active Directory with a username and a password.
 *
 * @param request the details of the token request
 * @param username the username of the user
 * @param password the password of the user
 * @return a Publisher that emits an AccessToken
 */
public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request,
    String username, String password) {
    UserNamePasswordParameters parameters = UserNamePasswordParameters
        .builder(new HashSet<>(request.getScopes()), username, password.toCharArray())
        .build();
    return Mono.fromFuture(publicClientApplication.acquireToken(parameters))
        .map(result -> new MsalToken(result, options));
}
/**
 * Asynchronously acquire a token from the currently logged in client via MSAL's silent flow.
 *
 * @param request the details of the token request
 * @param msalToken a previously acquired token whose account, when present, scopes the silent request
 * @return a Publisher that emits an AccessToken
 */
public Mono<MsalToken> authenticateWithUserRefreshToken(TokenRequestContext request, MsalToken msalToken) {
SilentParameters parameters;
// Prefer the account from the cached token so MSAL can target its cache entry directly.
if (msalToken.getAccount() != null) {
parameters = SilentParameters.builder(new HashSet<>(request.getScopes()), msalToken.getAccount()).build();
} else {
parameters = SilentParameters.builder(new HashSet<>(request.getScopes())).build();
}
// Defer so acquireTokenSilently's checked exception surfaces through the Mono per subscription.
return Mono.defer(() -> {
try {
return Mono.fromFuture(publicClientApplication.acquireTokenSilently(parameters))
.map(ar -> new MsalToken(ar, options));
} catch (MalformedURLException e) {
return Mono.error(e);
}
});
}
/**
 * Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide
 * a device code for login and the user must meet the challenge by authenticating in a browser on the current or a
 * different device.
 *
 * @param request the details of the token request
 * @param deviceCodeConsumer the user provided closure that will consume the device code challenge
 * @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device
 * code expires
 */
public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request,
Consumer<DeviceCodeInfo> deviceCodeConsumer) {
// The future supplier defers the MSAL call until subscription; the inner lambda adapts
// MSAL's device-code callback into the SDK's DeviceCodeInfo type, computing an absolute
// expiry from the relative expiresIn seconds.
return Mono.fromFuture(() -> {
DeviceCodeFlowParameters parameters = DeviceCodeFlowParameters.builder(new HashSet<>(request.getScopes()),
dc -> deviceCodeConsumer.accept(new DeviceCodeInfo(dc.userCode(), dc.deviceCode(),
dc.verificationUri(), OffsetDateTime.now().plusSeconds(dc.expiresIn()), dc.message()))).build();
return publicClientApplication.acquireToken(parameters);
}).map(ar -> new MsalToken(ar, options));
}
/**
 * Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow.
 *
 * @param request the details of the token request
 * @param authorizationCode the oauth2 authorization code
 * @param redirectUrl the redirectUrl where the authorization code is sent to
 * @return a Publisher that emits an AccessToken
 */
public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode,
    URI redirectUrl) {
    // Lazily build the parameters and start the exchange on subscription.
    return Mono.fromFuture(() -> {
        AuthorizationCodeParameters parameters = AuthorizationCodeParameters
            .builder(authorizationCode, redirectUrl)
            .scopes(new HashSet<>(request.getScopes()))
            .build();
        return publicClientApplication.acquireToken(parameters);
    }).map(result -> new MsalToken(result, options));
}
/**
* Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The
* credential will run a minimal local HttpServer at the given port, so {@code http:
* listed as a valid reply URL for the application.
*
* @param request the details of the token request
* @param port the port on which the HTTP server is listening
* @return a Publisher that emits an AccessToken
*/
public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, int port) {
String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId;
return AuthorizationCodeListener.create(port)
.flatMap(server -> {
URI redirectUri;
String browserUri;
try {
redirectUri = new URI(String.format("http:
browserUri =
String.format("%s/oauth2/v2.0/authorize?response_type=code&response_mode=query&prompt"
+ "=select_account&client_id=%s&redirect_uri=%s&state=%s&scope=%s",
authorityUrl,
clientId,
redirectUri.toString(),
UUID.randomUUID(),
String.join(" ", request.getScopes()));
} catch (URISyntaxException e) {
return server.dispose().then(Mono.error(e));
}
return server.listen()
.mergeWith(Mono.<String>fromRunnable(() -> {
try {
openUrl(browserUri);
} catch (IOException e) {
throw logger.logExceptionAsError(new IllegalStateException(e));
}
}).subscribeOn(Schedulers.newSingle("browser")))
.next()
.flatMap(code -> authenticateWithAuthorizationCode(request, code, redirectUri))
.onErrorResume(t -> server.dispose().then(Mono.error(t)))
.flatMap(msalToken -> server.dispose().then(Mono.just(msalToken)));
});
}
/**
 * Asynchronously acquire a token from the App Service Managed Service Identity endpoint.
 *
 * @param msiEndpoint the endpoint to acquire token from
 * @param msiSecret the secret to acquire token with
 * @param request the details of the token request
 * @return a Publisher that emits an AccessToken
 */
public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String msiEndpoint, String msiSecret,
    TokenRequestContext request) {
    return Mono.fromCallable(() -> {
        String resource = ScopeUtil.scopesToResource(request.getScopes());
        HttpURLConnection connection = null;
        StringBuilder payload = new StringBuilder();
        payload.append("resource=");
        payload.append(URLEncoder.encode(resource, "UTF-8"));
        payload.append("&api-version=");
        payload.append(URLEncoder.encode("2017-09-01", "UTF-8"));
        if (clientId != null) {
            payload.append("&clientid=");
            payload.append(URLEncoder.encode(clientId, "UTF-8"));
        }
        try {
            URL url = new URL(String.format("%s?%s", msiEndpoint, payload));
            connection = (HttpURLConnection) url.openConnection();
            connection.setRequestMethod("GET");
            if (msiSecret != null) {
                connection.setRequestProperty("Secret", msiSecret);
            }
            connection.setRequestProperty("Metadata", "true");
            connection.connect();
            // Read the whole response body ("\\A" delimiter = one token); close the Scanner —
            // the original leaked it along with its underlying stream.
            try (Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name())
                .useDelimiter("\\A")) {
                String result = s.hasNext() ? s.next() : "";
                MSIToken msiToken = SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON);
                return new IdentityToken(msiToken.getToken(), msiToken.getExpiresAt(), options);
            }
        } finally {
            if (connection != null) {
                connection.disconnect();
            }
        }
    });
}
/**
* Asynchronously acquire a token from the Virtual Machine IMDS endpoint.
*
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) {
String resource = ScopeUtil.scopesToResource(request.getScopes());
StringBuilder payload = new StringBuilder();
final int imdsUpgradeTimeInMs = 70 * 1000;
try {
payload.append("api-version=");
payload.append(URLEncoder.encode("2018-02-01", "UTF-8"));
payload.append("&resource=");
payload.append(URLEncoder.encode(resource, "UTF-8"));
if (clientId != null) {
payload.append("&client_id=");
payload.append(URLEncoder.encode(clientId, "UTF-8"));
}
} catch (IOException exception) {
return Mono.error(exception);
}
return checkIMDSAvailable().flatMap(available -> Mono.fromCallable(() -> {
int retry = 1;
while (retry <= options.getMaxRetry()) {
URL url = null;
HttpURLConnection connection = null;
try {
url =
new URL(String.format("http:
payload.toString()));
connection = (HttpURLConnection) url.openConnection();
connection.setRequestMethod("GET");
connection.setRequestProperty("Metadata", "true");
connection.connect();
Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name())
.useDelimiter("\\A");
String result = s.hasNext() ? s.next() : "";
MSIToken msiToken = SERIALIZER_ADAPTER.deserialize(result,
MSIToken.class, SerializerEncoding.JSON);
return new IdentityToken(msiToken.getToken(), msiToken.getExpiresAt(), options);
} catch (IOException exception) {
if (connection == null) {
throw logger.logExceptionAsError(new RuntimeException(
String.format("Could not connect to the url: %s.", url), exception));
}
int responseCode = connection.getResponseCode();
if (responseCode == 410
|| responseCode == 429
|| responseCode == 404
|| (responseCode >= 500 && responseCode <= 599)) {
int retryTimeoutInMs = options.getRetryTimeout()
.apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000;
retryTimeoutInMs =
(responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? imdsUpgradeTimeInMs
: retryTimeoutInMs;
retry++;
if (retry > options.getMaxRetry()) {
break;
} else {
sleep(retryTimeoutInMs);
}
} else {
throw logger.logExceptionAsError(new RuntimeException(
"Couldn't acquire access token from IMDS, verify your objectId, "
+ "clientId or msiResourceId", exception));
}
} finally {
if (connection != null) {
connection.disconnect();
}
}
}
throw logger.logExceptionAsError(new RuntimeException(
String.format("MSI: Failed to acquire tokens after retrying %s times",
options.getMaxRetry())));
}));
}
private Mono<Boolean> checkIMDSAvailable() {
StringBuilder payload = new StringBuilder();
try {
payload.append("api-version=");
payload.append(URLEncoder.encode("2018-02-01", "UTF-8"));
} catch (IOException exception) {
return Mono.error(exception);
}
return Mono.fromCallable(() -> {
HttpURLConnection connection = null;
URL url = new URL(String.format("http:
payload.toString()));
try {
connection = (HttpURLConnection) url.openConnection();
connection.setRequestMethod("GET");
connection.setConnectTimeout(500);
connection.connect();
} finally {
if (connection != null) {
connection.disconnect();
}
}
return true;
});
}
/**
 * Blocks the current thread for the given number of milliseconds.
 *
 * @param millis sleep duration in milliseconds.
 * @throws IllegalStateException if the thread is interrupted while sleeping.
 */
private static void sleep(int millis) {
    try {
        Thread.sleep(millis);
    } catch (InterruptedException ex) {
        // Restore the interrupt flag so callers further up the stack can still observe it.
        Thread.currentThread().interrupt();
        throw new IllegalStateException(ex);
    }
}
// Converts the SDK ProxyOptions into an equivalent java.net.Proxy: SOCKS4/SOCKS5 both map
// onto java.net's single SOCKS type; HTTP (and anything unrecognized) maps to HTTP.
private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) {
switch (options.getType()) {
case SOCKS4:
case SOCKS5:
return new Proxy(Type.SOCKS, options.getAddress());
case HTTP:
default:
return new Proxy(Type.HTTP, options.getAddress());
}
}
// Returns a directory safe to use as a process working directory: %SystemRoot%\system32 on
// Windows (null when SystemRoot is unset), /bin/ elsewhere.
private String getSafeWorkingDirectory() {
if (isWindowsPlatform()) {
if (CoreUtils.isNullOrEmpty(DEFAULT_WINDOWS_SYSTEM_ROOT)) {
return null;
}
return DEFAULT_WINDOWS_SYSTEM_ROOT + "\\system32";
} else {
return DEFAULT_MAC_LINUX_PATH;
}
}
// Detects Windows via the "os.name" system property (e.g. "Windows 10").
private boolean isWindowsPlatform() {
return System.getProperty("os.name").contains("Windows");
}
// Replaces every match of {@code regex} in {@code input} with "****", presumably to keep
// sensitive values out of logs — assumes regex is a valid pattern (TODO confirm callers).
private String redactInfo(String regex, String input) {
return input.replaceAll(regex, "****");
}
// Opens {@code url} in the platform's default browser, dispatching on "os.name"; logs an
// error instead of throwing when no launcher is known for the platform.
// NOTE(review): Runtime.exec(String) re-tokenizes the command on whitespace — prefer the
// String[] overload if URLs can contain spaces.
void openUrl(String url) throws IOException {
Runtime rt = Runtime.getRuntime();
String os = System.getProperty("os.name").toLowerCase(Locale.ROOT);
if (os.contains("win")) {
rt.exec("rundll32 url.dll,FileProtocolHandler " + url);
} else if (os.contains("mac")) {
rt.exec("open " + url);
} else if (os.contains("nix") || os.contains("nux")) {
rt.exec("xdg-open " + url);
} else {
logger.error("Browser could not be opened - please open {} in a browser on this device.", url);
}
}
} |
// Where is the case with multiple numbers?
void receiveDeferredWithSequenceOneMessage() {
final int fromSequenceNumber = 10;
final ServiceBusReceivedMessage receivedMessage = mock(ServiceBusReceivedMessage.class);
when(managementNode.receiveDeferredMessage(receiveOptions.getReceiveMode(), fromSequenceNumber)).thenReturn(Mono.just(receivedMessage));
StepVerifier.create(consumer.receiveDeferredMessage(fromSequenceNumber))
.expectNext(receivedMessage)
.verifyComplete();
}
// NOTE(review): duplicated copy of receiveDeferredWithSequenceOneMessage follows; rename or remove one.
void receiveDeferredWithSequenceOneMessage() {
final int fromSequenceNumber = 10;
final ServiceBusReceivedMessage receivedMessage = mock(ServiceBusReceivedMessage.class);
when(managementNode.receiveDeferredMessage(receiveOptions.getReceiveMode(), fromSequenceNumber)).thenReturn(Mono.just(receivedMessage));
StepVerifier.create(consumer.receiveDeferredMessage(fromSequenceNumber))
.expectNext(receivedMessage)
.verifyComplete();
}
class ServiceBusReceiverAsyncClientTest {
// Fixed payload and sizing constants shared by the tests below.
private static final String PAYLOAD = "hello";
private static final byte[] PAYLOAD_BYTES = PAYLOAD.getBytes(UTF_8);
private static final int PREFETCH = 5;
private static final String NAMESPACE = "my-namespace-foo";
private static final String ENTITY_PATH = "queue-name";
private static final MessagingEntityType ENTITY_TYPE = MessagingEntityType.QUEUE;
private final ClientLogger logger = new ClientLogger(ServiceBusReceiverAsyncClientTest.class);
private final String messageTrackingUUID = UUID.randomUUID().toString();
// Processors/sinks used to push endpoint-state and message events into the client under test.
private final DirectProcessor<AmqpEndpointState> endpointProcessor = DirectProcessor.create();
private final FluxSink<AmqpEndpointState> endpointSink = endpointProcessor.sink(FluxSink.OverflowStrategy.BUFFER);
private final DirectProcessor<Message> messageProcessor = DirectProcessor.create();
private final FluxSink<Message> messageSink = messageProcessor.sink(FluxSink.OverflowStrategy.BUFFER);
private ServiceBusConnectionProcessor connectionProcessor;
// The client under test; rebuilt in setup() and closed in teardown().
private ServiceBusReceiverAsyncClient consumer;
private ReceiveMessageOptions receiveOptions;
@Mock
private AmqpReceiveLink amqpReceiveLink;
@Mock
private ServiceBusAmqpConnection connection;
@Mock
private TokenCredential tokenCredential;
@Mock
private MessageSerializer messageSerializer;
@Mock
private TracerProvider tracerProvider;
@Mock
private ServiceBusManagementNode managementNode;
@Mock
private ServiceBusReceivedMessage receivedMessage;
@Mock
private ServiceBusReceivedMessage receivedMessage2;
@BeforeAll
static void beforeAll() {
// Generous global timeout so StepVerifier-based tests don't flake on slow CI machines.
StepVerifier.setDefaultTimeout(Duration.ofSeconds(100));
}
@AfterAll
static void afterAll() {
// Restore the global StepVerifier timeout so other test classes are unaffected.
StepVerifier.resetDefaultTimeout();
}
@BeforeEach
void setup(TestInfo testInfo) {
logger.info("[{}] Setting up.", testInfo.getDisplayName());
MockitoAnnotations.initMocks(this);
// Publish messages on a single scheduler to mimic the AMQP delivery thread.
when(amqpReceiveLink.receive()).thenReturn(messageProcessor.publishOn(Schedulers.single()));
when(amqpReceiveLink.getEndpointStates()).thenReturn(endpointProcessor);
ConnectionOptions connectionOptions = new ConnectionOptions(NAMESPACE, tokenCredential,
CbsAuthorizationType.SHARED_ACCESS_SIGNATURE, AmqpTransportType.AMQP, new AmqpRetryOptions(),
ProxyOptions.SYSTEM_DEFAULTS, Schedulers.boundedElastic());
when(connection.getEndpointStates()).thenReturn(endpointProcessor);
// Mark the connection ACTIVE so the client can proceed to create receive links.
endpointSink.next(AmqpEndpointState.ACTIVE);
when(connection.getManagementNode(ENTITY_PATH, ENTITY_TYPE)).thenReturn(Mono.just(managementNode));
when(connection.createReceiveLink(anyString(), anyString(), any(ReceiveMode.class), anyBoolean(), any(),
any(MessagingEntityType.class))).thenReturn(Mono.just(amqpReceiveLink));
connectionProcessor =
Flux.<ServiceBusAmqpConnection>create(sink -> sink.next(connection))
.subscribeWith(new ServiceBusConnectionProcessor(connectionOptions.getFullyQualifiedNamespace(),
ENTITY_PATH, connectionOptions.getRetry()));
receiveOptions = new ReceiveMessageOptions().setPrefetchCount(PREFETCH);
consumer = new ServiceBusReceiverAsyncClient(NAMESPACE, ENTITY_PATH, MessagingEntityType.QUEUE,
false, receiveOptions, connectionProcessor, tracerProvider, messageSerializer);
}
@AfterEach
void teardown(TestInfo testInfo) {
logger.info("[{}] Tearing down.", testInfo.getDisplayName());
consumer.close();
// Clear inline mocks so stubbing state does not leak between tests.
Mockito.framework().clearInlineMocks();
}
/**
 * Verifies that two consecutive peek() calls return two distinct messages.
 */
@SuppressWarnings("unchecked")
@Test
void peekTwoMessages() {
// Varargs thenReturn queues the two responses in call order.
when(managementNode.peek()).thenReturn(Mono.just(receivedMessage), Mono.just(receivedMessage2));
StepVerifier.create(consumer.peek())
.expectNext(receivedMessage)
.verifyComplete();
StepVerifier.create(consumer.peek())
.expectNext(receivedMessage2)
.verifyComplete();
}
/**
 * Verifies that peek(sequenceNumber) returns the single message at that sequence number.
 */
@Test
void peekWithSequenceOneMessage() {
final int fromSequenceNumber = 10;
final ServiceBusReceivedMessage receivedMessage = mock(ServiceBusReceivedMessage.class);
when(managementNode.peek(fromSequenceNumber)).thenReturn(Mono.just(receivedMessage));
StepVerifier.create(consumer.peek(fromSequenceNumber))
.expectNext(receivedMessage)
.verifyComplete();
}
/**
 * Verifies that this receives a number of messages and that the initial credits we add
 * are equal to the prefetch value.
 */
@Test
void receivesNumberOfEvents() {
final int numberOfEvents = 1;
// 10 messages are pushed but only numberOfEvents are taken.
final List<Message> messages = getMessages(10);
when(messageSerializer.deserialize(any(Message.class), eq(ServiceBusReceivedMessage.class)))
.thenReturn(mock(ServiceBusReceivedMessage.class));
StepVerifier.create(consumer.receive().take(numberOfEvents))
.then(() -> messages.forEach(m -> messageSink.next(m)))
.expectNextCount(numberOfEvents)
.verifyComplete();
// Initial flow-control credits must equal the configured prefetch.
verify(amqpReceiveLink).addCredits(PREFETCH);
}
/**
 * Verifies that with auto-complete enabled, each received message is completed against the
 * management node using its lock token.
 */
@Test
void receivesAndAutoCompletes() {
final ReceiveMessageOptions options = new ReceiveMessageOptions().setPrefetchCount(PREFETCH)
.setAutoComplete(true);
final ServiceBusReceiverAsyncClient consumer2 = new ServiceBusReceiverAsyncClient(
NAMESPACE, ENTITY_PATH, MessagingEntityType.QUEUE, false, options, connectionProcessor,
tracerProvider, messageSerializer);
final UUID lockToken1 = UUID.randomUUID();
final UUID lockToken2 = UUID.randomUUID();
final Instant expiration = Instant.now().plus(Duration.ofMinutes(1));
final MessageWithLockToken message = mock(MessageWithLockToken.class);
final MessageWithLockToken message2 = mock(MessageWithLockToken.class);
when(message.getLockToken()).thenReturn(lockToken1);
when(message2.getLockToken()).thenReturn(lockToken2);
when(messageSerializer.deserialize(message, ServiceBusReceivedMessage.class)).thenReturn(receivedMessage);
when(messageSerializer.deserialize(message2, ServiceBusReceivedMessage.class)).thenReturn(receivedMessage2);
when(receivedMessage.getLockToken()).thenReturn(lockToken1);
when(receivedMessage.getLockedUntil()).thenReturn(expiration);
when(receivedMessage2.getLockToken()).thenReturn(lockToken2);
when(receivedMessage2.getLockedUntil()).thenReturn(expiration);
when(connection.getManagementNode(ENTITY_PATH, ENTITY_TYPE))
.thenReturn(Mono.just(managementNode));
when(managementNode.updateDisposition(eq(lockToken1), eq(DispositionStatus.COMPLETED), isNull(), isNull(), isNull()))
.thenReturn(Mono.empty());
when(managementNode.updateDisposition(eq(lockToken2), eq(DispositionStatus.COMPLETED), isNull(), isNull(), isNull()))
.thenReturn(Mono.empty());
// thenAwait gives the asynchronous auto-complete a window to run before verification.
StepVerifier.create(consumer2.receive().take(2))
.then(() -> {
messageSink.next(message);
messageSink.next(message2);
})
.expectNext(receivedMessage)
.expectNext(receivedMessage2)
.thenAwait(Duration.ofSeconds(5))
.verifyComplete();
logger.info("Verifying assertions.");
verify(managementNode).updateDisposition(eq(lockToken1), eq(DispositionStatus.COMPLETED), isNull(), isNull(), isNull());
verify(managementNode).updateDisposition(eq(lockToken2), eq(DispositionStatus.COMPLETED), isNull(), isNull(), isNull());
}
/**
 * Verifies that if there is no lock token, the message is not completed.
 */
@Test
void receivesAndAutoCompleteWithoutLockToken() {
final ReceiveMessageOptions options = new ReceiveMessageOptions().setPrefetchCount(PREFETCH)
.setAutoComplete(true);
final ServiceBusReceiverAsyncClient consumer2 = new ServiceBusReceiverAsyncClient(
NAMESPACE, ENTITY_PATH, MessagingEntityType.QUEUE, false, options, connectionProcessor,
tracerProvider, messageSerializer);
// No getLockToken() stubbing: the mocks return null lock tokens, so auto-complete must skip them.
final MessageWithLockToken message = mock(MessageWithLockToken.class);
final MessageWithLockToken message2 = mock(MessageWithLockToken.class);
when(messageSerializer.deserialize(message, ServiceBusReceivedMessage.class)).thenReturn(receivedMessage);
when(messageSerializer.deserialize(message2, ServiceBusReceivedMessage.class)).thenReturn(receivedMessage2);
// NOTE(review): these stubs are contradicted by verifyZeroInteractions(managementNode) below —
// presumably defensive stubbing in case the skip logic regresses; confirm intent.
when(connection.getManagementNode(ENTITY_PATH, ENTITY_TYPE))
.thenReturn(Mono.just(managementNode));
when(managementNode.updateDisposition(any(), eq(DispositionStatus.COMPLETED), isNull(), isNull(), isNull()))
.thenReturn(Mono.delay(Duration.ofMillis(250)).then());
try {
StepVerifier.create(consumer2.receive().take(2))
.then(() -> {
messageSink.next(message);
messageSink.next(message2);
})
.expectNext(receivedMessage)
.expectNext(receivedMessage2)
.verifyComplete();
} finally {
consumer2.close();
}
verifyZeroInteractions(managementNode);
}
/**
 * Verifies that completing a message whose lock token is null errors with
 * IllegalArgumentException and never reaches the management node.
 */
@Test
void completeNullLockToken() {
when(connection.getManagementNode(ENTITY_PATH, ENTITY_TYPE)).thenReturn(Mono.just(managementNode));
when(managementNode.updateDisposition(any(), eq(DispositionStatus.COMPLETED), isNull(), isNull(), isNull()))
.thenReturn(Mono.delay(Duration.ofMillis(250)).then());
when(receivedMessage.getLockToken()).thenReturn(null);
StepVerifier.create(consumer.complete(receivedMessage))
.expectError(IllegalArgumentException.class)
.verify();
// The disposition call must never be made for a message without a lock token.
verify(managementNode, times(0))
.updateDisposition(any(), eq(DispositionStatus.COMPLETED), isNull(), isNull(), isNull());
}
/**
 * Verifies that completing a null message errors with NullPointerException.
 */
@Test
void completeNullMessage() {
StepVerifier.create(consumer.complete(null)).expectError(NullPointerException.class).verify();
}
/**
 * Verifies that completing a message errors in RECEIVE_AND_DELETE mode, where the broker
 * already settles messages and explicit settlement is unsupported.
 */
@Test
void completeInReceiveAndDeleteMode() {
final ReceiveMessageOptions options = new ReceiveMessageOptions()
.setAutoComplete(false)
.setReceiveMode(ReceiveMode.RECEIVE_AND_DELETE);
ServiceBusReceiverAsyncClient client = new ServiceBusReceiverAsyncClient(NAMESPACE, ENTITY_PATH,
MessagingEntityType.QUEUE, false, options, connectionProcessor, tracerProvider,
messageSerializer);
final UUID lockToken1 = UUID.randomUUID();
when(receivedMessage.getLockToken()).thenReturn(lockToken1);
try {
StepVerifier.create(client.complete(receivedMessage))
.expectError(UnsupportedOperationException.class)
.verify();
} finally {
client.close();
}
}
/**
 * Verifies that peekBatch(n) emits the n messages returned by the management node.
 */
@Test
void peekBatchMessages() {
final int numberOfEvents = 2;
when(managementNode.peekBatch(numberOfEvents))
.thenReturn(Flux.fromArray(new ServiceBusReceivedMessage[]{receivedMessage, receivedMessage2}));
StepVerifier.create(consumer.peekBatch(numberOfEvents))
.expectNextCount(numberOfEvents)
.verifyComplete();
}
/**
 * Verifies that peekBatch(n, fromSequenceNumber) emits the messages returned by the
 * management node, in order.
 */
@Test
void peekBatchWithSequenceNumberMessages() {
final int numberOfEvents = 2;
final int fromSequenceNumber = 10;
when(managementNode.peekBatch(numberOfEvents, fromSequenceNumber))
.thenReturn(Flux.fromArray(new ServiceBusReceivedMessage[]{receivedMessage, receivedMessage2}));
StepVerifier.create(consumer.peekBatch(numberOfEvents, fromSequenceNumber))
.expectNext(receivedMessage, receivedMessage2)
.verifyComplete();
}
/**
 * Verifies that we can dead-letter a message with a reason, a description and properties
 * to modify, and that the disposition reaches the management node.
 */
@Test
void deadLetterWithDescription() {
    final UUID lockToken1 = UUID.randomUUID();
    final String description = "some-dead-letter-description";
    final String reason = "dead-letter-reason";
    final Map<String, Object> propertiesToModify = new HashMap<>();
    propertiesToModify.put("something", true);
    final Instant expiration = Instant.now().plus(Duration.ofMinutes(5));
    final MessageWithLockToken message = mock(MessageWithLockToken.class);
    when(messageSerializer.deserialize(message, ServiceBusReceivedMessage.class)).thenReturn(receivedMessage);
    when(receivedMessage.getLockToken()).thenReturn(lockToken1);
    when(receivedMessage.getLockedUntil()).thenReturn(expiration);
    when(connection.getManagementNode(ENTITY_PATH, ENTITY_TYPE)).thenReturn(Mono.just(managementNode));
    when(managementNode.updateDisposition(lockToken1, DispositionStatus.SUSPENDED, reason, description, propertiesToModify))
        .thenReturn(Mono.empty());
    // The dead-letter pipeline completes empty, so we assert completion only; the original's
    // zero-argument expectNext() was a no-op assertion and has been removed.
    StepVerifier.create(consumer.receive()
        .take(1)
        .flatMap(m -> consumer.deadLetter(m, reason, description, propertiesToModify)))
        .then(() -> messageSink.next(message))
        .verifyComplete();
    verify(managementNode).updateDisposition(lockToken1, DispositionStatus.SUSPENDED, reason, description, propertiesToModify);
}
/**
 * Verifies that the user can complete settlement methods on received message.
 */
@ParameterizedTest
@EnumSource(DispositionStatus.class)
void settleMessage(DispositionStatus dispositionStatus) {
// Arrange: two messages, each with its own lock token and a lock expiration far
// enough away that auto lock renewal does not fire during the test.
final UUID lockToken1 = UUID.randomUUID();
final UUID lockToken2 = UUID.randomUUID();
final Instant expiration = Instant.now().plus(Duration.ofMinutes(5));
final MessageWithLockToken message = mock(MessageWithLockToken.class);
final MessageWithLockToken message2 = mock(MessageWithLockToken.class);
when(messageSerializer.deserialize(message, ServiceBusReceivedMessage.class)).thenReturn(receivedMessage);
when(messageSerializer.deserialize(message2, ServiceBusReceivedMessage.class)).thenReturn(receivedMessage2);
when(receivedMessage.getLockToken()).thenReturn(lockToken1);
when(receivedMessage.getLockedUntil()).thenReturn(expiration);
when(receivedMessage2.getLockToken()).thenReturn(lockToken2);
when(receivedMessage2.getLockedUntil()).thenReturn(expiration);
when(connection.getManagementNode(ENTITY_PATH, ENTITY_TYPE))
.thenReturn(Mono.just(managementNode));
// Both tokens are stubbed, but only lockToken1 is expected to be settled below.
when(managementNode.updateDisposition(lockToken1, dispositionStatus, null, null, null))
.thenReturn(Mono.empty());
when(managementNode.updateDisposition(lockToken2, dispositionStatus, null, null, null))
.thenReturn(Mono.empty());
// Pump both messages through the receive pipeline first so the client records their locks.
StepVerifier.create(consumer.receive().take(2))
.then(() -> {
messageSink.next(message);
messageSink.next(message2);
})
.expectNext(receivedMessage)
.expectNext(receivedMessage2)
.thenAwait(Duration.ofSeconds(5))
.verifyComplete();
// Act: settle only the first message using the parameterized disposition.
final Mono<Void> operation;
switch (dispositionStatus) {
case DEFERRED:
operation = consumer.defer(receivedMessage);
break;
case ABANDONED:
operation = consumer.abandon(receivedMessage);
break;
case COMPLETED:
operation = consumer.complete(receivedMessage);
break;
case SUSPENDED:
operation = consumer.deadLetter(receivedMessage);
break;
default:
throw new IllegalArgumentException("Unrecognized operation: " + dispositionStatus);
}
StepVerifier.create(operation)
.verifyComplete();
// Assert: only the settled message's token reached the management node.
verify(managementNode).updateDisposition(lockToken1, dispositionStatus, null, null, null);
verify(managementNode, times(0)).updateDisposition(lockToken2, dispositionStatus, null, null, null);
}
/**
 * Creates {@code numberOfEvents} AMQP messages sharing the same tracking id and a sample header.
 *
 * NOTE(review): a stray Javadoc block ("receive deferred one messages") and an {@code @Test}
 * annotation preceded this helper; JUnit 5 test methods cannot be private (and this is a fixture
 * factory, not a test), so both were removed.
 *
 * @param numberOfEvents Number of messages to create.
 * @return The generated messages.
 */
private List<Message> getMessages(int numberOfEvents) {
    final Map<String, String> map = Collections.singletonMap("SAMPLE_HEADER", "foo");
    return IntStream.range(0, numberOfEvents)
        .mapToObj(index -> getMessage(PAYLOAD_BYTES, messageTrackingUUID, map))
        .collect(Collectors.toList());
}
} | class ServiceBusReceiverAsyncClientTest {
// Test fixtures: constants describing the fake entity plus the reactive sinks used to
// drive endpoint-state and message events into the client under test.
private static final String PAYLOAD = "hello";
private static final byte[] PAYLOAD_BYTES = PAYLOAD.getBytes(UTF_8);
private static final int PREFETCH = 5;
private static final String NAMESPACE = "my-namespace-foo";
private static final String ENTITY_PATH = "queue-name";
private static final MessagingEntityType ENTITY_TYPE = MessagingEntityType.QUEUE;
private final ClientLogger logger = new ClientLogger(ServiceBusReceiverAsyncClientTest.class);
private final String messageTrackingUUID = UUID.randomUUID().toString();
// Sinks used by tests to emit AMQP endpoint states and inbound messages on demand.
private final DirectProcessor<AmqpEndpointState> endpointProcessor = DirectProcessor.create();
private final FluxSink<AmqpEndpointState> endpointSink = endpointProcessor.sink(FluxSink.OverflowStrategy.BUFFER);
private final DirectProcessor<Message> messageProcessor = DirectProcessor.create();
private final FluxSink<Message> messageSink = messageProcessor.sink(FluxSink.OverflowStrategy.BUFFER);
// Recreated per test in setup(); closed in teardown().
private ServiceBusConnectionProcessor connectionProcessor;
private ServiceBusReceiverAsyncClient consumer;
private ReceiveMessageOptions receiveOptions;
@Mock
private AmqpReceiveLink amqpReceiveLink;
@Mock
private ServiceBusAmqpConnection connection;
@Mock
private TokenCredential tokenCredential;
@Mock
private MessageSerializer messageSerializer;
@Mock
private TracerProvider tracerProvider;
@Mock
private ServiceBusManagementNode managementNode;
@Mock
private ServiceBusReceivedMessage receivedMessage;
@Mock
private ServiceBusReceivedMessage receivedMessage2;
@BeforeAll
static void beforeAll() {
// Reactive pipelines with mocked delays can be slow on CI hosts; use a generous default timeout.
StepVerifier.setDefaultTimeout(Duration.ofSeconds(100));
}
@AfterAll
static void afterAll() {
// Restore the global StepVerifier timeout so other test classes are unaffected.
StepVerifier.resetDefaultTimeout();
}
// Wires the mocked AMQP layer (link, connection, management node) into a fresh
// ServiceBusReceiverAsyncClient before every test.
@BeforeEach
void setup(TestInfo testInfo) {
logger.info("[{}] Setting up.", testInfo.getDisplayName());
MockitoAnnotations.initMocks(this);
// The receive link replays whatever tests push into messageSink.
when(amqpReceiveLink.receive()).thenReturn(messageProcessor.publishOn(Schedulers.single()));
when(amqpReceiveLink.getEndpointStates()).thenReturn(endpointProcessor);
ConnectionOptions connectionOptions = new ConnectionOptions(NAMESPACE, tokenCredential,
CbsAuthorizationType.SHARED_ACCESS_SIGNATURE, AmqpTransportType.AMQP, new AmqpRetryOptions(),
ProxyOptions.SYSTEM_DEFAULTS, Schedulers.boundedElastic());
when(connection.getEndpointStates()).thenReturn(endpointProcessor);
// Mark the connection as ACTIVE so downstream operators proceed immediately.
endpointSink.next(AmqpEndpointState.ACTIVE);
when(connection.getManagementNode(ENTITY_PATH, ENTITY_TYPE)).thenReturn(Mono.just(managementNode));
when(connection.createReceiveLink(anyString(), anyString(), any(ReceiveMode.class), anyBoolean(), any(),
any(MessagingEntityType.class))).thenReturn(Mono.just(amqpReceiveLink));
connectionProcessor =
Flux.<ServiceBusAmqpConnection>create(sink -> sink.next(connection))
.subscribeWith(new ServiceBusConnectionProcessor(connectionOptions.getFullyQualifiedNamespace(),
ENTITY_PATH, connectionOptions.getRetry()));
receiveOptions = new ReceiveMessageOptions().setPrefetchCount(PREFETCH);
consumer = new ServiceBusReceiverAsyncClient(NAMESPACE, ENTITY_PATH, MessagingEntityType.QUEUE,
false, receiveOptions, connectionProcessor, tracerProvider, messageSerializer);
}
@AfterEach
void teardown(TestInfo testInfo) {
logger.info("[{}] Tearing down.", testInfo.getDisplayName());
// Close the client first, then clear inline mocks so state does not leak between tests.
consumer.close();
Mockito.framework().clearInlineMocks();
}
/**
 * Verifies that when user calls peek more than one time, It returns different object.
 */
@SuppressWarnings("unchecked")
@Test
void peekTwoMessages() {
    // Consecutive peeks are stubbed to hand back two distinct messages.
    when(managementNode.peek()).thenReturn(Mono.just(receivedMessage), Mono.just(receivedMessage2));

    // First peek yields the first message...
    StepVerifier.create(consumer.peek()).expectNext(receivedMessage).verifyComplete();

    // ...and peeking again yields the next one.
    StepVerifier.create(consumer.peek()).expectNext(receivedMessage2).verifyComplete();
}
/**
 * Verifies that this peek one messages from a sequence Number.
 */
@Test
void peekWithSequenceOneMessage() {
    // Arrange: the management node returns one message when peeking from sequence 10.
    final int fromSequenceNumber = 10;
    final ServiceBusReceivedMessage peekedMessage = mock(ServiceBusReceivedMessage.class);
    when(managementNode.peek(fromSequenceNumber)).thenReturn(Mono.just(peekedMessage));

    // Act & Assert: that same message is emitted by the client.
    StepVerifier.create(consumer.peek(fromSequenceNumber))
        .expectNext(peekedMessage)
        .verifyComplete();
}
/**
 * Verifies that this receives a number of messages. Verifies that the initial credits we add are equal to the
 * prefetch value.
 */
@Test
void receivesNumberOfEvents() {
    // Arrange: ten raw messages are available, but we only take one downstream.
    final int numberOfEvents = 1;
    final List<Message> messages = getMessages(10);
    when(messageSerializer.deserialize(any(Message.class), eq(ServiceBusReceivedMessage.class)))
        .thenReturn(mock(ServiceBusReceivedMessage.class));

    // Act & Assert: taking one message completes the pipeline.
    StepVerifier.create(consumer.receive().take(numberOfEvents))
        .then(() -> messages.forEach(messageSink::next))
        .expectNextCount(numberOfEvents)
        .verifyComplete();

    // The receiver should have seeded the link with exactly the prefetch credits.
    verify(amqpReceiveLink).addCredits(PREFETCH);
}
/**
 * Verifies that we can receive messages from the processor.
 */
@Test
void receivesAndAutoCompletes() {
// Arrange: a client with auto-complete enabled, so every received message should be
// completed against the management node after it is emitted downstream.
final ReceiveMessageOptions options = new ReceiveMessageOptions().setPrefetchCount(PREFETCH)
.setAutoComplete(true);
final ServiceBusReceiverAsyncClient consumer2 = new ServiceBusReceiverAsyncClient(
NAMESPACE, ENTITY_PATH, MessagingEntityType.QUEUE, false, options, connectionProcessor,
tracerProvider, messageSerializer);
final UUID lockToken1 = UUID.randomUUID();
final UUID lockToken2 = UUID.randomUUID();
final Instant expiration = Instant.now().plus(Duration.ofMinutes(1));
final MessageWithLockToken message = mock(MessageWithLockToken.class);
final MessageWithLockToken message2 = mock(MessageWithLockToken.class);
when(message.getLockToken()).thenReturn(lockToken1);
when(message2.getLockToken()).thenReturn(lockToken2);
when(messageSerializer.deserialize(message, ServiceBusReceivedMessage.class)).thenReturn(receivedMessage);
when(messageSerializer.deserialize(message2, ServiceBusReceivedMessage.class)).thenReturn(receivedMessage2);
when(receivedMessage.getLockToken()).thenReturn(lockToken1);
when(receivedMessage.getLockedUntil()).thenReturn(expiration);
when(receivedMessage2.getLockToken()).thenReturn(lockToken2);
when(receivedMessage2.getLockedUntil()).thenReturn(expiration);
when(connection.getManagementNode(ENTITY_PATH, ENTITY_TYPE))
.thenReturn(Mono.just(managementNode));
// COMPLETED dispositions for both lock tokens succeed immediately.
when(managementNode.updateDisposition(eq(lockToken1), eq(DispositionStatus.COMPLETED), isNull(), isNull(), isNull()))
.thenReturn(Mono.empty());
when(managementNode.updateDisposition(eq(lockToken2), eq(DispositionStatus.COMPLETED), isNull(), isNull(), isNull()))
.thenReturn(Mono.empty());
// Act: emit both messages; thenAwait gives the async auto-complete time to run.
// NOTE(review): consumer2 is never closed here — presumably relying on test teardown; verify.
StepVerifier.create(consumer2.receive().take(2))
.then(() -> {
messageSink.next(message);
messageSink.next(message2);
})
.expectNext(receivedMessage)
.expectNext(receivedMessage2)
.thenAwait(Duration.ofSeconds(5))
.verifyComplete();
logger.info("Verifying assertions.");
// Assert: both messages were auto-completed with their own lock tokens.
verify(managementNode).updateDisposition(eq(lockToken1), eq(DispositionStatus.COMPLETED), isNull(), isNull(), isNull());
verify(managementNode).updateDisposition(eq(lockToken2), eq(DispositionStatus.COMPLETED), isNull(), isNull(), isNull());
}
/**
 * Verifies that if there is no lock token, the message is not completed.
 */
@Test
void receivesAndAutoCompleteWithoutLockToken() {
// Arrange: auto-complete is on, but the received messages are never given lock tokens
// (getLockToken() is not stubbed, so it returns null), so no settlement should occur.
final ReceiveMessageOptions options = new ReceiveMessageOptions().setPrefetchCount(PREFETCH)
.setAutoComplete(true);
final ServiceBusReceiverAsyncClient consumer2 = new ServiceBusReceiverAsyncClient(
NAMESPACE, ENTITY_PATH, MessagingEntityType.QUEUE, false, options, connectionProcessor,
tracerProvider, messageSerializer);
final MessageWithLockToken message = mock(MessageWithLockToken.class);
final MessageWithLockToken message2 = mock(MessageWithLockToken.class);
when(messageSerializer.deserialize(message, ServiceBusReceivedMessage.class)).thenReturn(receivedMessage);
when(messageSerializer.deserialize(message2, ServiceBusReceivedMessage.class)).thenReturn(receivedMessage2);
when(connection.getManagementNode(ENTITY_PATH, ENTITY_TYPE))
.thenReturn(Mono.just(managementNode));
when(managementNode.updateDisposition(any(), eq(DispositionStatus.COMPLETED), isNull(), isNull(), isNull()))
.thenReturn(Mono.delay(Duration.ofMillis(250)).then());
try {
// Act: both messages flow through and the pipeline completes normally.
StepVerifier.create(consumer2.receive().take(2))
.then(() -> {
messageSink.next(message);
messageSink.next(message2);
})
.expectNext(receivedMessage)
.expectNext(receivedMessage2)
.verifyComplete();
} finally {
consumer2.close();
}
// Assert: without lock tokens the management node was never invoked.
verifyZeroInteractions(managementNode);
}
/**
 * Verifies that we error if we try to complete a message without a lock token.
 */
@Test
void completeNullLockToken() {
    // Arrange: the management node is reachable, but the message carries no lock token.
    when(connection.getManagementNode(ENTITY_PATH, ENTITY_TYPE)).thenReturn(Mono.just(managementNode));
    when(managementNode.updateDisposition(any(), eq(DispositionStatus.COMPLETED), isNull(), isNull(), isNull()))
        .thenReturn(Mono.delay(Duration.ofMillis(250)).then());
    when(receivedMessage.getLockToken()).thenReturn(null);

    // Act & Assert: a message without a lock token cannot be settled.
    StepVerifier.create(consumer.complete(receivedMessage))
        .expectError(IllegalArgumentException.class)
        .verify();

    // The service must never have been asked to update the disposition.
    verify(managementNode, times(0))
        .updateDisposition(any(), eq(DispositionStatus.COMPLETED), isNull(), isNull(), isNull());
}
/**
 * Verifies that we error if we try to complete a null message.
 */
@Test
void completeNullMessage() {
    // A null message is rejected up front with a NullPointerException.
    StepVerifier.create(consumer.complete(null))
        .expectError(NullPointerException.class)
        .verify();
}
/**
 * Verifies that we error if we complete in RECEIVE_AND_DELETE mode.
 */
@Test
void completeInReceiveAndDeleteMode() {
    // Arrange: a client opened in RECEIVE_AND_DELETE mode, where settlement is meaningless.
    final ReceiveMessageOptions receiveAndDeleteOptions = new ReceiveMessageOptions()
        .setAutoComplete(false)
        .setReceiveMode(ReceiveMode.RECEIVE_AND_DELETE);
    final ServiceBusReceiverAsyncClient receiveAndDeleteClient = new ServiceBusReceiverAsyncClient(
        NAMESPACE, ENTITY_PATH, MessagingEntityType.QUEUE, false, receiveAndDeleteOptions,
        connectionProcessor, tracerProvider, messageSerializer);
    when(receivedMessage.getLockToken()).thenReturn(UUID.randomUUID());

    // Act & Assert: completing must fail because messages are deleted on receive.
    try {
        StepVerifier.create(receiveAndDeleteClient.complete(receivedMessage))
            .expectError(UnsupportedOperationException.class)
            .verify();
    } finally {
        receiveAndDeleteClient.close();
    }
}
/**
 * Verifies that this peek batch of messages.
 */
@Test
void peekBatchMessages() {
    // Arrange: the management node hands back two peeked messages.
    final int maxMessages = 2;
    final ServiceBusReceivedMessage[] peeked = {receivedMessage, receivedMessage2};
    when(managementNode.peekBatch(maxMessages)).thenReturn(Flux.fromArray(peeked));

    // Act & Assert: exactly that many messages flow through the client.
    StepVerifier.create(consumer.peekBatch(maxMessages))
        .expectNextCount(maxMessages)
        .verifyComplete();
}
/**
 * Verifies that this peek batch of messages from a sequence Number.
 */
@Test
void peekBatchWithSequenceNumberMessages() {
    // Arrange: peeking from a given sequence number yields two known messages.
    final int maxMessages = 2;
    final int startingSequenceNumber = 10;
    when(managementNode.peekBatch(maxMessages, startingSequenceNumber))
        .thenReturn(Flux.just(receivedMessage, receivedMessage2));

    // Act & Assert: the same messages are emitted in order.
    StepVerifier.create(consumer.peekBatch(maxMessages, startingSequenceNumber))
        .expectNext(receivedMessage, receivedMessage2)
        .verifyComplete();
}
/**
 * Verifies that we can deadletter a message with an error and description.
 */
@Test
void deadLetterWithDescription() {
    // Arrange: a message with a valid lock token that will be dead-lettered with a
    // reason, description, and modified properties.
    final UUID lockToken1 = UUID.randomUUID();
    final String description = "some-dead-letter-description";
    final String reason = "dead-letter-reason";
    final Map<String, Object> propertiesToModify = new HashMap<>();
    propertiesToModify.put("something", true);
    final Instant expiration = Instant.now().plus(Duration.ofMinutes(5));
    final MessageWithLockToken message = mock(MessageWithLockToken.class);

    when(messageSerializer.deserialize(message, ServiceBusReceivedMessage.class)).thenReturn(receivedMessage);
    when(receivedMessage.getLockToken()).thenReturn(lockToken1);
    when(receivedMessage.getLockedUntil()).thenReturn(expiration);
    when(connection.getManagementNode(ENTITY_PATH, ENTITY_TYPE)).thenReturn(Mono.just(managementNode));
    when(managementNode.updateDisposition(lockToken1, DispositionStatus.SUSPENDED, reason, description,
        propertiesToModify)).thenReturn(Mono.empty());

    // Act & Assert: deadLetter(...) returns Mono<Void>, so the flat-mapped pipeline emits nothing.
    // NOTE: the previous zero-argument expectNext() call was a varargs no-op and has been removed;
    // verifyComplete() already asserts completion without elements.
    StepVerifier.create(consumer.receive()
        .take(1)
        .flatMap(m -> consumer.deadLetter(m, reason, description, propertiesToModify)))
        .then(() -> messageSink.next(message))
        .verifyComplete();

    verify(managementNode).updateDisposition(lockToken1, DispositionStatus.SUSPENDED, reason, description, propertiesToModify);
}
/**
 * Verifies that the user can complete settlement methods on received message.
 */
@ParameterizedTest
@EnumSource(DispositionStatus.class)
void settleMessage(DispositionStatus dispositionStatus) {
// Arrange: two messages, each with its own lock token and a lock expiration far
// enough away that auto lock renewal does not fire during the test.
final UUID lockToken1 = UUID.randomUUID();
final UUID lockToken2 = UUID.randomUUID();
final Instant expiration = Instant.now().plus(Duration.ofMinutes(5));
final MessageWithLockToken message = mock(MessageWithLockToken.class);
final MessageWithLockToken message2 = mock(MessageWithLockToken.class);
when(messageSerializer.deserialize(message, ServiceBusReceivedMessage.class)).thenReturn(receivedMessage);
when(messageSerializer.deserialize(message2, ServiceBusReceivedMessage.class)).thenReturn(receivedMessage2);
when(receivedMessage.getLockToken()).thenReturn(lockToken1);
when(receivedMessage.getLockedUntil()).thenReturn(expiration);
when(receivedMessage2.getLockToken()).thenReturn(lockToken2);
when(receivedMessage2.getLockedUntil()).thenReturn(expiration);
when(connection.getManagementNode(ENTITY_PATH, ENTITY_TYPE))
.thenReturn(Mono.just(managementNode));
// Both tokens are stubbed, but only lockToken1 is expected to be settled below.
when(managementNode.updateDisposition(lockToken1, dispositionStatus, null, null, null))
.thenReturn(Mono.empty());
when(managementNode.updateDisposition(lockToken2, dispositionStatus, null, null, null))
.thenReturn(Mono.empty());
// Pump both messages through the receive pipeline first so the client records their locks.
StepVerifier.create(consumer.receive().take(2))
.then(() -> {
messageSink.next(message);
messageSink.next(message2);
})
.expectNext(receivedMessage)
.expectNext(receivedMessage2)
.thenAwait(Duration.ofSeconds(5))
.verifyComplete();
// Act: settle only the first message using the parameterized disposition.
final Mono<Void> operation;
switch (dispositionStatus) {
case DEFERRED:
operation = consumer.defer(receivedMessage);
break;
case ABANDONED:
operation = consumer.abandon(receivedMessage);
break;
case COMPLETED:
operation = consumer.complete(receivedMessage);
break;
case SUSPENDED:
operation = consumer.deadLetter(receivedMessage);
break;
default:
throw new IllegalArgumentException("Unrecognized operation: " + dispositionStatus);
}
StepVerifier.create(operation)
.verifyComplete();
// Assert: only the settled message's token reached the management node.
verify(managementNode).updateDisposition(lockToken1, dispositionStatus, null, null, null);
verify(managementNode, times(0)).updateDisposition(lockToken2, dispositionStatus, null, null, null);
}
/**
 * Verifies that this receive deferred messages from a sequence Number.
 *
 * NOTE(review): a duplicated Javadoc block and a second {@code @Test} annotation preceded this
 * method; {@code @Test} is not a repeatable annotation, so annotating the method twice does not
 * compile. The dangling pair was removed.
 */
@Test
void receiveDeferredBatchFromSequenceNumber() {
    // Arrange: the management node resolves both deferred sequence numbers.
    final int fromSequenceNumber1 = 10;
    final int fromSequenceNumber2 = 11;
    when(managementNode.receiveDeferredMessageBatch(receiveOptions.getReceiveMode(), fromSequenceNumber1, fromSequenceNumber2))
        .thenReturn(Flux.fromArray(new ServiceBusReceivedMessage[]{receivedMessage, receivedMessage2}));

    // Act & Assert: both deferred messages are emitted in order.
    StepVerifier.create(consumer.receiveDeferredMessageBatch(fromSequenceNumber1, fromSequenceNumber2))
        .expectNext(receivedMessage)
        .expectNext(receivedMessage2)
        .verifyComplete();
}
// Creates {@code numberOfEvents} AMQP messages sharing the same tracking id and sample header.
private List<Message> getMessages(int numberOfEvents) {
    final Map<String, String> header = Collections.singletonMap("SAMPLE_HEADER", "foo");
    return IntStream.range(0, numberOfEvents)
        .mapToObj(unused -> getMessage(PAYLOAD_BYTES, messageTrackingUUID, header))
        .collect(Collectors.toList());
}
} |
* It would be easier to create a static default_dead_letter_options and then pass the arguments in, so that we don't do this extra logic if it is null. | public Mono<Void> deadLetter(ServiceBusReceivedMessage message, DeadLetterOptions deadLetterOptions) {
if (deadLetterOptions != null) {
return updateDisposition(message, DispositionStatus.SUSPENDED, deadLetterOptions.getDeadLetterReason(),
deadLetterOptions.getDeadLetterErrorDescription(), deadLetterOptions.getPropertiesToModify());
} else {
return updateDisposition(message, DispositionStatus.SUSPENDED, null, null, null);
}
} | if (deadLetterOptions != null) { | public Mono<Void> deadLetter(ServiceBusReceivedMessage message, DeadLetterOptions deadLetterOptions) {
Objects.requireNonNull(deadLetterOptions, "'deadLetterOptions' cannot be null.");
return updateDisposition(message, DispositionStatus.SUSPENDED, deadLetterOptions.getDeadLetterReason(),
deadLetterOptions.getDeadLetterErrorDescription(), deadLetterOptions.getPropertiesToModify());
} | class ServiceBusReceiverAsyncClient implements Closeable {
// Guards against double-dispose; flipped once in cleanup().
private final AtomicBoolean isDisposed = new AtomicBoolean();
private final ClientLogger logger = new ClientLogger(ServiceBusReceiverAsyncClient.class);
// Tracks, per lock token, the latest known lock expiration observed from received messages.
private final ConcurrentHashMap<UUID, Instant> lockTokenExpirationMap = new ConcurrentHashMap<>();
private final String fullyQualifiedNamespace;
private final String entityPath;
private final MessagingEntityType entityType;
private final boolean isSessionEnabled;
private final ServiceBusConnectionProcessor connectionProcessor;
private final TracerProvider tracerProvider;
private final MessageSerializer messageSerializer;
// Receive-option snapshots captured at construction time.
private final Duration maxAutoRenewDuration;
private final int prefetch;
private final boolean isAutoComplete;
private final ReceiveMode receiveMode;
/**
 * Map containing linkNames and their associated consumers. Key: linkName Value: consumer associated with that
 * linkName.
 */
private final ConcurrentHashMap<String, ServiceBusAsyncConsumer> openConsumers = new ConcurrentHashMap<>();
// Package-private constructor: validates required collaborators and snapshots the
// receive options into immutable fields.
ServiceBusReceiverAsyncClient(String fullyQualifiedNamespace, String entityPath, MessagingEntityType entityType,
boolean isSessionEnabled, ReceiveMessageOptions receiveMessageOptions,
ServiceBusConnectionProcessor connectionProcessor, TracerProvider tracerProvider,
MessageSerializer messageSerializer) {
this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace,
"'fullyQualifiedNamespace' cannot be null.");
this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null.");
this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null.");
this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null.");
this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null.");
Objects.requireNonNull(receiveMessageOptions, "'receiveMessageOptions' cannot be null.");
// Copy out option values so later mutation of the options object has no effect here.
this.prefetch = receiveMessageOptions.getPrefetchCount();
this.maxAutoRenewDuration = receiveMessageOptions.getMaxAutoRenewDuration();
this.isAutoComplete = receiveMessageOptions.isAutoComplete();
this.receiveMode = receiveMessageOptions.getReceiveMode();
this.entityType = entityType;
this.isSessionEnabled = isSessionEnabled;
}
/**
 * Gets the fully qualified Service Bus namespace that the connection is associated with. This is likely similar to
 * {@code {yournamespace}.servicebus.windows.net}.
 *
 * @return The fully qualified Service Bus namespace that the connection is associated with.
 */
public String getFullyQualifiedNamespace() {
// Simple accessor over the immutable field validated in the constructor.
return fullyQualifiedNamespace;
}
/**
 * Gets the Service Bus resource this client interacts with.
 *
 * @return The Service Bus resource (entity path) this client interacts with.
 */
public String getServiceBusResourceName() {
// Returns the entity path (queue or topic/subscription path) for this receiver.
return entityPath;
}
/**
 * Receives a stream of {@link ServiceBusReceivedMessage}.
 *
 * @return A stream of messages from Service Bus. Errors with {@link IllegalStateException} if the client is
 *     closed, or {@link UnsupportedOperationException} if auto-complete is combined with a non-PEEK_LOCK mode.
 */
public Flux<ServiceBusReceivedMessage> receive() {
if (isDisposed.get()) {
return Flux.error(logger.logExceptionAsError(
new IllegalStateException("Cannot receive from a client that is already closed.")));
}
// Auto-complete requires a lock to settle, which only PEEK_LOCK provides.
if (receiveMode != ReceiveMode.PEEK_LOCK && isAutoComplete) {
return Flux.error(logger.logExceptionAsError(new UnsupportedOperationException(
"Autocomplete is not supported on a receiver opened in ReceiveMode.RECEIVE_AND_DELETE.")));
}
// usingWhen ties the consumer's lifetime to the subscription: created lazily on
// subscribe, disposed (and removed from openConsumers) when the stream terminates.
return Flux.usingWhen(
Mono.fromCallable(() -> getOrCreateConsumer(entityPath)),
consumer -> {
return consumer.receive().map(message -> {
// Messages without a real lock token cannot be settled; pass them through untouched.
if (message.getLockToken() == null || MessageUtils.ZERO_LOCK_TOKEN.equals(message.getLockToken())) {
return message;
}
// Remember the furthest-out lock expiration seen for this token so later
// settlement can validate the lock is still held.
lockTokenExpirationMap.compute(message.getLockToken(), (key, existing) -> {
if (existing == null) {
return message.getLockedUntil();
} else {
return existing.isBefore(message.getLockedUntil())
? message.getLockedUntil()
: existing;
}
});
return message;
});
},
consumer -> {
final String linkName = consumer.getLinkName();
logger.info("{}: Receiving completed. Disposing", linkName);
final ServiceBusAsyncConsumer removed = openConsumers.remove(linkName);
if (removed == null) {
logger.warning("Could not find consumer to remove for: {}", linkName);
return Mono.empty();
} else {
return removed.disposeAsync();
}
});
}
/**
 * Abandon {@link ServiceBusMessage} with lock token. This will make the message available again for processing.
 * Abandoning a message will increase the delivery count on the message.
 *
 * @param message to be used.
 *
 * @return The {@link Mono} that completes when the message has been abandoned.
 */
public Mono<Void> abandon(ServiceBusReceivedMessage message) {
// Delegates to the overload with no property modifications.
return abandon(message, null);
}
/**
 * Abandon {@link ServiceBusMessage} with lock token and updated message property. This will make the message
 * available again for processing. Abandoning a message will increase the delivery count on the message.
 *
 * @param message to be used.
 * @param propertiesToModify Message properties to modify.
 *
 * @return The {@link Mono} that completes when the message has been abandoned.
 */
public Mono<Void> abandon(ServiceBusReceivedMessage message, Map<String, Object> propertiesToModify) {
// ABANDONED disposition carries no dead-letter reason/description.
return updateDisposition(message, DispositionStatus.ABANDONED, null, null, propertiesToModify);
}
/**
 * Completes a {@link ServiceBusMessage} using its lock token. This will delete the message from the service.
 *
 * @param message Message to be completed.
 *
 * @return The {@link Mono} that completes when the message has been settled.
 */
public Mono<Void> complete(ServiceBusReceivedMessage message) {
// COMPLETED disposition removes the message from the entity.
return updateDisposition(message, DispositionStatus.COMPLETED, null, null, null);
}
/**
 * Defers a {@link ServiceBusMessage} using its lock token. This will move message into deferred subqueue.
 *
 * @param message to be used.
 *
 * @return The {@link Mono} that completes when the message has been deferred.
 */
public Mono<Void> defer(ServiceBusReceivedMessage message) {
// Delegates to the overload with no property modifications.
return defer(message, null);
}
/**
 * Asynchronously renews the lock on the message specified by the lock token. The lock will be renewed based on the
 * setting specified on the entity. When a message is received in {@link ReceiveMode#PEEK_LOCK PEEK_LOCK} mode, it is
 * locked on the server for this receiver instance for a duration as specified during the Queue creation
 * (LockDuration). If processing of the message requires longer than this duration, the lock needs to be renewed.
 * For each renewal, the lock is reset to the entity's LockDuration value.
 *
 * @param messageLock The {@link UUID} value of the message lock to renew.
 *
 * @return The {@link Mono} that completes with the new lock expiration.
 */
public Mono<Instant> renewMessageLock(UUID messageLock) {
// Resolve the management node for this entity, then request the renewal.
return connectionProcessor
.flatMap(connection -> connection.getManagementNode(entityPath, entityType))
.flatMap(serviceBusManagementNode -> serviceBusManagementNode
.renewMessageLock(messageLock));
}
/**
 * Asynchronously renews the lock on the specified message. The lock will be renewed based on the setting specified
 * on the entity. When a message is received in {@link ReceiveMode#PEEK_LOCK PEEK_LOCK} mode, it is locked on the
 * server for this receiver instance for a duration as specified during the Queue creation (LockDuration). If
 * processing of the message requires longer than this duration, the lock needs to be renewed. For each renewal, the
 * lock is reset to the entity's LockDuration value.
 *
 * @param receivedMessage to be used to renew.
 *
 * @return The {@link Mono} that completes with the new lock expiration.
 */
public Mono<Instant> renewMessageLock(ServiceBusReceivedMessage receivedMessage) {
return connectionProcessor
.flatMap(connection -> connection.getManagementNode(entityPath, entityType))
.flatMap(serviceBusManagementNode -> serviceBusManagementNode
.renewMessageLock(receivedMessage.getLockToken())
.map(instant -> {
// Also update the message's own view of its lock expiration.
receivedMessage.setLockedUntil(instant);
return instant;
}));
}
/**
 * Defers a {@link ServiceBusMessage} using its lock token with modified message property. This will move message
 * into deferred subqueue.
 *
 * @param message to be used.
 * @param propertiesToModify Message properties to modify.
 *
 * @return The {@link Mono} that completes when the message has been deferred.
 */
public Mono<Void> defer(ServiceBusReceivedMessage message, Map<String, Object> propertiesToModify) {
// DEFERRED disposition carries no dead-letter reason/description.
return updateDisposition(message, DispositionStatus.DEFERRED, null, null, propertiesToModify);
}
/**
 * Moves a {@link ServiceBusMessage} to the deadletter sub-queue.
 *
 * @param message to be used.
 *
 * @return The {@link Mono} that completes when the message has been moved to the deadletter sub-queue.
 */
public Mono<Void> deadLetter(ServiceBusReceivedMessage message) {
    // Settle directly rather than delegating to deadLetter(message, null): the options overload
    // rejects a null DeadLetterOptions with a NullPointerException, which would make this
    // no-options convenience method unusable.
    return updateDisposition(message, DispositionStatus.SUSPENDED, null, null, null);
}
/**
 * Receives a deferred {@link ServiceBusMessage}. Deferred messages can only be received by using sequence number.
 *
 * NOTE(review): an orphaned Javadoc block documenting a {@code deadLetter(message, deadLetterOptions)}
 * overload (not present here) preceded this method and was removed; the truncated
 * {@code {@link ServiceBusReceivedMessage} reference in the param description was repaired.
 *
 * @param sequenceNumber The {@link ServiceBusReceivedMessage#getSequenceNumber() sequence number} of the message.
 *
 * @return The {@link Mono} that completes with the deferred message.
 */
public Mono<ServiceBusReceivedMessage> receiveDeferredMessage(long sequenceNumber) {
    // Resolve the management node for this entity, then fetch the deferred message in the
    // receiver's configured receive mode.
    return connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMap(node -> node.receiveDeferredMessage(receiveMode, sequenceNumber));
}
/**
 * Reads the next active message without changing the state of the receiver or the message source. The first call to
 * {@code peek()} fetches the first active message for this receiver. Each subsequent call fetches the subsequent
 * message in the entity.
 *
 * @return Single {@link ServiceBusReceivedMessage} peeked.
 */
public Mono<ServiceBusReceivedMessage> peek() {
    // Resolve the management node for this entity, then ask it for the next active message.
    return connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMap(managementNode -> managementNode.peek());
}
/**
 * Receives a deferred {@link ServiceBusReceivedMessage}. Deferred messages can only be received by using
 * sequence number.
 *
 * @param sequenceNumbers of the messages to be received.
 * @return The {@link Flux} of deferred {@link ServiceBusReceivedMessage}.
 */
public Flux<ServiceBusReceivedMessage> receiveDeferredMessageBatch(long... sequenceNumbers) {
    // Resolve the management node, then fetch every requested deferred message in the
    // receiver's configured receive mode.
    return connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMapMany(managementNode -> managementNode.receiveDeferredMessageBatch(receiveMode, sequenceNumbers));
}
/**
 * Reads next the active message without changing the state of the receiver or the message source.
 *
 * @param fromSequenceNumber The sequence number from where to read the message.
 *
 * @return Single {@link ServiceBusReceivedMessage} peeked.
 */
public Mono<ServiceBusReceivedMessage> peek(long fromSequenceNumber) {
    // Resolve the management node, then peek starting at the requested sequence number.
    return connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMap(managementNode -> managementNode.peek(fromSequenceNumber));
}
/**
 * Reads the next batch of active messages without changing the state of the receiver or the message source.
 *
 * @param maxMessages The number of messages.
 *
 * @return The {@link Flux} of {@link ServiceBusReceivedMessage} peeked.
 */
public Flux<ServiceBusReceivedMessage> peekBatch(int maxMessages) {
    // Resolve the management node, then peek up to maxMessages active messages.
    return connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMapMany(managementNode -> managementNode.peekBatch(maxMessages));
}
/**
* Reads the next batch of active messages without changing the state of the receiver or the message source.
*
* @param maxMessages The number of messages.
* @param fromSequenceNumber The sequence number from where to read the message.
*
* @return The {@link Flux} of {@link ServiceBusReceivedMessage} peeked.
*/
public Flux<ServiceBusReceivedMessage> peekBatch(int maxMessages, long fromSequenceNumber) {
    // Same as peekBatch(int), but starting from an explicit sequence number.
    final Mono<ServiceBusManagementNode> managementNode = connectionProcessor
        .flatMap(con -> con.getManagementNode(entityPath, entityType));
    return managementNode.flatMapMany(mgmt -> mgmt.peekBatch(maxMessages, fromSequenceNumber));
}
/**
* Disposes of the consumer by closing the underlying connection to the service.
*/
@Override
public void close() {
    // Fast path: a previous close()/cleanup() already ran (or is running).
    if (isDisposed.get()) {
        return;
    }
    logger.info("Removing receiver client.");
    // Dispose the connection first so no new receive links are created during cleanup.
    connectionProcessor.dispose();
    // Synchronously wait for the open consumers to be disposed, bounded by the try-timeout.
    cleanup().block(connectionProcessor.getRetryOptions().getTryTimeout());
}
/**
 * Disposes of all open consumers and clears the consumer map. Idempotent: only the first
 * invocation performs work; subsequent calls complete immediately.
 *
 * @return A {@link Mono} that completes when every open consumer has been disposed.
 */
Mono<Void> cleanup() {
    if (isDisposed.getAndSet(true)) {
        return Mono.empty();
    }
    // Iterate the values view directly instead of keySet() + get(): avoids a second map
    // lookup per entry and the null-filter that was only needed because an entry could be
    // removed between the two calls (ConcurrentHashMap values are never null).
    final List<Mono<Void>> disposals = openConsumers.values().stream()
        .map(ServiceBusAsyncConsumer::disposeAsync)
        .collect(Collectors.toList());
    // Complete once every disposal has finished, then drop the consumer references.
    return Mono.when(disposals).then(Mono.fromRunnable(() -> openConsumers.clear()));
}
// Validates that this receiver tracks the given lock token and that the lock is unexpired.
// Emits false when the token is unknown to this receiver, true when the lock is still held,
// and an AmqpException error when the lock has already expired.
private Mono<Boolean> isLockTokenValid(UUID lockToken) {
    final Instant lockedUntilUtc = lockTokenExpirationMap.get(lockToken);
    if (lockedUntilUtc == null) {
        // Token was never recorded (or was settled and removed) — "not valid", not an error.
        logger.warning("lockToken[{}] is not owned by this receiver.", lockToken);
        return Mono.just(false);
    }
    final Instant now = Instant.now();
    if (lockedUntilUtc.isBefore(now)) {
        // An expired lock can no longer be settled; surface it as an AmqpException.
        return Mono.error(logger.logExceptionAsError(new AmqpException(false, String.format(
            "Lock already expired for the lock token. Expiration: '%s'. Now: '%s'", lockedUntilUtc, now),
            getErrorContext())));
    }
    return Mono.just(true);
}
// Settles a message (complete/abandon/defer/dead-letter) through the management node.
// Requires PEEK_LOCK mode and a message that still holds a valid lock token; on success
// the token is removed from the local expiration map.
private Mono<Void> updateDisposition(ServiceBusReceivedMessage message, DispositionStatus dispositionStatus,
    String deadLetterReason, String deadLetterErrorDescription, Map<String, Object> propertiesToModify) {
    if (message == null) {
        return Mono.error(new NullPointerException("'message' cannot be null."));
    }
    final UUID lockToken = message.getLockToken();
    // Settlement only makes sense when messages are locked; RECEIVE_AND_DELETE has no lock.
    if (receiveMode != ReceiveMode.PEEK_LOCK) {
        return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
            "'%s' is not supported on a receiver opened in ReceiveMode.RECEIVE_AND_DELETE.", dispositionStatus))));
    } else if (lockToken == null) {
        return Mono.error(logger.logExceptionAsError(new IllegalArgumentException(
            "'message.getLockToken()' cannot be null.")));
    }
    // 'instant' may be null here; it is only used for the log statement below.
    final Instant instant = lockTokenExpirationMap.get(lockToken);
    logger.info("{}: Update started. Disposition: {}. Sequence number: {}. Lock: {}. Expiration: {}",
        entityPath, dispositionStatus, message.getSequenceNumber(), lockToken, instant);
    return isLockTokenValid(lockToken).flatMap(isLocked -> {
        return connectionProcessor.flatMap(connection -> connection.getManagementNode(entityPath, entityType))
            .flatMap(node -> {
                if (isLocked) {
                    return node.updateDisposition(lockToken, dispositionStatus, deadLetterReason,
                        deadLetterErrorDescription, propertiesToModify);
                } else {
                    // isLockTokenValid emitted false: the token is not owned by this receiver.
                    return Mono.error(new UnsupportedOperationException(
                        "Cannot complete a message that is not locked. lockToken: " + lockToken));
                }
            });
    }).then(Mono.fromRunnable(() -> {
        // Settled successfully — stop tracking the lock locally.
        logger.info("{}: Update completed. Disposition: {}. Sequence number: {}. Lock: {}.",
            entityPath, dispositionStatus, message.getSequenceNumber(), lockToken);
        lockTokenExpirationMap.remove(lockToken);
    }));
}
// Returns the consumer registered under linkName, creating and caching one if absent.
// The receive-link Flux repeats so that a replacement link is requested each time the
// current one terminates; the link processor consumes that stream of links.
private ServiceBusAsyncConsumer getOrCreateConsumer(String linkName) {
    return openConsumers.computeIfAbsent(linkName, name -> {
        logger.info("{}: Creating consumer for link '{}'", entityPath, linkName);
        final Flux<AmqpReceiveLink> receiveLink =
            connectionProcessor.flatMap(connection -> connection.createReceiveLink(linkName, entityPath,
                receiveMode, isSessionEnabled, null, entityType))
                .doOnNext(next -> {
                    final String format = "Created consumer for Service Bus resource: [{}] mode: [{}]"
                        + " sessionEnabled? {} transferEntityPath: [{}], entityType: [{}]";
                    logger.verbose(format, next.getEntityPath(), receiveMode, isSessionEnabled, "N/A",
                        entityType);
                })
                .repeat(); // re-subscribe for a fresh link after the previous one completes
        final AmqpRetryPolicy retryPolicy = RetryUtil.getRetryPolicy(connectionProcessor.getRetryOptions());
        final ServiceBusReceiveLinkProcessor linkMessageProcessor = receiveLink.subscribeWith(
            new ServiceBusReceiveLinkProcessor(prefetch, retryPolicy, connectionProcessor));
        return new ServiceBusAsyncConsumer(linkName, linkMessageProcessor, messageSerializer, isAutoComplete,
            connectionProcessor.getRetryOptions(), this::complete, this::abandon);
    });
}
private AmqpErrorContext getErrorContext() {
    // Errors are reported against the namespace/entity pair this client targets.
    final String namespace = getFullyQualifiedNamespace();
    final String entityName = getServiceBusResourceName();
    return new SessionErrorContext(namespace, entityName);
}
} | class ServiceBusReceiverAsyncClient implements Closeable {
private static final DeadLetterOptions DEFAULT_DEAD_LETTER_OPTIONS = new DeadLetterOptions();
private final AtomicBoolean isDisposed = new AtomicBoolean();
private final ClientLogger logger = new ClientLogger(ServiceBusReceiverAsyncClient.class);
private final ConcurrentHashMap<UUID, Instant> lockTokenExpirationMap = new ConcurrentHashMap<>();
private final String fullyQualifiedNamespace;
private final String entityPath;
private final MessagingEntityType entityType;
private final boolean isSessionEnabled;
private final ServiceBusConnectionProcessor connectionProcessor;
private final TracerProvider tracerProvider;
private final MessageSerializer messageSerializer;
private final Duration maxAutoRenewDuration;
private final int prefetch;
private final boolean isAutoComplete;
private final ReceiveMode receiveMode;
/**
* Map containing linkNames and their associated consumers. Key: linkName Value: consumer associated with that
* linkName.
*/
private final ConcurrentHashMap<String, ServiceBusAsyncConsumer> openConsumers = new ConcurrentHashMap<>();
// Package-private constructor; fails fast on null collaborators so that misconfiguration
// surfaces at construction time rather than on first use.
ServiceBusReceiverAsyncClient(String fullyQualifiedNamespace, String entityPath, MessagingEntityType entityType,
    boolean isSessionEnabled, ReceiveMessageOptions receiveMessageOptions,
    ServiceBusConnectionProcessor connectionProcessor, TracerProvider tracerProvider,
    MessageSerializer messageSerializer) {
    this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace,
        "'fullyQualifiedNamespace' cannot be null.");
    this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null.");
    this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null.");
    this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null.");
    this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null.");
    Objects.requireNonNull(receiveMessageOptions, "'receiveMessageOptions' cannot be null.");
    // Unpack the receive options into immutable fields.
    this.prefetch = receiveMessageOptions.getPrefetchCount();
    this.maxAutoRenewDuration = receiveMessageOptions.getMaxAutoRenewDuration();
    this.isAutoComplete = receiveMessageOptions.isAutoComplete();
    this.receiveMode = receiveMessageOptions.getReceiveMode();
    this.entityType = entityType;
    this.isSessionEnabled = isSessionEnabled;
}
/**
* Gets the fully qualified Service Bus namespace that the connection is associated with. This is likely similar to
* {@code {yournamespace}.servicebus.windows.net}.
*
* @return The fully qualified Service Bus namespace that the connection is associated with.
*/
public String getFullyQualifiedNamespace() {
    // Simple accessor over the field set at construction.
    return this.fullyQualifiedNamespace;
}
/**
* Gets the Service Bus resource this client interacts with.
*
* @return The Service Bus resource this client interacts with.
*/
public String getServiceBusResourceName() {
    // The entity path doubles as the resource name.
    return this.entityPath;
}
/**
* Receives a stream of {@link ServiceBusReceivedMessage}.
*
* @return A stream of messages from Service Bus.
*/
public Flux<ServiceBusReceivedMessage> receive() {
    if (isDisposed.get()) {
        return Flux.error(logger.logExceptionAsError(
            new IllegalStateException("Cannot receive from a client that is already closed.")));
    }
    // Auto-complete needs a lock to settle against; RECEIVE_AND_DELETE never holds one.
    if (receiveMode != ReceiveMode.PEEK_LOCK && isAutoComplete) {
        return Flux.error(logger.logExceptionAsError(new UnsupportedOperationException(
            "Autocomplete is not supported on a receiver opened in ReceiveMode.RECEIVE_AND_DELETE.")));
    }
    // usingWhen ties the consumer's lifetime to the subscription: created lazily on
    // subscribe, removed and disposed when the stream terminates or is cancelled.
    return Flux.usingWhen(
        Mono.fromCallable(() -> getOrCreateConsumer(entityPath)),
        consumer -> {
            return consumer.receive().map(message -> {
                // Messages without a real lock token pass through untracked.
                if (message.getLockToken() == null || MessageUtils.ZERO_LOCK_TOKEN.equals(message.getLockToken())) {
                    return message;
                }
                // Track the lock expiration per token, keeping the later of the existing
                // and incoming lockedUntil values.
                lockTokenExpirationMap.compute(message.getLockToken(), (key, existing) -> {
                    if (existing == null) {
                        return message.getLockedUntil();
                    } else {
                        return existing.isBefore(message.getLockedUntil())
                            ? message.getLockedUntil()
                            : existing;
                    }
                });
                return message;
            });
        },
        consumer -> {
            // Cleanup path: detach the consumer created for this subscription and dispose it.
            final String linkName = consumer.getLinkName();
            logger.info("{}: Receiving completed. Disposing", linkName);
            final ServiceBusAsyncConsumer removed = openConsumers.remove(linkName);
            if (removed == null) {
                logger.warning("Could not find consumer to remove for: {}", linkName);
                return Mono.empty();
            } else {
                return removed.disposeAsync();
            }
        });
}
/**
* Abandon {@link ServiceBusMessage} with lock token. This will make the message available again for processing.
* Abandoning a message will increase the delivery count on the message.
*
* @param message to be used.
*
* @return The {@link Mono} the finishes this operation on service bus resource.
*/
public Mono<Void> abandon(ServiceBusReceivedMessage message) {
    // Equivalent to abandon(message, null): abandon with no property modifications.
    return updateDisposition(message, DispositionStatus.ABANDONED, null, null, null);
}
/**
* Abandon {@link ServiceBusMessage} with lock token and updated message property. This will make the message
* available again for processing. Abandoning a message will increase the delivery count on the message.
*
* @param message to be used.
* @param propertiesToModify Message properties to modify.
*
* @return The {@link Mono} the finishes this operation on service bus resource.
*/
public Mono<Void> abandon(ServiceBusReceivedMessage message, Map<String, Object> propertiesToModify) {
    // ABANDONED releases the lock back to the service so the message can be redelivered.
    return updateDisposition(message, DispositionStatus.ABANDONED,
        /* deadLetterReason */ null, /* deadLetterErrorDescription */ null, propertiesToModify);
}
/**
* Completes a {@link ServiceBusMessage} using its lock token. This will delete the message from the service.
*
* @param message Message to be completed.
*
* @return The {@link Mono} the finishes this operation on service bus resource.
*/
public Mono<Void> complete(ServiceBusReceivedMessage message) {
    // COMPLETED settles the message, deleting it from the service.
    return updateDisposition(message, DispositionStatus.COMPLETED,
        /* deadLetterReason */ null, /* deadLetterErrorDescription */ null, /* propertiesToModify */ null);
}
/**
* Defers a {@link ServiceBusMessage} using its lock token. This will move message into deferred subqueue.
*
* @param message to be used.
*
* @return The {@link Mono} the finishes this operation on service bus resource.
*/
public Mono<Void> defer(ServiceBusReceivedMessage message) {
    // Equivalent to defer(message, null): defer with no property modifications.
    return updateDisposition(message, DispositionStatus.DEFERRED, null, null, null);
}
/**
* Asynchronously renews the lock on the message specified by the lock token. The lock will be renewed based on the
* setting specified on the entity. When a message is received in {@link ReceiveMode#PEEK_LOCK PEEK_LOCK} mode, the message is
* locked on the server for this receiver instance for a duration as specified during the Queue creation
* (LockDuration). If processing of the message requires longer than this duration, the lock needs to be renewed.
* For each renewal, the lock is reset to the entity's LockDuration value.
*
* @param messageLock The {@link UUID} value of the message lock to renew.
*
* @return The {@link Mono} the finishes this operation on service bus resource.
*/
public Mono<Instant> renewMessageLock(UUID messageLock) {
    // Lock renewal goes through the management node for this entity.
    final Mono<ServiceBusManagementNode> managementNode = connectionProcessor
        .flatMap(con -> con.getManagementNode(entityPath, entityType));
    return managementNode.flatMap(mgmt -> mgmt.renewMessageLock(messageLock));
}
/**
* Asynchronously renews the lock on the specified message. The lock will be renewed based on the setting specified
* on the entity. When a message is received in {@link ReceiveMode#PEEK_LOCK PEEK_LOCK} mode, the message is locked on the
* server for this receiver instance for a duration as specified during the Queue creation (LockDuration). If
* processing of the message requires longer than this duration, the lock needs to be renewed. For each renewal, the
* lock is reset to the entity's LockDuration value.
*
* @param receivedMessage to be used to renew.
*
* @return The {@link Mono} the finishes this operation on service bus resource.
*/
// NOTE(review): receivedMessage is dereferenced inside the lambda without a null check —
// a null argument fails with a bare NPE at subscription time; confirm callers guarantee non-null.
public Mono<Instant> renewMessageLock(ServiceBusReceivedMessage receivedMessage) {
    return connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMap(serviceBusManagementNode -> serviceBusManagementNode
            .renewMessageLock(receivedMessage.getLockToken())
            .map(instant -> {
                // Side effect: keep the message's local lockedUntil in sync with the service.
                receivedMessage.setLockedUntil(instant);
                return instant;
            }));
}
/**
* Defers a {@link ServiceBusMessage} using its lock token with modified message property. This will move message
* into deferred subqueue.
*
* @param message to be used.
* @param propertiesToModify Message properties to modify.
*
* @return The {@link Mono} the finishes this operation on service bus resource.
*/
public Mono<Void> defer(ServiceBusReceivedMessage message, Map<String, Object> propertiesToModify) {
    // DEFERRED moves the message into the deferred subqueue; it can later be fetched by sequence number.
    return updateDisposition(message, DispositionStatus.DEFERRED,
        /* deadLetterReason */ null, /* deadLetterErrorDescription */ null, propertiesToModify);
}
/**
* Moves a {@link ServiceBusMessage} to the deadletter sub-queue.
*
* @param message to be used.
*
* @return The {@link Mono} the finishes this operation on service bus resource.
*/
public Mono<Void> deadLetter(ServiceBusReceivedMessage message) {
    // Delegates to the options overload using the shared default options (a plain
    // new DeadLetterOptions(): no reason, description, or property modifications).
    return deadLetter(message, DEFAULT_DEAD_LETTER_OPTIONS);
}
/**
* Moves a {@link ServiceBusMessage} to the deadletter sub-queue with deadletter reason, error description and
* modified properties.
*
* @param message to be used.
* @param deadLetterOptions The options to specify while moving message to the deadletter sub-queue.
*
* @return The {@link Mono} the finishes this operation on service bus resource.
*/
/**
* Receives a deferred {@link ServiceBusMessage}. Deferred messages can only be received by using sequence number.
*
* @param sequenceNumber The {@link ServiceBusReceivedMessage#getSequenceNumber() sequence number} of the message.
*
* @return The {@link Mono} the finishes this operation on service bus resource.
*/
public Mono<ServiceBusReceivedMessage> receiveDeferredMessage(long sequenceNumber) {
    // A single deferred message is fetched through the management node by its sequence number.
    final Mono<ServiceBusManagementNode> managementNode = connectionProcessor
        .flatMap(con -> con.getManagementNode(entityPath, entityType));
    return managementNode.flatMap(mgmt -> mgmt.receiveDeferredMessage(receiveMode, sequenceNumber));
}
/**
* Reads the next active message without changing the state of the receiver or the message source. The first call to
* {@code peek()} fetches the first active message for this receiver. Each subsequent call fetches the subsequent
* message in the entity.
*
* @return Single {@link ServiceBusReceivedMessage} peeked.
*/
public Mono<ServiceBusReceivedMessage> peek() {
return connectionProcessor
.flatMap(connection -> connection.getManagementNode(entityPath, entityType))
.flatMap(ServiceBusManagementNode::peek);
}
/**
* Receives a deferred {@link ServiceBusReceivedMessage}. Deferred messages can only be received by using
* sequence number.
*
* @param sequenceNumbers of the messages to be received.
* @return The {@link Flux} of deferred {@link ServiceBusReceivedMessage}.
*/
public Flux<ServiceBusReceivedMessage> receiveDeferredMessageBatch(long... sequenceNumbers) {
return connectionProcessor
.flatMap(connection -> connection.getManagementNode(entityPath, entityType))
.flatMapMany(node -> node.receiveDeferredMessageBatch(receiveMode, sequenceNumbers));
}
/**
* Reads the next active message without changing the state of the receiver or the message source.
*
* @param fromSequenceNumber The sequence number from where to read the message.
*
* @return Single {@link ServiceBusReceivedMessage} peeked.
*/
public Mono<ServiceBusReceivedMessage> peek(long fromSequenceNumber) {
return connectionProcessor
.flatMap(connection -> connection.getManagementNode(entityPath, entityType))
.flatMap(node -> node.peek(fromSequenceNumber));
}
/**
* Reads the next batch of active messages without changing the state of the receiver or the message source.
*
* @param maxMessages The number of messages.
*
* @return The {@link Flux} of {@link ServiceBusReceivedMessage} peeked.
*/
public Flux<ServiceBusReceivedMessage> peekBatch(int maxMessages) {
return connectionProcessor
.flatMap(connection -> connection.getManagementNode(entityPath, entityType))
.flatMapMany(node -> node.peekBatch(maxMessages));
}
/**
* Reads the next batch of active messages without changing the state of the receiver or the message source.
*
* @param maxMessages The number of messages.
* @param fromSequenceNumber The sequence number from where to read the message.
*
* @return The {@link Flux} of {@link ServiceBusReceivedMessage} peeked.
*/
public Flux<ServiceBusReceivedMessage> peekBatch(int maxMessages, long fromSequenceNumber) {
return connectionProcessor
.flatMap(connection -> connection.getManagementNode(entityPath, entityType))
.flatMapMany(node -> node.peekBatch(maxMessages, fromSequenceNumber));
}
/**
* Disposes of the consumer by closing the underlying connection to the service.
*/
@Override
public void close() {
if (isDisposed.get()) {
return;
}
logger.info("Removing receiver client.");
connectionProcessor.dispose();
cleanup().block(connectionProcessor.getRetryOptions().getTryTimeout());
}
Mono<Void> cleanup() {
if (isDisposed.getAndSet(true)) {
return Mono.empty();
}
List<Mono<Void>> collect = openConsumers.keySet().stream()
.map(e -> {
final ServiceBusAsyncConsumer consumer = openConsumers.get(e);
return consumer != null ? consumer.disposeAsync() : null;
})
.filter(Objects::nonNull)
.collect(Collectors.toList());
return Mono.when(collect).then(Mono.fromRunnable(() -> openConsumers.clear()));
}
private Mono<Boolean> isLockTokenValid(UUID lockToken) {
final Instant lockedUntilUtc = lockTokenExpirationMap.get(lockToken);
if (lockedUntilUtc == null) {
logger.warning("lockToken[{}] is not owned by this receiver.", lockToken);
return Mono.just(false);
}
final Instant now = Instant.now();
if (lockedUntilUtc.isBefore(now)) {
return Mono.error(logger.logExceptionAsError(new AmqpException(false, String.format(
"Lock already expired for the lock token. Expiration: '%s'. Now: '%s'", lockedUntilUtc, now),
getErrorContext())));
}
return Mono.just(true);
}
private Mono<Void> updateDisposition(ServiceBusReceivedMessage message, DispositionStatus dispositionStatus,
String deadLetterReason, String deadLetterErrorDescription, Map<String, Object> propertiesToModify) {
if (message == null) {
return Mono.error(new NullPointerException("'message' cannot be null."));
}
final UUID lockToken = message.getLockToken();
if (receiveMode != ReceiveMode.PEEK_LOCK) {
return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
"'%s' is not supported on a receiver opened in ReceiveMode.RECEIVE_AND_DELETE.", dispositionStatus))));
} else if (lockToken == null) {
return Mono.error(logger.logExceptionAsError(new IllegalArgumentException(
"'message.getLockToken()' cannot be null.")));
}
final Instant instant = lockTokenExpirationMap.get(lockToken);
logger.info("{}: Update started. Disposition: {}. Sequence number: {}. Lock: {}. Expiration: {}",
entityPath, dispositionStatus, message.getSequenceNumber(), lockToken, instant);
return isLockTokenValid(lockToken).flatMap(isLocked -> {
return connectionProcessor.flatMap(connection -> connection.getManagementNode(entityPath, entityType))
.flatMap(node -> {
if (isLocked) {
return node.updateDisposition(lockToken, dispositionStatus, deadLetterReason,
deadLetterErrorDescription, propertiesToModify);
} else {
return Mono.error(new UnsupportedOperationException(
"Cannot complete a message that is not locked. lockToken: " + lockToken));
}
});
}).then(Mono.fromRunnable(() -> {
logger.info("{}: Update completed. Disposition: {}. Sequence number: {}. Lock: {}.",
entityPath, dispositionStatus, message.getSequenceNumber(), lockToken);
lockTokenExpirationMap.remove(lockToken);
}));
}
private ServiceBusAsyncConsumer getOrCreateConsumer(String linkName) {
return openConsumers.computeIfAbsent(linkName, name -> {
logger.info("{}: Creating consumer for link '{}'", entityPath, linkName);
final Flux<AmqpReceiveLink> receiveLink =
connectionProcessor.flatMap(connection -> connection.createReceiveLink(linkName, entityPath,
receiveMode, isSessionEnabled, null, entityType))
.doOnNext(next -> {
final String format = "Created consumer for Service Bus resource: [{}] mode: [{}]"
+ " sessionEnabled? {} transferEntityPath: [{}], entityType: [{}]";
logger.verbose(format, next.getEntityPath(), receiveMode, isSessionEnabled, "N/A",
entityType);
})
.repeat();
final AmqpRetryPolicy retryPolicy = RetryUtil.getRetryPolicy(connectionProcessor.getRetryOptions());
final ServiceBusReceiveLinkProcessor linkMessageProcessor = receiveLink.subscribeWith(
new ServiceBusReceiveLinkProcessor(prefetch, retryPolicy, connectionProcessor));
return new ServiceBusAsyncConsumer(linkName, linkMessageProcessor, messageSerializer, isAutoComplete,
connectionProcessor.getRetryOptions(), this::complete, this::abandon);
});
}
private AmqpErrorContext getErrorContext() {
return new SessionErrorContext(getFullyQualifiedNamespace(), getServiceBusResourceName());
}
} |
Since this is an optional parameter, I expected another overload that just takes the received message and under the covers would pass default_dead_letter_options, that way we can do a null object assertion in this method. | public Mono<Void> deadLetter(ServiceBusReceivedMessage message, DeadLetterOptions deadLetterOptions) {
if (deadLetterOptions != null) {
return updateDisposition(message, DispositionStatus.SUSPENDED, deadLetterOptions.getDeadLetterReason(),
deadLetterOptions.getDeadLetterErrorDescription(), deadLetterOptions.getPropertiesToModify());
} else {
return updateDisposition(message, DispositionStatus.SUSPENDED, null, null, null);
}
} | if (deadLetterOptions != null) { | public Mono<Void> deadLetter(ServiceBusReceivedMessage message, DeadLetterOptions deadLetterOptions) {
Objects.requireNonNull(deadLetterOptions, "'deadLetterOptions' cannot be null.");
return updateDisposition(message, DispositionStatus.SUSPENDED, deadLetterOptions.getDeadLetterReason(),
deadLetterOptions.getDeadLetterErrorDescription(), deadLetterOptions.getPropertiesToModify());
} | class ServiceBusReceiverAsyncClient implements Closeable {
private final AtomicBoolean isDisposed = new AtomicBoolean();
private final ClientLogger logger = new ClientLogger(ServiceBusReceiverAsyncClient.class);
private final ConcurrentHashMap<UUID, Instant> lockTokenExpirationMap = new ConcurrentHashMap<>();
private final String fullyQualifiedNamespace;
private final String entityPath;
private final MessagingEntityType entityType;
private final boolean isSessionEnabled;
private final ServiceBusConnectionProcessor connectionProcessor;
private final TracerProvider tracerProvider;
private final MessageSerializer messageSerializer;
private final Duration maxAutoRenewDuration;
private final int prefetch;
private final boolean isAutoComplete;
private final ReceiveMode receiveMode;
/**
* Map containing linkNames and their associated consumers. Key: linkName Value: consumer associated with that
* linkName.
*/
private final ConcurrentHashMap<String, ServiceBusAsyncConsumer> openConsumers = new ConcurrentHashMap<>();
ServiceBusReceiverAsyncClient(String fullyQualifiedNamespace, String entityPath, MessagingEntityType entityType,
boolean isSessionEnabled, ReceiveMessageOptions receiveMessageOptions,
ServiceBusConnectionProcessor connectionProcessor, TracerProvider tracerProvider,
MessageSerializer messageSerializer) {
this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace,
"'fullyQualifiedNamespace' cannot be null.");
this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null.");
this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null.");
this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null.");
this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null.");
Objects.requireNonNull(receiveMessageOptions, "'receiveMessageOptions' cannot be null.");
this.prefetch = receiveMessageOptions.getPrefetchCount();
this.maxAutoRenewDuration = receiveMessageOptions.getMaxAutoRenewDuration();
this.isAutoComplete = receiveMessageOptions.isAutoComplete();
this.receiveMode = receiveMessageOptions.getReceiveMode();
this.entityType = entityType;
this.isSessionEnabled = isSessionEnabled;
}
/**
* Gets the fully qualified Service Bus namespace that the connection is associated with. This is likely similar to
* {@code {yournamespace}.servicebus.windows.net}.
*
* @return The fully qualified Service Bus namespace that the connection is associated with.
*/
public String getFullyQualifiedNamespace() {
return fullyQualifiedNamespace;
}
/**
* Gets the Service Bus resource this client interacts with.
*
* @return The Service Bus resource this client interacts with.
*/
public String getServiceBusResourceName() {
return entityPath;
}
/**
* Receives a stream of {@link ServiceBusReceivedMessage}.
*
* @return A stream of messages from Service Bus.
*/
public Flux<ServiceBusReceivedMessage> receive() {
if (isDisposed.get()) {
return Flux.error(logger.logExceptionAsError(
new IllegalStateException("Cannot receive from a client that is already closed.")));
}
if (receiveMode != ReceiveMode.PEEK_LOCK && isAutoComplete) {
return Flux.error(logger.logExceptionAsError(new UnsupportedOperationException(
"Autocomplete is not supported on a receiver opened in ReceiveMode.RECEIVE_AND_DELETE.")));
}
return Flux.usingWhen(
Mono.fromCallable(() -> getOrCreateConsumer(entityPath)),
consumer -> {
return consumer.receive().map(message -> {
if (message.getLockToken() == null || MessageUtils.ZERO_LOCK_TOKEN.equals(message.getLockToken())) {
return message;
}
lockTokenExpirationMap.compute(message.getLockToken(), (key, existing) -> {
if (existing == null) {
return message.getLockedUntil();
} else {
return existing.isBefore(message.getLockedUntil())
? message.getLockedUntil()
: existing;
}
});
return message;
});
},
consumer -> {
final String linkName = consumer.getLinkName();
logger.info("{}: Receiving completed. Disposing", linkName);
final ServiceBusAsyncConsumer removed = openConsumers.remove(linkName);
if (removed == null) {
logger.warning("Could not find consumer to remove for: {}", linkName);
return Mono.empty();
} else {
return removed.disposeAsync();
}
});
}
/**
* Abandon {@link ServiceBusMessage} with lock token. This will make the message available again for processing.
* Abandoning a message will increase the delivery count on the message.
*
* @param message to be used.
*
* @return The {@link Mono} the finishes this operation on service bus resource.
*/
public Mono<Void> abandon(ServiceBusReceivedMessage message) {
return abandon(message, null);
}
/**
* Abandon {@link ServiceBusMessage} with lock token and updated message property. This will make the message
* available again for processing. Abandoning a message will increase the delivery count on the message.
*
* @param message to be used.
* @param propertiesToModify Message properties to modify.
*
* @return The {@link Mono} the finishes this operation on service bus resource.
*/
public Mono<Void> abandon(ServiceBusReceivedMessage message, Map<String, Object> propertiesToModify) {
return updateDisposition(message, DispositionStatus.ABANDONED, null, null, propertiesToModify);
}
/**
* Completes a {@link ServiceBusMessage} using its lock token. This will delete the message from the service.
*
* @param message Message to be completed.
*
* @return The {@link Mono} the finishes this operation on service bus resource.
*/
public Mono<Void> complete(ServiceBusReceivedMessage message) {
return updateDisposition(message, DispositionStatus.COMPLETED, null, null, null);
}
/**
* Defers a {@link ServiceBusMessage} using its lock token. This will move message into deferred subqueue.
*
* @param message to be used.
*
* @return The {@link Mono} the finishes this operation on service bus resource.
*/
public Mono<Void> defer(ServiceBusReceivedMessage message) {
return defer(message, null);
}
/**
* Asynchronously renews the lock on the message specified by the lock token. The lock will be renewed based on the
* setting specified on the entity. When a message is received in {@link ReceiveMode#PEEK_LOCK PEEK_LOCK} mode, the message is
* locked on the server for this receiver instance for a duration as specified during the Queue creation
* (LockDuration). If processing of the message requires longer than this duration, the lock needs to be renewed.
* For each renewal, the lock is reset to the entity's LockDuration value.
*
* @param messageLock The {@link UUID} value of the message lock to renew.
*
* @return The {@link Mono} the finishes this operation on service bus resource.
*/
public Mono<Instant> renewMessageLock(UUID messageLock) {
return connectionProcessor
.flatMap(connection -> connection.getManagementNode(entityPath, entityType))
.flatMap(serviceBusManagementNode -> serviceBusManagementNode
.renewMessageLock(messageLock));
}
/**
* Asynchronously renews the lock on the specified message. The lock will be renewed based on the setting specified
* on the entity. When a message is received in {@link ReceiveMode#PEEK_LOCK PEEK_LOCK} mode, the message is locked on the
* server for this receiver instance for a duration as specified during the Queue creation (LockDuration). If
* processing of the message requires longer than this duration, the lock needs to be renewed. For each renewal, the
* lock is reset to the entity's LockDuration value.
*
* @param receivedMessage to be used to renew.
*
* @return The {@link Mono} the finishes this operation on service bus resource.
*/
public Mono<Instant> renewMessageLock(ServiceBusReceivedMessage receivedMessage) {
return connectionProcessor
.flatMap(connection -> connection.getManagementNode(entityPath, entityType))
.flatMap(serviceBusManagementNode -> serviceBusManagementNode
.renewMessageLock(receivedMessage.getLockToken())
.map(instant -> {
receivedMessage.setLockedUntil(instant);
return instant;
}));
}
/**
* Defers a {@link ServiceBusMessage} using its lock token with modified message property. This will move message
* into deferred subqueue.
*
* @param message to be used.
* @param propertiesToModify Message properties to modify.
*
* @return The {@link Mono} the finishes this operation on service bus resource.
*/
public Mono<Void> defer(ServiceBusReceivedMessage message, Map<String, Object> propertiesToModify) {
return updateDisposition(message, DispositionStatus.DEFERRED, null, null, propertiesToModify);
}
/**
* Moves a {@link ServiceBusMessage} to the deadletter sub-queue.
*
* @param message to be used.
*
* @return The {@link Mono} the finishes this operation on service bus resource.
*/
public Mono<Void> deadLetter(ServiceBusReceivedMessage message) {
return deadLetter(message, null);
}
/**
* Moves a {@link ServiceBusMessage} to the deadletter sub-queue with deadletter reason, error description and
* modified properties.
*
* @param message to be used.
* @param deadLetterOptions The options to specify while moving message to the deadletter sub-queue.
*
* @return The {@link Mono} the finishes this operation on service bus resource.
*/
/**
* Receives a deferred {@link ServiceBusMessage}. Deferred messages can only be received by using sequence number.
*
* @param sequenceNumber The {@link ServiceBusReceivedMessage#getSequenceNumber() sequence number} of the message.
*
* @return The {@link Mono} the finishes this operation on service bus resource.
*/
public Mono<ServiceBusReceivedMessage> receiveDeferredMessage(long sequenceNumber) {
return connectionProcessor
.flatMap(connection -> connection.getManagementNode(entityPath, entityType))
.flatMap(node -> node.receiveDeferredMessage(receiveMode, sequenceNumber));
}
/**
* Reads the next active message without changing the state of the receiver or the message source. The first call to
* {@code peek()} fetches the first active message for this receiver. Each subsequent call fetches the subsequent
* message in the entity.
*
* @return Single {@link ServiceBusReceivedMessage} peeked.
*/
public Mono<ServiceBusReceivedMessage> peek() {
return connectionProcessor
.flatMap(connection -> connection.getManagementNode(entityPath, entityType))
.flatMap(ServiceBusManagementNode::peek);
}
/**
* Receives a deferred {@link ServiceBusReceivedMessage}. Deferred messages can only be received by using
* sequence number.
*
* @param sequenceNumbers of the messages to be received.
* @return The {@link Flux} of deferred {@link ServiceBusReceivedMessage}.
*/
public Flux<ServiceBusReceivedMessage> receiveDeferredMessageBatch(long... sequenceNumbers) {
return connectionProcessor
.flatMap(connection -> connection.getManagementNode(entityPath, entityType))
.flatMapMany(node -> node.receiveDeferredMessageBatch(receiveMode, sequenceNumbers));
}
/**
* Reads next the active message without changing the state of the receiver or the message source.
*
* @param fromSequenceNumber The sequence number from where to read the message.
*
* @return Single {@link ServiceBusReceivedMessage} peeked.
*/
public Mono<ServiceBusReceivedMessage> peek(long fromSequenceNumber) {
return connectionProcessor
.flatMap(connection -> connection.getManagementNode(entityPath, entityType))
.flatMap(node -> node.peek(fromSequenceNumber));
}
/**
* Reads the next batch of active messages without changing the state of the receiver or the message source.
*
* @param maxMessages The number of messages.
*
* @return The {@link Flux} of {@link ServiceBusReceivedMessage} peeked.
*/
public Flux<ServiceBusReceivedMessage> peekBatch(int maxMessages) {
return connectionProcessor
.flatMap(connection -> connection.getManagementNode(entityPath, entityType))
.flatMapMany(node -> node.peekBatch(maxMessages));
}
/**
* Reads the next batch of active messages without changing the state of the receiver or the message source.
*
* @param maxMessages The number of messages.
* @param fromSequenceNumber The sequence number from where to read the message.
*
* @return The {@link Flux} of {@link ServiceBusReceivedMessage} peeked.
*/
public Flux<ServiceBusReceivedMessage> peekBatch(int maxMessages, long fromSequenceNumber) {
return connectionProcessor
.flatMap(connection -> connection.getManagementNode(entityPath, entityType))
.flatMapMany(node -> node.peekBatch(maxMessages, fromSequenceNumber));
}
/**
* Disposes of the consumer by closing the underlying connection to the service.
*/
@Override
public void close() {
if (isDisposed.get()) {
return;
}
logger.info("Removing receiver client.");
connectionProcessor.dispose();
cleanup().block(connectionProcessor.getRetryOptions().getTryTimeout());
}
Mono<Void> cleanup() {
if (isDisposed.getAndSet(true)) {
return Mono.empty();
}
List<Mono<Void>> collect = openConsumers.keySet().stream()
.map(e -> {
final ServiceBusAsyncConsumer consumer = openConsumers.get(e);
return consumer != null ? consumer.disposeAsync() : null;
})
.filter(Objects::nonNull)
.collect(Collectors.toList());
return Mono.when(collect).then(Mono.fromRunnable(() -> openConsumers.clear()));
}
private Mono<Boolean> isLockTokenValid(UUID lockToken) {
final Instant lockedUntilUtc = lockTokenExpirationMap.get(lockToken);
if (lockedUntilUtc == null) {
logger.warning("lockToken[{}] is not owned by this receiver.", lockToken);
return Mono.just(false);
}
final Instant now = Instant.now();
if (lockedUntilUtc.isBefore(now)) {
return Mono.error(logger.logExceptionAsError(new AmqpException(false, String.format(
"Lock already expired for the lock token. Expiration: '%s'. Now: '%s'", lockedUntilUtc, now),
getErrorContext())));
}
return Mono.just(true);
}
private Mono<Void> updateDisposition(ServiceBusReceivedMessage message, DispositionStatus dispositionStatus,
String deadLetterReason, String deadLetterErrorDescription, Map<String, Object> propertiesToModify) {
if (message == null) {
return Mono.error(new NullPointerException("'message' cannot be null."));
}
final UUID lockToken = message.getLockToken();
if (receiveMode != ReceiveMode.PEEK_LOCK) {
return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
"'%s' is not supported on a receiver opened in ReceiveMode.RECEIVE_AND_DELETE.", dispositionStatus))));
} else if (lockToken == null) {
return Mono.error(logger.logExceptionAsError(new IllegalArgumentException(
"'message.getLockToken()' cannot be null.")));
}
final Instant instant = lockTokenExpirationMap.get(lockToken);
logger.info("{}: Update started. Disposition: {}. Sequence number: {}. Lock: {}. Expiration: {}",
entityPath, dispositionStatus, message.getSequenceNumber(), lockToken, instant);
return isLockTokenValid(lockToken).flatMap(isLocked -> {
return connectionProcessor.flatMap(connection -> connection.getManagementNode(entityPath, entityType))
.flatMap(node -> {
if (isLocked) {
return node.updateDisposition(lockToken, dispositionStatus, deadLetterReason,
deadLetterErrorDescription, propertiesToModify);
} else {
return Mono.error(new UnsupportedOperationException(
"Cannot complete a message that is not locked. lockToken: " + lockToken));
}
});
}).then(Mono.fromRunnable(() -> {
logger.info("{}: Update completed. Disposition: {}. Sequence number: {}. Lock: {}.",
entityPath, dispositionStatus, message.getSequenceNumber(), lockToken);
lockTokenExpirationMap.remove(lockToken);
}));
}
private ServiceBusAsyncConsumer getOrCreateConsumer(String linkName) {
return openConsumers.computeIfAbsent(linkName, name -> {
logger.info("{}: Creating consumer for link '{}'", entityPath, linkName);
final Flux<AmqpReceiveLink> receiveLink =
connectionProcessor.flatMap(connection -> connection.createReceiveLink(linkName, entityPath,
receiveMode, isSessionEnabled, null, entityType))
.doOnNext(next -> {
final String format = "Created consumer for Service Bus resource: [{}] mode: [{}]"
+ " sessionEnabled? {} transferEntityPath: [{}], entityType: [{}]";
logger.verbose(format, next.getEntityPath(), receiveMode, isSessionEnabled, "N/A",
entityType);
})
.repeat();
final AmqpRetryPolicy retryPolicy = RetryUtil.getRetryPolicy(connectionProcessor.getRetryOptions());
final ServiceBusReceiveLinkProcessor linkMessageProcessor = receiveLink.subscribeWith(
new ServiceBusReceiveLinkProcessor(prefetch, retryPolicy, connectionProcessor));
return new ServiceBusAsyncConsumer(linkName, linkMessageProcessor, messageSerializer, isAutoComplete,
connectionProcessor.getRetryOptions(), this::complete, this::abandon);
});
}
private AmqpErrorContext getErrorContext() {
return new SessionErrorContext(getFullyQualifiedNamespace(), getServiceBusResourceName());
}
} | class ServiceBusReceiverAsyncClient implements Closeable {
private static final DeadLetterOptions DEFAULT_DEAD_LETTER_OPTIONS = new DeadLetterOptions();
private final AtomicBoolean isDisposed = new AtomicBoolean();
private final ClientLogger logger = new ClientLogger(ServiceBusReceiverAsyncClient.class);
private final ConcurrentHashMap<UUID, Instant> lockTokenExpirationMap = new ConcurrentHashMap<>();
private final String fullyQualifiedNamespace;
private final String entityPath;
private final MessagingEntityType entityType;
private final boolean isSessionEnabled;
private final ServiceBusConnectionProcessor connectionProcessor;
private final TracerProvider tracerProvider;
private final MessageSerializer messageSerializer;
private final Duration maxAutoRenewDuration;
private final int prefetch;
private final boolean isAutoComplete;
private final ReceiveMode receiveMode;
/**
* Map containing linkNames and their associated consumers. Key: linkName Value: consumer associated with that
* linkName.
*/
private final ConcurrentHashMap<String, ServiceBusAsyncConsumer> openConsumers = new ConcurrentHashMap<>();
ServiceBusReceiverAsyncClient(String fullyQualifiedNamespace, String entityPath, MessagingEntityType entityType,
boolean isSessionEnabled, ReceiveMessageOptions receiveMessageOptions,
ServiceBusConnectionProcessor connectionProcessor, TracerProvider tracerProvider,
MessageSerializer messageSerializer) {
this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace,
"'fullyQualifiedNamespace' cannot be null.");
this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null.");
this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null.");
this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null.");
this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null.");
Objects.requireNonNull(receiveMessageOptions, "'receiveMessageOptions' cannot be null.");
this.prefetch = receiveMessageOptions.getPrefetchCount();
this.maxAutoRenewDuration = receiveMessageOptions.getMaxAutoRenewDuration();
this.isAutoComplete = receiveMessageOptions.isAutoComplete();
this.receiveMode = receiveMessageOptions.getReceiveMode();
this.entityType = entityType;
this.isSessionEnabled = isSessionEnabled;
}
/**
* Gets the fully qualified Service Bus namespace that the connection is associated with. This is likely similar to
* {@code {yournamespace}.servicebus.windows.net}.
*
* @return The fully qualified Service Bus namespace that the connection is associated with.
*/
public String getFullyQualifiedNamespace() {
return fullyQualifiedNamespace;
}
/**
* Gets the Service Bus resource this client interacts with.
*
* @return The Service Bus resource this client interacts with.
*/
public String getServiceBusResourceName() {
return entityPath;
}
/**
* Receives a stream of {@link ServiceBusReceivedMessage}.
*
* @return A stream of messages from Service Bus.
*/
public Flux<ServiceBusReceivedMessage> receive() {
if (isDisposed.get()) {
return Flux.error(logger.logExceptionAsError(
new IllegalStateException("Cannot receive from a client that is already closed.")));
}
if (receiveMode != ReceiveMode.PEEK_LOCK && isAutoComplete) {
return Flux.error(logger.logExceptionAsError(new UnsupportedOperationException(
"Autocomplete is not supported on a receiver opened in ReceiveMode.RECEIVE_AND_DELETE.")));
}
return Flux.usingWhen(
Mono.fromCallable(() -> getOrCreateConsumer(entityPath)),
consumer -> {
return consumer.receive().map(message -> {
if (message.getLockToken() == null || MessageUtils.ZERO_LOCK_TOKEN.equals(message.getLockToken())) {
return message;
}
lockTokenExpirationMap.compute(message.getLockToken(), (key, existing) -> {
if (existing == null) {
return message.getLockedUntil();
} else {
return existing.isBefore(message.getLockedUntil())
? message.getLockedUntil()
: existing;
}
});
return message;
});
},
consumer -> {
final String linkName = consumer.getLinkName();
logger.info("{}: Receiving completed. Disposing", linkName);
final ServiceBusAsyncConsumer removed = openConsumers.remove(linkName);
if (removed == null) {
logger.warning("Could not find consumer to remove for: {}", linkName);
return Mono.empty();
} else {
return removed.disposeAsync();
}
});
}
/**
* Abandon {@link ServiceBusMessage} with lock token. This will make the message available again for processing.
* Abandoning a message will increase the delivery count on the message.
*
* @param message to be used.
*
* @return The {@link Mono} the finishes this operation on service bus resource.
*/
public Mono<Void> abandon(ServiceBusReceivedMessage message) {
return abandon(message, null);
}
/**
* Abandon {@link ServiceBusMessage} with lock token and updated message property. This will make the message
* available again for processing. Abandoning a message will increase the delivery count on the message.
*
* @param message to be used.
* @param propertiesToModify Message properties to modify.
*
* @return The {@link Mono} the finishes this operation on service bus resource.
*/
public Mono<Void> abandon(ServiceBusReceivedMessage message, Map<String, Object> propertiesToModify) {
return updateDisposition(message, DispositionStatus.ABANDONED, null, null, propertiesToModify);
}
/**
* Completes a {@link ServiceBusMessage} using its lock token. This will delete the message from the service.
*
* @param message Message to be completed.
*
* @return The {@link Mono} the finishes this operation on service bus resource.
*/
public Mono<Void> complete(ServiceBusReceivedMessage message) {
return updateDisposition(message, DispositionStatus.COMPLETED, null, null, null);
}
/**
* Defers a {@link ServiceBusMessage} using its lock token. This will move message into deferred subqueue.
*
* @param message to be used.
*
* @return The {@link Mono} the finishes this operation on service bus resource.
*/
public Mono<Void> defer(ServiceBusReceivedMessage message) {
return defer(message, null);
}
/**
* Asynchronously renews the lock on the message specified by the lock token. The lock will be renewed based on the
* setting specified on the entity. When a message is received in {@link ReceiveMode
* locked on the server for this receiver instance for a duration as specified during the Queue creation
* (LockDuration). If processing of the message requires longer than this duration, the lock needs to be renewed.
* For each renewal, the lock is reset to the entity's LockDuration value.
*
* @param messageLock The {@link UUID} value of the message lock to renew.
*
* @return The {@link Mono} the finishes this operation on service bus resource.
*/
public Mono<Instant> renewMessageLock(UUID messageLock) {
return connectionProcessor
.flatMap(connection -> connection.getManagementNode(entityPath, entityType))
.flatMap(serviceBusManagementNode -> serviceBusManagementNode
.renewMessageLock(messageLock));
}
/**
* Asynchronously renews the lock on the specified message. The lock will be renewed based on the setting specified
* on the entity. When a message is received in {@link ReceiveMode
* server for this receiver instance for a duration as specified during the Queue creation (LockDuration). If
* processing of the message requires longer than this duration, the lock needs to be renewed. For each renewal, the
* lock is reset to the entity's LockDuration value.
*
* @param receivedMessage to be used to renew.
*
* @return The {@link Mono} the finishes this operation on service bus resource.
*/
public Mono<Instant> renewMessageLock(ServiceBusReceivedMessage receivedMessage) {
return connectionProcessor
.flatMap(connection -> connection.getManagementNode(entityPath, entityType))
.flatMap(serviceBusManagementNode -> serviceBusManagementNode
.renewMessageLock(receivedMessage.getLockToken())
.map(instant -> {
receivedMessage.setLockedUntil(instant);
return instant;
}));
}
/**
* Defers a {@link ServiceBusMessage} using its lock token with modified message property. This will move message
* into deferred subqueue.
*
* @param message to be used.
* @param propertiesToModify Message properties to modify.
*
* @return The {@link Mono} the finishes this operation on service bus resource.
*/
public Mono<Void> defer(ServiceBusReceivedMessage message, Map<String, Object> propertiesToModify) {
return updateDisposition(message, DispositionStatus.DEFERRED, null, null, propertiesToModify);
}
/**
* Moves a {@link ServiceBusMessage} to the deadletter sub-queue.
*
* @param message to be used.
*
* @return The {@link Mono} the finishes this operation on service bus resource.
*/
public Mono<Void> deadLetter(ServiceBusReceivedMessage message) {
return deadLetter(message, DEFAULT_DEAD_LETTER_OPTIONS);
}
/**
* Moves a {@link ServiceBusMessage} to the deadletter sub-queue with deadletter reason, error description and
* modifided properties.
*
* @param message to be used.
* @param deadLetterOptions The options to specify while moving message to the deadletter sub-queue.
*
* @return The {@link Mono} the finishes this operation on service bus resource.
*/
/**
* Receives a deferred {@link ServiceBusMessage}. Deferred messages can only be received by using sequence number.
*
* @param sequenceNumber The {@link ServiceBusReceivedMessage
*
* @return The {@link Mono} the finishes this operation on service bus resource.
*/
public Mono<ServiceBusReceivedMessage> receiveDeferredMessage(long sequenceNumber) {
return connectionProcessor
.flatMap(connection -> connection.getManagementNode(entityPath, entityType))
.flatMap(node -> node.receiveDeferredMessage(receiveMode, sequenceNumber));
}
/**
* Reads the next active message without changing the state of the receiver or the message source. The first call to
* {@code peek()} fetches the first active message for this receiver. Each subsequent call fetches the subsequent
* message in the entity.
*
* @return Single {@link ServiceBusReceivedMessage} peeked.
*/
public Mono<ServiceBusReceivedMessage> peek() {
return connectionProcessor
.flatMap(connection -> connection.getManagementNode(entityPath, entityType))
.flatMap(ServiceBusManagementNode::peek);
}
/**
* Receives a deferred {@link ServiceBusReceivedMessage}. Deferred messages can only be received by using
* sequence number.
*
* @param sequenceNumbers of the messages to be received.
* @return The {@link Flux} of deferred {@link ServiceBusReceivedMessage}.
*/
public Flux<ServiceBusReceivedMessage> receiveDeferredMessageBatch(long... sequenceNumbers) {
return connectionProcessor
.flatMap(connection -> connection.getManagementNode(entityPath, entityType))
.flatMapMany(node -> node.receiveDeferredMessageBatch(receiveMode, sequenceNumbers));
}
/**
* Reads next the active message without changing the state of the receiver or the message source.
*
* @param fromSequenceNumber The sequence number from where to read the message.
*
* @return Single {@link ServiceBusReceivedMessage} peeked.
*/
public Mono<ServiceBusReceivedMessage> peek(long fromSequenceNumber) {
return connectionProcessor
.flatMap(connection -> connection.getManagementNode(entityPath, entityType))
.flatMap(node -> node.peek(fromSequenceNumber));
}
/**
* Reads the next batch of active messages without changing the state of the receiver or the message source.
*
* @param maxMessages The number of messages.
*
* @return The {@link Flux} of {@link ServiceBusReceivedMessage} peeked.
*/
public Flux<ServiceBusReceivedMessage> peekBatch(int maxMessages) {
return connectionProcessor
.flatMap(connection -> connection.getManagementNode(entityPath, entityType))
.flatMapMany(node -> node.peekBatch(maxMessages));
}
/**
* Reads the next batch of active messages without changing the state of the receiver or the message source.
*
* @param maxMessages The number of messages.
* @param fromSequenceNumber The sequence number from where to read the message.
*
* @return The {@link Flux} of {@link ServiceBusReceivedMessage} peeked.
*/
public Flux<ServiceBusReceivedMessage> peekBatch(int maxMessages, long fromSequenceNumber) {
return connectionProcessor
.flatMap(connection -> connection.getManagementNode(entityPath, entityType))
.flatMapMany(node -> node.peekBatch(maxMessages, fromSequenceNumber));
}
/**
* Disposes of the consumer by closing the underlying connection to the service.
*/
@Override
public void close() {
if (isDisposed.get()) {
return;
}
logger.info("Removing receiver client.");
connectionProcessor.dispose();
cleanup().block(connectionProcessor.getRetryOptions().getTryTimeout());
}
Mono<Void> cleanup() {
if (isDisposed.getAndSet(true)) {
return Mono.empty();
}
List<Mono<Void>> collect = openConsumers.keySet().stream()
.map(e -> {
final ServiceBusAsyncConsumer consumer = openConsumers.get(e);
return consumer != null ? consumer.disposeAsync() : null;
})
.filter(Objects::nonNull)
.collect(Collectors.toList());
return Mono.when(collect).then(Mono.fromRunnable(() -> openConsumers.clear()));
}
private Mono<Boolean> isLockTokenValid(UUID lockToken) {
final Instant lockedUntilUtc = lockTokenExpirationMap.get(lockToken);
if (lockedUntilUtc == null) {
logger.warning("lockToken[{}] is not owned by this receiver.", lockToken);
return Mono.just(false);
}
final Instant now = Instant.now();
if (lockedUntilUtc.isBefore(now)) {
return Mono.error(logger.logExceptionAsError(new AmqpException(false, String.format(
"Lock already expired for the lock token. Expiration: '%s'. Now: '%s'", lockedUntilUtc, now),
getErrorContext())));
}
return Mono.just(true);
}
private Mono<Void> updateDisposition(ServiceBusReceivedMessage message, DispositionStatus dispositionStatus,
String deadLetterReason, String deadLetterErrorDescription, Map<String, Object> propertiesToModify) {
if (message == null) {
return Mono.error(new NullPointerException("'message' cannot be null."));
}
final UUID lockToken = message.getLockToken();
if (receiveMode != ReceiveMode.PEEK_LOCK) {
return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
"'%s' is not supported on a receiver opened in ReceiveMode.RECEIVE_AND_DELETE.", dispositionStatus))));
} else if (lockToken == null) {
return Mono.error(logger.logExceptionAsError(new IllegalArgumentException(
"'message.getLockToken()' cannot be null.")));
}
final Instant instant = lockTokenExpirationMap.get(lockToken);
logger.info("{}: Update started. Disposition: {}. Sequence number: {}. Lock: {}. Expiration: {}",
entityPath, dispositionStatus, message.getSequenceNumber(), lockToken, instant);
return isLockTokenValid(lockToken).flatMap(isLocked -> {
return connectionProcessor.flatMap(connection -> connection.getManagementNode(entityPath, entityType))
.flatMap(node -> {
if (isLocked) {
return node.updateDisposition(lockToken, dispositionStatus, deadLetterReason,
deadLetterErrorDescription, propertiesToModify);
} else {
return Mono.error(new UnsupportedOperationException(
"Cannot complete a message that is not locked. lockToken: " + lockToken));
}
});
}).then(Mono.fromRunnable(() -> {
logger.info("{}: Update completed. Disposition: {}. Sequence number: {}. Lock: {}.",
entityPath, dispositionStatus, message.getSequenceNumber(), lockToken);
lockTokenExpirationMap.remove(lockToken);
}));
}
private ServiceBusAsyncConsumer getOrCreateConsumer(String linkName) {
return openConsumers.computeIfAbsent(linkName, name -> {
logger.info("{}: Creating consumer for link '{}'", entityPath, linkName);
final Flux<AmqpReceiveLink> receiveLink =
connectionProcessor.flatMap(connection -> connection.createReceiveLink(linkName, entityPath,
receiveMode, isSessionEnabled, null, entityType))
.doOnNext(next -> {
final String format = "Created consumer for Service Bus resource: [{}] mode: [{}]"
+ " sessionEnabled? {} transferEntityPath: [{}], entityType: [{}]";
logger.verbose(format, next.getEntityPath(), receiveMode, isSessionEnabled, "N/A",
entityType);
})
.repeat();
final AmqpRetryPolicy retryPolicy = RetryUtil.getRetryPolicy(connectionProcessor.getRetryOptions());
final ServiceBusReceiveLinkProcessor linkMessageProcessor = receiveLink.subscribeWith(
new ServiceBusReceiveLinkProcessor(prefetch, retryPolicy, connectionProcessor));
return new ServiceBusAsyncConsumer(linkName, linkMessageProcessor, messageSerializer, isAutoComplete,
connectionProcessor.getRetryOptions(), this::complete, this::abandon);
});
}
private AmqpErrorContext getErrorContext() {
return new SessionErrorContext(getFullyQualifiedNamespace(), getServiceBusResourceName());
}
} |
We can probably make an educated guess of the longest possible time the sub process would take and fail after that? | public Mono<AccessToken> authenticateWithAzureCli(TokenRequestContext request) {
String azCommand = "az account get-access-token --output json --resource ";
StringBuilder command = new StringBuilder();
command.append(azCommand);
String scopes = ScopeUtil.scopesToResource(request.getScopes());
try {
ScopeUtil.validateScope(scopes);
} catch (IllegalArgumentException ex) {
return Mono.error(logger.logExceptionAsError(ex));
}
command.append(scopes);
AccessToken token = null;
BufferedReader reader = null;
try {
String starter;
String switcher;
if (isWindowsPlatform()) {
starter = WINDOWS_STARTER;
switcher = WINDOWS_SWITCHER;
} else {
starter = LINUX_MAC_STARTER;
switcher = LINUX_MAC_SWITCHER;
}
ProcessBuilder builder = new ProcessBuilder(starter, switcher, command.toString());
String workingDirectory = getSafeWorkingDirectory();
if (workingDirectory != null) {
builder.directory(new File(workingDirectory));
} else {
throw logger.logExceptionAsError(new IllegalStateException("A Safe Working directory could not be"
+ " found to execute CLI command from."));
}
builder.redirectErrorStream(true);
Process process = builder.start();
reader = new BufferedReader(new InputStreamReader(process.getInputStream(), "UTF-8"));
String line;
StringBuilder output = new StringBuilder();
while (true) {
line = reader.readLine();
if (line == null) {
break;
}
if (line.startsWith(WINDOWS_PROCESS_ERROR_MESSAGE) || line.matches(LINUX_MAC_PROCESS_ERROR_MESSAGE)) {
throw logger.logExceptionAsError(
new ClientAuthenticationException("Azure CLI not installed", null));
}
output.append(line);
}
String processOutput = output.toString();
process.waitFor();
if (process.exitValue() != 0) {
if (processOutput.length() > 0) {
String redactedOutput = redactInfo("\"accessToken\": \"(.*?)(\"|$)", processOutput);
throw logger.logExceptionAsError(new ClientAuthenticationException(redactedOutput, null));
} else {
throw logger.logExceptionAsError(
new ClientAuthenticationException("Failed to invoke Azure CLI ", null));
}
}
Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(processOutput, Map.class,
SerializerEncoding.JSON);
String accessToken = objectMap.get("accessToken");
String time = objectMap.get("expiresOn");
String timeToSecond = time.substring(0, time.indexOf("."));
String timeJoinedWithT = String.join("T", timeToSecond.split(" "));
OffsetDateTime expiresOn = LocalDateTime.parse(timeJoinedWithT, DateTimeFormatter.ISO_LOCAL_DATE_TIME)
.atZone(ZoneId.systemDefault())
.toOffsetDateTime().withOffsetSameInstant(ZoneOffset.UTC);
token = new IdentityToken(accessToken, expiresOn, options);
} catch (IOException e) {
throw logger.logExceptionAsError(new IllegalStateException(e));
} catch (InterruptedException e) {
throw logger.logExceptionAsError(new IllegalStateException(e));
} catch (RuntimeException e) {
return Mono.error(logger.logExceptionAsError(e));
} finally {
try {
if (reader != null) {
reader.close();
}
} catch (IOException ex) {
return Mono.error(logger.logExceptionAsError(new IllegalStateException(ex)));
}
}
return Mono.just(token);
} | process.waitFor(); | public Mono<AccessToken> authenticateWithAzureCli(TokenRequestContext request) {
String azCommand = "az account get-access-token --output json --resource ";
StringBuilder command = new StringBuilder();
command.append(azCommand);
String scopes = ScopeUtil.scopesToResource(request.getScopes());
try {
ScopeUtil.validateScope(scopes);
} catch (IllegalArgumentException ex) {
return Mono.error(logger.logExceptionAsError(ex));
}
command.append(scopes);
AccessToken token = null;
BufferedReader reader = null;
try {
String starter;
String switcher;
if (isWindowsPlatform()) {
starter = WINDOWS_STARTER;
switcher = WINDOWS_SWITCHER;
} else {
starter = LINUX_MAC_STARTER;
switcher = LINUX_MAC_SWITCHER;
}
ProcessBuilder builder = new ProcessBuilder(starter, switcher, command.toString());
String workingDirectory = getSafeWorkingDirectory();
if (workingDirectory != null) {
builder.directory(new File(workingDirectory));
} else {
throw logger.logExceptionAsError(new IllegalStateException("A Safe Working directory could not be"
+ " found to execute CLI command from."));
}
builder.redirectErrorStream(true);
Process process = builder.start();
reader = new BufferedReader(new InputStreamReader(process.getInputStream(), "UTF-8"));
String line;
StringBuilder output = new StringBuilder();
while (true) {
line = reader.readLine();
if (line == null) {
break;
}
if (line.startsWith(WINDOWS_PROCESS_ERROR_MESSAGE) || line.matches(LINUX_MAC_PROCESS_ERROR_MESSAGE)) {
throw logger.logExceptionAsError(
new ClientAuthenticationException("Azure CLI not installed", null));
}
output.append(line);
}
String processOutput = output.toString();
process.waitFor(10, TimeUnit.SECONDS);
if (process.exitValue() != 0) {
if (processOutput.length() > 0) {
String redactedOutput = redactInfo("\"accessToken\": \"(.*?)(\"|$)", processOutput);
throw logger.logExceptionAsError(new ClientAuthenticationException(redactedOutput, null));
} else {
throw logger.logExceptionAsError(
new ClientAuthenticationException("Failed to invoke Azure CLI ", null));
}
}
Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(processOutput, Map.class,
SerializerEncoding.JSON);
String accessToken = objectMap.get("accessToken");
String time = objectMap.get("expiresOn");
String timeToSecond = time.substring(0, time.indexOf("."));
String timeJoinedWithT = String.join("T", timeToSecond.split(" "));
OffsetDateTime expiresOn = LocalDateTime.parse(timeJoinedWithT, DateTimeFormatter.ISO_LOCAL_DATE_TIME)
.atZone(ZoneId.systemDefault())
.toOffsetDateTime().withOffsetSameInstant(ZoneOffset.UTC);
token = new IdentityToken(accessToken, expiresOn, options);
} catch (IOException | InterruptedException e) {
throw logger.logExceptionAsError(new IllegalStateException(e));
} catch (RuntimeException e) {
return Mono.error(logger.logExceptionAsError(e));
} finally {
try {
if (reader != null) {
reader.close();
}
} catch (IOException ex) {
return Mono.error(logger.logExceptionAsError(new IllegalStateException(ex)));
}
}
return Mono.just(token);
} | class IdentityClient {
private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter();
private static final Random RANDOM = new Random();
private static final String WINDOWS_STARTER = "cmd.exe";
private static final String LINUX_MAC_STARTER = "/bin/sh";
private static final String WINDOWS_SWITCHER = "/c";
private static final String LINUX_MAC_SWITCHER = "-c";
private static final String WINDOWS_PROCESS_ERROR_MESSAGE = "'az' is not recognized";
private static final String LINUX_MAC_PROCESS_ERROR_MESSAGE = "(.*)az:(.*)not found";
private static final String DEFAULT_WINDOWS_SYSTEM_ROOT = System.getenv("SystemRoot");
private static final String DEFAULT_MAC_LINUX_PATH = "/bin/";
private final ClientLogger logger = new ClientLogger(IdentityClient.class);
private final IdentityClientOptions options;
private final PublicClientApplication publicClientApplication;
private final String tenantId;
private final String clientId;
private HttpPipelineAdapter httpPipelineAdapter;
/**
* Creates an IdentityClient with the given options.
*
* @param tenantId the tenant ID of the application.
* @param clientId the client ID of the application.
* @param options the options configuring the client.
*/
IdentityClient(String tenantId, String clientId, IdentityClientOptions options) {
if (tenantId == null) {
tenantId = "common";
}
if (options == null) {
options = new IdentityClientOptions();
}
this.tenantId = tenantId;
this.clientId = clientId;
this.options = options;
if (clientId == null) {
this.publicClientApplication = null;
} else {
String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/organizations/" + tenantId;
PublicClientApplication.Builder publicClientApplicationBuilder = PublicClientApplication.builder(clientId);
try {
publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl);
} catch (MalformedURLException e) {
throw logger.logExceptionAsWarning(new IllegalStateException(e));
}
HttpPipeline httpPipeline = options.getHttpPipeline();
if (httpPipeline != null) {
httpPipelineAdapter = new HttpPipelineAdapter(httpPipeline);
publicClientApplicationBuilder.httpClient(httpPipelineAdapter);
} else {
HttpClient httpClient = options.getHttpClient();
if (httpClient != null) {
httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(httpClient));
publicClientApplicationBuilder.httpClient(httpPipelineAdapter);
} else if (options.getProxyOptions() != null) {
publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
} else {
httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(HttpClient.createDefault()));
publicClientApplicationBuilder.httpClient(httpPipelineAdapter);
}
}
if (options.getExecutorService() != null) {
publicClientApplicationBuilder.executorService(options.getExecutorService());
}
this.publicClientApplication = publicClientApplicationBuilder.build();
}
}
/**
* Asynchronously acquire a token from Active Directory with Azure CLI.
*
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
/**
* Asynchronously acquire a token from Active Directory with a client secret.
*
* @param clientSecret the client secret of the application
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<AccessToken> authenticateWithClientSecret(String clientSecret, TokenRequestContext request) {
String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId;
try {
ConfidentialClientApplication.Builder applicationBuilder =
ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.createFromSecret(clientSecret))
.authority(authorityUrl);
if (httpPipelineAdapter != null) {
applicationBuilder.httpClient(httpPipelineAdapter);
} else if (options.getProxyOptions() != null) {
applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
}
if (options.getExecutorService() != null) {
applicationBuilder.executorService(options.getExecutorService());
}
ConfidentialClientApplication application = applicationBuilder.build();
return Mono.fromFuture(application.acquireToken(
ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
.build()))
.map(ar -> new MsalToken(ar, options));
} catch (MalformedURLException e) {
return Mono.error(e);
}
}
private HttpPipeline setupPipeline(HttpClient httpClient) {
List<HttpPipelinePolicy> policies = new ArrayList<>();
HttpLogOptions httpLogOptions = new HttpLogOptions();
HttpPolicyProviders.addBeforeRetryPolicies(policies);
policies.add(new RetryPolicy());
HttpPolicyProviders.addAfterRetryPolicies(policies);
policies.add(new HttpLoggingPolicy(httpLogOptions));
return new HttpPipelineBuilder().httpClient(httpClient)
.policies(policies.toArray(new HttpPipelinePolicy[0])).build();
}
/**
* Asynchronously acquire a token from Active Directory with a PKCS12 certificate.
*
* @param pfxCertificatePath the path to the PKCS12 certificate of the application
* @param pfxCertificatePassword the password protecting the PFX certificate
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<AccessToken> authenticateWithPfxCertificate(String pfxCertificatePath, String pfxCertificatePassword,
TokenRequestContext request) {
String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId;
return Mono.fromCallable(() -> {
ConfidentialClientApplication.Builder applicationBuilder =
ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.createFromCertificate(
new FileInputStream(pfxCertificatePath), pfxCertificatePassword))
.authority(authorityUrl);
if (httpPipelineAdapter != null) {
applicationBuilder.httpClient(httpPipelineAdapter);
} else if (options.getProxyOptions() != null) {
applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
}
if (options.getExecutorService() != null) {
applicationBuilder.executorService(options.getExecutorService());
}
return applicationBuilder.build();
}).flatMap(application -> Mono.fromFuture(application.acquireToken(
ClientCredentialParameters.builder(new HashSet<>(request.getScopes())).build())))
.map(ar -> new MsalToken(ar, options));
}
/**
* Asynchronously acquire a token from Active Directory with a PEM certificate.
*
* @param pemCertificatePath the path to the PEM certificate of the application
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<AccessToken> authenticateWithPemCertificate(String pemCertificatePath, TokenRequestContext request) {
String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId;
try {
byte[] pemCertificateBytes = Files.readAllBytes(Paths.get(pemCertificatePath));
ConfidentialClientApplication.Builder applicationBuilder =
ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.createFromCertificate(
CertificateUtil.privateKeyFromPem(pemCertificateBytes),
CertificateUtil.publicKeyFromPem(pemCertificateBytes)))
.authority(authorityUrl);
if (httpPipelineAdapter != null) {
applicationBuilder.httpClient(httpPipelineAdapter);
} else if (options.getProxyOptions() != null) {
applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
}
if (options.getExecutorService() != null) {
applicationBuilder.executorService(options.getExecutorService());
}
ConfidentialClientApplication application = applicationBuilder.build();
return Mono.fromFuture(application.acquireToken(
ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
.build()))
.map(ar -> new MsalToken(ar, options));
} catch (IOException e) {
return Mono.error(e);
}
}
/**
* Asynchronously acquire a token from Active Directory with a username and a password.
*
* @param request the details of the token request
* @param username the username of the user
* @param password the password of the user
* @return a Publisher that emits an AccessToken
*/
public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request,
String username, String password) {
return Mono.fromFuture(publicClientApplication.acquireToken(
UserNamePasswordParameters.builder(new HashSet<>(request.getScopes()), username, password.toCharArray())
.build()))
.map(ar -> new MsalToken(ar, options));
}
/**
* Asynchronously acquire a token from the currently logged in client.
*
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<MsalToken> authenticateWithUserRefreshToken(TokenRequestContext request, MsalToken msalToken) {
SilentParameters parameters;
if (msalToken.getAccount() != null) {
parameters = SilentParameters.builder(new HashSet<>(request.getScopes()), msalToken.getAccount()).build();
} else {
parameters = SilentParameters.builder(new HashSet<>(request.getScopes())).build();
}
return Mono.defer(() -> {
try {
return Mono.fromFuture(publicClientApplication.acquireTokenSilently(parameters))
.map(ar -> new MsalToken(ar, options));
} catch (MalformedURLException e) {
return Mono.error(e);
}
});
}
/**
* Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide
* a device code for login and the user must meet the challenge by authenticating in a browser on the current or a
* different device.
*
* @param request the details of the token request
* @param deviceCodeConsumer the user provided closure that will consume the device code challenge
* @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device
* code expires
*/
public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request,
Consumer<DeviceCodeInfo> deviceCodeConsumer) {
return Mono.fromFuture(() -> {
DeviceCodeFlowParameters parameters = DeviceCodeFlowParameters.builder(new HashSet<>(request.getScopes()),
dc -> deviceCodeConsumer.accept(new DeviceCodeInfo(dc.userCode(), dc.deviceCode(),
dc.verificationUri(), OffsetDateTime.now().plusSeconds(dc.expiresIn()), dc.message()))).build();
return publicClientApplication.acquireToken(parameters);
}).map(ar -> new MsalToken(ar, options));
}
/**
* Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow.
*
* @param request the details of the token request
* @param authorizationCode the oauth2 authorization code
* @param redirectUrl the redirectUrl where the authorization code is sent to
* @return a Publisher that emits an AccessToken
*/
public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode,
URI redirectUrl) {
return Mono.fromFuture(() -> publicClientApplication.acquireToken(
AuthorizationCodeParameters.builder(authorizationCode, redirectUrl)
.scopes(new HashSet<>(request.getScopes()))
.build()))
.map(ar -> new MsalToken(ar, options));
}
/**
* Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The
* credential will run a minimal local HttpServer at the given port, so {@code http:
* listed as a valid reply URL for the application.
*
* @param request the details of the token request
* @param port the port on which the HTTP server is listening
* @return a Publisher that emits an AccessToken
*/
public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, int port) {
String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId;
return AuthorizationCodeListener.create(port)
.flatMap(server -> {
URI redirectUri;
String browserUri;
try {
redirectUri = new URI(String.format("http:
browserUri =
String.format("%s/oauth2/v2.0/authorize?response_type=code&response_mode=query&prompt"
+ "=select_account&client_id=%s&redirect_uri=%s&state=%s&scope=%s",
authorityUrl,
clientId,
redirectUri.toString(),
UUID.randomUUID(),
String.join(" ", request.getScopes()));
} catch (URISyntaxException e) {
return server.dispose().then(Mono.error(e));
}
return server.listen()
.mergeWith(Mono.<String>fromRunnable(() -> {
try {
openUrl(browserUri);
} catch (IOException e) {
throw logger.logExceptionAsError(new IllegalStateException(e));
}
}).subscribeOn(Schedulers.newSingle("browser")))
.next()
.flatMap(code -> authenticateWithAuthorizationCode(request, code, redirectUri))
.onErrorResume(t -> server.dispose().then(Mono.error(t)))
.flatMap(msalToken -> server.dispose().then(Mono.just(msalToken)));
});
}
/**
* Asynchronously acquire a token from the App Service Managed Service Identity endpoint.
*
* @param msiEndpoint the endpoint to acquire token from
* @param msiSecret the secret to acquire token with
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String msiEndpoint, String msiSecret,
TokenRequestContext request) {
return Mono.fromCallable(() -> {
String resource = ScopeUtil.scopesToResource(request.getScopes());
HttpURLConnection connection = null;
StringBuilder payload = new StringBuilder();
payload.append("resource=");
payload.append(URLEncoder.encode(resource, "UTF-8"));
payload.append("&api-version=");
payload.append(URLEncoder.encode("2017-09-01", "UTF-8"));
if (clientId != null) {
payload.append("&clientid=");
payload.append(URLEncoder.encode(clientId, "UTF-8"));
}
try {
URL url = new URL(String.format("%s?%s", msiEndpoint, payload));
connection = (HttpURLConnection) url.openConnection();
connection.setRequestMethod("GET");
if (msiSecret != null) {
connection.setRequestProperty("Secret", msiSecret);
}
connection.setRequestProperty("Metadata", "true");
connection.connect();
Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name())
.useDelimiter("\\A");
String result = s.hasNext() ? s.next() : "";
MSIToken msiToken = SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON);
return new IdentityToken(msiToken.getToken(), msiToken.getExpiresAt(), options);
} finally {
if (connection != null) {
connection.disconnect();
}
}
});
}
/**
* Asynchronously acquire a token from the Virtual Machine IMDS endpoint.
*
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) {
String resource = ScopeUtil.scopesToResource(request.getScopes());
StringBuilder payload = new StringBuilder();
final int imdsUpgradeTimeInMs = 70 * 1000;
try {
payload.append("api-version=");
payload.append(URLEncoder.encode("2018-02-01", "UTF-8"));
payload.append("&resource=");
payload.append(URLEncoder.encode(resource, "UTF-8"));
if (clientId != null) {
payload.append("&client_id=");
payload.append(URLEncoder.encode(clientId, "UTF-8"));
}
} catch (IOException exception) {
return Mono.error(exception);
}
return checkIMDSAvailable().flatMap(available -> Mono.fromCallable(() -> {
int retry = 1;
while (retry <= options.getMaxRetry()) {
URL url = null;
HttpURLConnection connection = null;
try {
url =
new URL(String.format("http:
payload.toString()));
connection = (HttpURLConnection) url.openConnection();
connection.setRequestMethod("GET");
connection.setRequestProperty("Metadata", "true");
connection.connect();
Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name())
.useDelimiter("\\A");
String result = s.hasNext() ? s.next() : "";
MSIToken msiToken = SERIALIZER_ADAPTER.deserialize(result,
MSIToken.class, SerializerEncoding.JSON);
return new IdentityToken(msiToken.getToken(), msiToken.getExpiresAt(), options);
} catch (IOException exception) {
if (connection == null) {
throw logger.logExceptionAsError(new RuntimeException(
String.format("Could not connect to the url: %s.", url), exception));
}
int responseCode = connection.getResponseCode();
if (responseCode == 410
|| responseCode == 429
|| responseCode == 404
|| (responseCode >= 500 && responseCode <= 599)) {
int retryTimeoutInMs = options.getRetryTimeout()
.apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000;
retryTimeoutInMs =
(responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? imdsUpgradeTimeInMs
: retryTimeoutInMs;
retry++;
if (retry > options.getMaxRetry()) {
break;
} else {
sleep(retryTimeoutInMs);
}
} else {
throw logger.logExceptionAsError(new RuntimeException(
"Couldn't acquire access token from IMDS, verify your objectId, "
+ "clientId or msiResourceId", exception));
}
} finally {
if (connection != null) {
connection.disconnect();
}
}
}
throw logger.logExceptionAsError(new RuntimeException(
String.format("MSI: Failed to acquire tokens after retrying %s times",
options.getMaxRetry())));
}));
}
private Mono<Boolean> checkIMDSAvailable() {
StringBuilder payload = new StringBuilder();
try {
payload.append("api-version=");
payload.append(URLEncoder.encode("2018-02-01", "UTF-8"));
} catch (IOException exception) {
return Mono.error(exception);
}
return Mono.fromCallable(() -> {
HttpURLConnection connection = null;
URL url = new URL(String.format("http:
payload.toString()));
try {
connection = (HttpURLConnection) url.openConnection();
connection.setRequestMethod("GET");
connection.setConnectTimeout(500);
connection.connect();
} finally {
if (connection != null) {
connection.disconnect();
}
}
return true;
});
}
private static void sleep(int millis) {
try {
Thread.sleep(millis);
} catch (InterruptedException ex) {
throw new IllegalStateException(ex);
}
}
private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) {
switch (options.getType()) {
case SOCKS4:
case SOCKS5:
return new Proxy(Type.SOCKS, options.getAddress());
case HTTP:
default:
return new Proxy(Type.HTTP, options.getAddress());
}
}
private String getSafeWorkingDirectory() {
if (isWindowsPlatform()) {
if (CoreUtils.isNullOrEmpty(DEFAULT_WINDOWS_SYSTEM_ROOT)) {
return null;
}
return DEFAULT_WINDOWS_SYSTEM_ROOT + "\\system32";
} else {
return DEFAULT_MAC_LINUX_PATH;
}
}
private boolean isWindowsPlatform() {
return System.getProperty("os.name").contains("Windows");
}
private String redactInfo(String regex, String input) {
return input.replaceAll(regex, "****");
}
void openUrl(String url) throws IOException {
Runtime rt = Runtime.getRuntime();
String os = System.getProperty("os.name").toLowerCase(Locale.ROOT);
if (os.contains("win")) {
rt.exec("rundll32 url.dll,FileProtocolHandler " + url);
} else if (os.contains("mac")) {
rt.exec("open " + url);
} else if (os.contains("nix") || os.contains("nux")) {
rt.exec("xdg-open " + url);
} else {
logger.error("Browser could not be opened - please open {} in a browser on this device.", url);
}
}
} | class IdentityClient {
private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter();
private static final Random RANDOM = new Random();
private static final String WINDOWS_STARTER = "cmd.exe";
private static final String LINUX_MAC_STARTER = "/bin/sh";
private static final String WINDOWS_SWITCHER = "/c";
private static final String LINUX_MAC_SWITCHER = "-c";
private static final String WINDOWS_PROCESS_ERROR_MESSAGE = "'az' is not recognized";
private static final String LINUX_MAC_PROCESS_ERROR_MESSAGE = "(.*)az:(.*)not found";
private static final String DEFAULT_WINDOWS_SYSTEM_ROOT = System.getenv("SystemRoot");
private static final String DEFAULT_MAC_LINUX_PATH = "/bin/";
private final ClientLogger logger = new ClientLogger(IdentityClient.class);
private final IdentityClientOptions options;
private final PublicClientApplication publicClientApplication;
private final String tenantId;
private final String clientId;
private HttpPipelineAdapter httpPipelineAdapter;
/**
* Creates an IdentityClient with the given options.
*
* @param tenantId the tenant ID of the application.
* @param clientId the client ID of the application.
* @param options the options configuring the client.
*/
IdentityClient(String tenantId, String clientId, IdentityClientOptions options) {
if (tenantId == null) {
tenantId = "common";
}
if (options == null) {
options = new IdentityClientOptions();
}
this.tenantId = tenantId;
this.clientId = clientId;
this.options = options;
if (clientId == null) {
this.publicClientApplication = null;
} else {
String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/organizations/" + tenantId;
PublicClientApplication.Builder publicClientApplicationBuilder = PublicClientApplication.builder(clientId);
try {
publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl);
} catch (MalformedURLException e) {
throw logger.logExceptionAsWarning(new IllegalStateException(e));
}
HttpPipeline httpPipeline = options.getHttpPipeline();
if (httpPipeline != null) {
httpPipelineAdapter = new HttpPipelineAdapter(httpPipeline);
publicClientApplicationBuilder.httpClient(httpPipelineAdapter);
} else {
HttpClient httpClient = options.getHttpClient();
if (httpClient != null) {
httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(httpClient));
publicClientApplicationBuilder.httpClient(httpPipelineAdapter);
} else if (options.getProxyOptions() != null) {
publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
} else {
httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(HttpClient.createDefault()));
publicClientApplicationBuilder.httpClient(httpPipelineAdapter);
}
}
if (options.getExecutorService() != null) {
publicClientApplicationBuilder.executorService(options.getExecutorService());
}
this.publicClientApplication = publicClientApplicationBuilder.build();
}
}
/**
* Asynchronously acquire a token from Active Directory with Azure CLI.
*
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
/**
* Asynchronously acquire a token from Active Directory with a client secret.
*
* @param clientSecret the client secret of the application
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<AccessToken> authenticateWithClientSecret(String clientSecret, TokenRequestContext request) {
String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId;
try {
ConfidentialClientApplication.Builder applicationBuilder =
ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.createFromSecret(clientSecret))
.authority(authorityUrl);
if (httpPipelineAdapter != null) {
applicationBuilder.httpClient(httpPipelineAdapter);
} else if (options.getProxyOptions() != null) {
applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
}
if (options.getExecutorService() != null) {
applicationBuilder.executorService(options.getExecutorService());
}
ConfidentialClientApplication application = applicationBuilder.build();
return Mono.fromFuture(application.acquireToken(
ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
.build()))
.map(ar -> new MsalToken(ar, options));
} catch (MalformedURLException e) {
return Mono.error(e);
}
}
private HttpPipeline setupPipeline(HttpClient httpClient) {
List<HttpPipelinePolicy> policies = new ArrayList<>();
HttpLogOptions httpLogOptions = new HttpLogOptions();
HttpPolicyProviders.addBeforeRetryPolicies(policies);
policies.add(new RetryPolicy());
HttpPolicyProviders.addAfterRetryPolicies(policies);
policies.add(new HttpLoggingPolicy(httpLogOptions));
return new HttpPipelineBuilder().httpClient(httpClient)
.policies(policies.toArray(new HttpPipelinePolicy[0])).build();
}
/**
* Asynchronously acquire a token from Active Directory with a PKCS12 certificate.
*
* @param pfxCertificatePath the path to the PKCS12 certificate of the application
* @param pfxCertificatePassword the password protecting the PFX certificate
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<AccessToken> authenticateWithPfxCertificate(String pfxCertificatePath, String pfxCertificatePassword,
TokenRequestContext request) {
String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId;
return Mono.fromCallable(() -> {
ConfidentialClientApplication.Builder applicationBuilder =
ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.createFromCertificate(
new FileInputStream(pfxCertificatePath), pfxCertificatePassword))
.authority(authorityUrl);
if (httpPipelineAdapter != null) {
applicationBuilder.httpClient(httpPipelineAdapter);
} else if (options.getProxyOptions() != null) {
applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
}
if (options.getExecutorService() != null) {
applicationBuilder.executorService(options.getExecutorService());
}
return applicationBuilder.build();
}).flatMap(application -> Mono.fromFuture(application.acquireToken(
ClientCredentialParameters.builder(new HashSet<>(request.getScopes())).build())))
.map(ar -> new MsalToken(ar, options));
}
/**
* Asynchronously acquire a token from Active Directory with a PEM certificate.
*
* @param pemCertificatePath the path to the PEM certificate of the application
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<AccessToken> authenticateWithPemCertificate(String pemCertificatePath, TokenRequestContext request) {
String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId;
try {
byte[] pemCertificateBytes = Files.readAllBytes(Paths.get(pemCertificatePath));
ConfidentialClientApplication.Builder applicationBuilder =
ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.createFromCertificate(
CertificateUtil.privateKeyFromPem(pemCertificateBytes),
CertificateUtil.publicKeyFromPem(pemCertificateBytes)))
.authority(authorityUrl);
if (httpPipelineAdapter != null) {
applicationBuilder.httpClient(httpPipelineAdapter);
} else if (options.getProxyOptions() != null) {
applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
}
if (options.getExecutorService() != null) {
applicationBuilder.executorService(options.getExecutorService());
}
ConfidentialClientApplication application = applicationBuilder.build();
return Mono.fromFuture(application.acquireToken(
ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
.build()))
.map(ar -> new MsalToken(ar, options));
} catch (IOException e) {
return Mono.error(e);
}
}
/**
* Asynchronously acquire a token from Active Directory with a username and a password.
*
* @param request the details of the token request
* @param username the username of the user
* @param password the password of the user
* @return a Publisher that emits an AccessToken
*/
public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request,
String username, String password) {
return Mono.fromFuture(publicClientApplication.acquireToken(
UserNamePasswordParameters.builder(new HashSet<>(request.getScopes()), username, password.toCharArray())
.build()))
.map(ar -> new MsalToken(ar, options));
}
/**
* Asynchronously acquire a token from the currently logged in client.
*
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<MsalToken> authenticateWithUserRefreshToken(TokenRequestContext request, MsalToken msalToken) {
SilentParameters parameters;
if (msalToken.getAccount() != null) {
parameters = SilentParameters.builder(new HashSet<>(request.getScopes()), msalToken.getAccount()).build();
} else {
parameters = SilentParameters.builder(new HashSet<>(request.getScopes())).build();
}
return Mono.defer(() -> {
try {
return Mono.fromFuture(publicClientApplication.acquireTokenSilently(parameters))
.map(ar -> new MsalToken(ar, options));
} catch (MalformedURLException e) {
return Mono.error(e);
}
});
}
/**
* Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide
* a device code for login and the user must meet the challenge by authenticating in a browser on the current or a
* different device.
*
* @param request the details of the token request
* @param deviceCodeConsumer the user provided closure that will consume the device code challenge
* @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device
* code expires
*/
public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request,
Consumer<DeviceCodeInfo> deviceCodeConsumer) {
return Mono.fromFuture(() -> {
DeviceCodeFlowParameters parameters = DeviceCodeFlowParameters.builder(new HashSet<>(request.getScopes()),
dc -> deviceCodeConsumer.accept(new DeviceCodeInfo(dc.userCode(), dc.deviceCode(),
dc.verificationUri(), OffsetDateTime.now().plusSeconds(dc.expiresIn()), dc.message()))).build();
return publicClientApplication.acquireToken(parameters);
}).map(ar -> new MsalToken(ar, options));
}
/**
* Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow.
*
* @param request the details of the token request
* @param authorizationCode the oauth2 authorization code
* @param redirectUrl the redirectUrl where the authorization code is sent to
* @return a Publisher that emits an AccessToken
*/
public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode,
URI redirectUrl) {
return Mono.fromFuture(() -> publicClientApplication.acquireToken(
AuthorizationCodeParameters.builder(authorizationCode, redirectUrl)
.scopes(new HashSet<>(request.getScopes()))
.build()))
.map(ar -> new MsalToken(ar, options));
}
/**
* Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The
* credential will run a minimal local HttpServer at the given port, so {@code http:
* listed as a valid reply URL for the application.
*
* @param request the details of the token request
* @param port the port on which the HTTP server is listening
* @return a Publisher that emits an AccessToken
*/
public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, int port) {
String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId;
return AuthorizationCodeListener.create(port)
.flatMap(server -> {
URI redirectUri;
String browserUri;
try {
redirectUri = new URI(String.format("http:
browserUri =
String.format("%s/oauth2/v2.0/authorize?response_type=code&response_mode=query&prompt"
+ "=select_account&client_id=%s&redirect_uri=%s&state=%s&scope=%s",
authorityUrl,
clientId,
redirectUri.toString(),
UUID.randomUUID(),
String.join(" ", request.getScopes()));
} catch (URISyntaxException e) {
return server.dispose().then(Mono.error(e));
}
return server.listen()
.mergeWith(Mono.<String>fromRunnable(() -> {
try {
openUrl(browserUri);
} catch (IOException e) {
throw logger.logExceptionAsError(new IllegalStateException(e));
}
}).subscribeOn(Schedulers.newSingle("browser")))
.next()
.flatMap(code -> authenticateWithAuthorizationCode(request, code, redirectUri))
.onErrorResume(t -> server.dispose().then(Mono.error(t)))
.flatMap(msalToken -> server.dispose().then(Mono.just(msalToken)));
});
}
/**
* Asynchronously acquire a token from the App Service Managed Service Identity endpoint.
*
* @param msiEndpoint the endpoint to acquire token from
* @param msiSecret the secret to acquire token with
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String msiEndpoint, String msiSecret,
TokenRequestContext request) {
return Mono.fromCallable(() -> {
String resource = ScopeUtil.scopesToResource(request.getScopes());
HttpURLConnection connection = null;
StringBuilder payload = new StringBuilder();
payload.append("resource=");
payload.append(URLEncoder.encode(resource, "UTF-8"));
payload.append("&api-version=");
payload.append(URLEncoder.encode("2017-09-01", "UTF-8"));
if (clientId != null) {
payload.append("&clientid=");
payload.append(URLEncoder.encode(clientId, "UTF-8"));
}
try {
URL url = new URL(String.format("%s?%s", msiEndpoint, payload));
connection = (HttpURLConnection) url.openConnection();
connection.setRequestMethod("GET");
if (msiSecret != null) {
connection.setRequestProperty("Secret", msiSecret);
}
connection.setRequestProperty("Metadata", "true");
connection.connect();
Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name())
.useDelimiter("\\A");
String result = s.hasNext() ? s.next() : "";
MSIToken msiToken = SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON);
return new IdentityToken(msiToken.getToken(), msiToken.getExpiresAt(), options);
} finally {
if (connection != null) {
connection.disconnect();
}
}
});
}
/**
* Asynchronously acquire a token from the Virtual Machine IMDS endpoint.
*
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) {
String resource = ScopeUtil.scopesToResource(request.getScopes());
StringBuilder payload = new StringBuilder();
final int imdsUpgradeTimeInMs = 70 * 1000;
try {
payload.append("api-version=");
payload.append(URLEncoder.encode("2018-02-01", "UTF-8"));
payload.append("&resource=");
payload.append(URLEncoder.encode(resource, "UTF-8"));
if (clientId != null) {
payload.append("&client_id=");
payload.append(URLEncoder.encode(clientId, "UTF-8"));
}
} catch (IOException exception) {
return Mono.error(exception);
}
return checkIMDSAvailable().flatMap(available -> Mono.fromCallable(() -> {
int retry = 1;
while (retry <= options.getMaxRetry()) {
URL url = null;
HttpURLConnection connection = null;
try {
url =
new URL(String.format("http:
payload.toString()));
connection = (HttpURLConnection) url.openConnection();
connection.setRequestMethod("GET");
connection.setRequestProperty("Metadata", "true");
connection.connect();
Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name())
.useDelimiter("\\A");
String result = s.hasNext() ? s.next() : "";
MSIToken msiToken = SERIALIZER_ADAPTER.deserialize(result,
MSIToken.class, SerializerEncoding.JSON);
return new IdentityToken(msiToken.getToken(), msiToken.getExpiresAt(), options);
} catch (IOException exception) {
if (connection == null) {
throw logger.logExceptionAsError(new RuntimeException(
String.format("Could not connect to the url: %s.", url), exception));
}
int responseCode = connection.getResponseCode();
if (responseCode == 410
|| responseCode == 429
|| responseCode == 404
|| (responseCode >= 500 && responseCode <= 599)) {
int retryTimeoutInMs = options.getRetryTimeout()
.apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000;
retryTimeoutInMs =
(responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? imdsUpgradeTimeInMs
: retryTimeoutInMs;
retry++;
if (retry > options.getMaxRetry()) {
break;
} else {
sleep(retryTimeoutInMs);
}
} else {
throw logger.logExceptionAsError(new RuntimeException(
"Couldn't acquire access token from IMDS, verify your objectId, "
+ "clientId or msiResourceId", exception));
}
} finally {
if (connection != null) {
connection.disconnect();
}
}
}
throw logger.logExceptionAsError(new RuntimeException(
String.format("MSI: Failed to acquire tokens after retrying %s times",
options.getMaxRetry())));
}));
}
// Probes the IMDS endpoint with a short (500 ms) connect timeout to decide whether
// managed-identity token requests are worth attempting from this host.
// Emits true when the probe connects; otherwise the Mono errors with the connect failure.
// NOTE(review): the URL literal below appears truncated in this copy (everything after
// "http:" was stripped, presumably the IMDS endpoint) — restore from version control.
private Mono<Boolean> checkIMDSAvailable() {
StringBuilder payload = new StringBuilder();
try {
payload.append("api-version=");
payload.append(URLEncoder.encode("2018-02-01", "UTF-8"));
} catch (IOException exception) {
// Encoding the constant should never fail, but surface it reactively if it does.
return Mono.error(exception);
}
// Defer the blocking probe until subscription time.
return Mono.fromCallable(() -> {
HttpURLConnection connection = null;
URL url = new URL(String.format("http:
payload.toString()));
try {
connection = (HttpURLConnection) url.openConnection();
connection.setRequestMethod("GET");
// Fail fast: IMDS is link-local, so 500 ms is ample when it exists.
connection.setConnectTimeout(500);
connection.connect();
} finally {
if (connection != null) {
connection.disconnect();
}
}
return true;
});
}
/**
 * Blocks the current thread for the given number of milliseconds.
 *
 * @param millis how long to sleep, in milliseconds
 * @throws IllegalStateException if the thread is interrupted while sleeping
 */
private static void sleep(int millis) {
    try {
        Thread.sleep(millis);
    } catch (InterruptedException ex) {
        // Restore the interrupt flag so callers further up the stack can still
        // observe the interruption; the original swallowed it.
        Thread.currentThread().interrupt();
        throw new IllegalStateException(ex);
    }
}
/**
 * Converts the SDK's {@code ProxyOptions} into an equivalent {@link java.net.Proxy}:
 * SOCKS4/SOCKS5 map to {@code Type.SOCKS}; everything else (including HTTP) maps to
 * {@code Type.HTTP}. The proxy address is carried over unchanged.
 */
private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) {
    switch (options.getType()) {
        case SOCKS4:
        case SOCKS5:
            return new Proxy(Type.SOCKS, options.getAddress());
        default:
            // HTTP and any future proxy kinds fall back to an HTTP proxy.
            return new Proxy(Type.HTTP, options.getAddress());
    }
}
/**
 * Picks a trusted directory from which to launch CLI processes: {@code /bin/} on
 * mac/Linux, {@code %SystemRoot%\system32} on Windows, or {@code null} when the
 * Windows SystemRoot environment variable is unset/empty.
 */
private String getSafeWorkingDirectory() {
    if (!isWindowsPlatform()) {
        return DEFAULT_MAC_LINUX_PATH;
    }
    // On Windows only trust system32 under the SystemRoot environment variable.
    return CoreUtils.isNullOrEmpty(DEFAULT_WINDOWS_SYSTEM_ROOT)
        ? null
        : DEFAULT_WINDOWS_SYSTEM_ROOT + "\\system32";
}
/** Reports whether the current OS is Windows, based on the {@code os.name} system property. */
private boolean isWindowsPlatform() {
    String osName = System.getProperty("os.name");
    return osName.contains("Windows");
}
/**
 * Replaces every match of {@code regex} in {@code input} with the mask {@code "****"},
 * e.g. to keep access tokens out of error messages and logs.
 */
private String redactInfo(String regex, String input) {
    final String mask = "****";
    return input.replaceAll(regex, mask);
}
/**
 * Opens the given URL in the platform's default browser, or logs an error on
 * unrecognized platforms.
 *
 * @param url the URL to open
 * @throws IOException if the browser process could not be started
 */
void openUrl(String url) throws IOException {
    Runtime rt = Runtime.getRuntime();
    String os = System.getProperty("os.name").toLowerCase(Locale.ROOT);
    // Use the String[] exec overload so the URL is passed as a single argument;
    // Runtime.exec(String) re-tokenizes on whitespace, which breaks (or worse,
    // argument-injects) URLs containing spaces.
    if (os.contains("win")) {
        rt.exec(new String[] {"rundll32", "url.dll,FileProtocolHandler", url});
    } else if (os.contains("mac")) {
        rt.exec(new String[] {"open", url});
    } else if (os.contains("nix") || os.contains("nux")) {
        rt.exec(new String[] {"xdg-open", url});
    } else {
        logger.error("Browser could not be opened - please open {} in a browser on this device.", url);
    }
}
} |
agreed this must have some default timeout. In .NET I set this to 10 seconds. | public Mono<AccessToken> authenticateWithAzureCli(TokenRequestContext request) {
    String azCommand = "az account get-access-token --output json --resource ";
    StringBuilder command = new StringBuilder();
    command.append(azCommand);
    String scopes = ScopeUtil.scopesToResource(request.getScopes());
    try {
        ScopeUtil.validateScope(scopes);
    } catch (IllegalArgumentException ex) {
        return Mono.error(logger.logExceptionAsError(ex));
    }
    command.append(scopes);
    AccessToken token = null;
    BufferedReader reader = null;
    try {
        // Choose the shell wrapper for the current platform.
        String starter;
        String switcher;
        if (isWindowsPlatform()) {
            starter = WINDOWS_STARTER;
            switcher = WINDOWS_SWITCHER;
        } else {
            starter = LINUX_MAC_STARTER;
            switcher = LINUX_MAC_SWITCHER;
        }
        ProcessBuilder builder = new ProcessBuilder(starter, switcher, command.toString());
        // Run from a trusted directory so a rogue "az" in the caller's cwd is not picked up.
        String workingDirectory = getSafeWorkingDirectory();
        if (workingDirectory != null) {
            builder.directory(new File(workingDirectory));
        } else {
            throw logger.logExceptionAsError(new IllegalStateException("A Safe Working directory could not be"
                + " found to execute CLI command from."));
        }
        builder.redirectErrorStream(true);
        Process process = builder.start();
        reader = new BufferedReader(new InputStreamReader(process.getInputStream(), "UTF-8"));
        String line;
        StringBuilder output = new StringBuilder();
        while (true) {
            line = reader.readLine();
            if (line == null) {
                break;
            }
            // "command not found"-style output means the Azure CLI is not installed.
            if (line.startsWith(WINDOWS_PROCESS_ERROR_MESSAGE) || line.matches(LINUX_MAC_PROCESS_ERROR_MESSAGE)) {
                throw logger.logExceptionAsError(
                    new ClientAuthenticationException("Azure CLI not installed", null));
            }
            output.append(line);
        }
        String processOutput = output.toString();
        // FIX: bound the wait so a hung CLI process cannot block forever; 10 seconds
        // mirrors the cap used by the equivalent .NET credential.
        if (!process.waitFor(10, java.util.concurrent.TimeUnit.SECONDS)) {
            process.destroy();
            throw logger.logExceptionAsError(
                new ClientAuthenticationException("Azure CLI authentication timed out.", null));
        }
        if (process.exitValue() != 0) {
            if (processOutput.length() > 0) {
                // Never surface the raw access token in the error message.
                String redactedOutput = redactInfo("\"accessToken\": \"(.*?)(\"|$)", processOutput);
                throw logger.logExceptionAsError(new ClientAuthenticationException(redactedOutput, null));
            } else {
                throw logger.logExceptionAsError(
                    new ClientAuthenticationException("Failed to invoke Azure CLI ", null));
            }
        }
        Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(processOutput, Map.class,
            SerializerEncoding.JSON);
        String accessToken = objectMap.get("accessToken");
        String time = objectMap.get("expiresOn");
        // "expiresOn" looks like a local "yyyy-MM-dd HH:mm:ss.ffffff" timestamp; convert it
        // to UTC via the system zone — TODO confirm against actual az CLI output.
        String timeToSecond = time.substring(0, time.indexOf("."));
        String timeJoinedWithT = String.join("T", timeToSecond.split(" "));
        OffsetDateTime expiresOn = LocalDateTime.parse(timeJoinedWithT, DateTimeFormatter.ISO_LOCAL_DATE_TIME)
            .atZone(ZoneId.systemDefault())
            .toOffsetDateTime().withOffsetSameInstant(ZoneOffset.UTC);
        token = new IdentityToken(accessToken, expiresOn, options);
    } catch (IOException e) {
        throw logger.logExceptionAsError(new IllegalStateException(e));
    } catch (InterruptedException e) {
        // Restore the interrupt flag before converting to an unchecked exception.
        Thread.currentThread().interrupt();
        throw logger.logExceptionAsError(new IllegalStateException(e));
    } catch (RuntimeException e) {
        return Mono.error(logger.logExceptionAsError(e));
    } finally {
        try {
            if (reader != null) {
                reader.close();
            }
        } catch (IOException ex) {
            return Mono.error(logger.logExceptionAsError(new IllegalStateException(ex)));
        }
    }
    return Mono.just(token);
} | process.waitFor(); | public Mono<AccessToken> authenticateWithAzureCli(TokenRequestContext request) {
String azCommand = "az account get-access-token --output json --resource ";
StringBuilder command = new StringBuilder();
command.append(azCommand);
String scopes = ScopeUtil.scopesToResource(request.getScopes());
try {
ScopeUtil.validateScope(scopes);
} catch (IllegalArgumentException ex) {
return Mono.error(logger.logExceptionAsError(ex));
}
command.append(scopes);
AccessToken token = null;
BufferedReader reader = null;
try {
String starter;
String switcher;
if (isWindowsPlatform()) {
starter = WINDOWS_STARTER;
switcher = WINDOWS_SWITCHER;
} else {
starter = LINUX_MAC_STARTER;
switcher = LINUX_MAC_SWITCHER;
}
ProcessBuilder builder = new ProcessBuilder(starter, switcher, command.toString());
String workingDirectory = getSafeWorkingDirectory();
if (workingDirectory != null) {
builder.directory(new File(workingDirectory));
} else {
throw logger.logExceptionAsError(new IllegalStateException("A Safe Working directory could not be"
+ " found to execute CLI command from."));
}
builder.redirectErrorStream(true);
Process process = builder.start();
reader = new BufferedReader(new InputStreamReader(process.getInputStream(), "UTF-8"));
String line;
StringBuilder output = new StringBuilder();
while (true) {
line = reader.readLine();
if (line == null) {
break;
}
if (line.startsWith(WINDOWS_PROCESS_ERROR_MESSAGE) || line.matches(LINUX_MAC_PROCESS_ERROR_MESSAGE)) {
throw logger.logExceptionAsError(
new ClientAuthenticationException("Azure CLI not installed", null));
}
output.append(line);
}
String processOutput = output.toString();
process.waitFor(10, TimeUnit.SECONDS);
if (process.exitValue() != 0) {
if (processOutput.length() > 0) {
String redactedOutput = redactInfo("\"accessToken\": \"(.*?)(\"|$)", processOutput);
throw logger.logExceptionAsError(new ClientAuthenticationException(redactedOutput, null));
} else {
throw logger.logExceptionAsError(
new ClientAuthenticationException("Failed to invoke Azure CLI ", null));
}
}
Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(processOutput, Map.class,
SerializerEncoding.JSON);
String accessToken = objectMap.get("accessToken");
String time = objectMap.get("expiresOn");
String timeToSecond = time.substring(0, time.indexOf("."));
String timeJoinedWithT = String.join("T", timeToSecond.split(" "));
OffsetDateTime expiresOn = LocalDateTime.parse(timeJoinedWithT, DateTimeFormatter.ISO_LOCAL_DATE_TIME)
.atZone(ZoneId.systemDefault())
.toOffsetDateTime().withOffsetSameInstant(ZoneOffset.UTC);
token = new IdentityToken(accessToken, expiresOn, options);
} catch (IOException | InterruptedException e) {
throw logger.logExceptionAsError(new IllegalStateException(e));
} catch (RuntimeException e) {
return Mono.error(logger.logExceptionAsError(e));
} finally {
try {
if (reader != null) {
reader.close();
}
} catch (IOException ex) {
return Mono.error(logger.logExceptionAsError(new IllegalStateException(ex)));
}
}
return Mono.just(token);
} | class IdentityClient {
private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter();
private static final Random RANDOM = new Random();
private static final String WINDOWS_STARTER = "cmd.exe";
private static final String LINUX_MAC_STARTER = "/bin/sh";
private static final String WINDOWS_SWITCHER = "/c";
private static final String LINUX_MAC_SWITCHER = "-c";
private static final String WINDOWS_PROCESS_ERROR_MESSAGE = "'az' is not recognized";
private static final String LINUX_MAC_PROCESS_ERROR_MESSAGE = "(.*)az:(.*)not found";
private static final String DEFAULT_WINDOWS_SYSTEM_ROOT = System.getenv("SystemRoot");
private static final String DEFAULT_MAC_LINUX_PATH = "/bin/";
private final ClientLogger logger = new ClientLogger(IdentityClient.class);
private final IdentityClientOptions options;
private final PublicClientApplication publicClientApplication;
private final String tenantId;
private final String clientId;
private HttpPipelineAdapter httpPipelineAdapter;
/**
* Creates an IdentityClient with the given options.
*
* @param tenantId the tenant ID of the application.
* @param clientId the client ID of the application.
* @param options the options configuring the client.
*/
IdentityClient(String tenantId, String clientId, IdentityClientOptions options) {
// Fall back to the multi-tenant "common"/"organizations" style authority when no tenant is given.
if (tenantId == null) {
tenantId = "common";
}
if (options == null) {
options = new IdentityClientOptions();
}
this.tenantId = tenantId;
this.clientId = clientId;
this.options = options;
// Without a client id no MSAL public client can be built; credential types that
// don't need one (e.g. managed identity) leave it null.
if (clientId == null) {
this.publicClientApplication = null;
} else {
// Strip trailing slashes from the configured authority host before composing the URL.
String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/organizations/" + tenantId;
PublicClientApplication.Builder publicClientApplicationBuilder = PublicClientApplication.builder(clientId);
try {
publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl);
} catch (MalformedURLException e) {
throw logger.logExceptionAsWarning(new IllegalStateException(e));
}
// HTTP transport precedence: explicit pipeline > explicit client > proxy options > default client.
HttpPipeline httpPipeline = options.getHttpPipeline();
if (httpPipeline != null) {
httpPipelineAdapter = new HttpPipelineAdapter(httpPipeline);
publicClientApplicationBuilder.httpClient(httpPipelineAdapter);
} else {
HttpClient httpClient = options.getHttpClient();
if (httpClient != null) {
httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(httpClient));
publicClientApplicationBuilder.httpClient(httpPipelineAdapter);
} else if (options.getProxyOptions() != null) {
publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
} else {
httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(HttpClient.createDefault()));
publicClientApplicationBuilder.httpClient(httpPipelineAdapter);
}
}
if (options.getExecutorService() != null) {
publicClientApplicationBuilder.executorService(options.getExecutorService());
}
this.publicClientApplication = publicClientApplicationBuilder.build();
}
}
/**
* Asynchronously acquire a token from Active Directory with Azure CLI.
*
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
/**
* Asynchronously acquire a token from Active Directory with a client secret.
*
* @param clientSecret the client secret of the application
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<AccessToken> authenticateWithClientSecret(String clientSecret, TokenRequestContext request) {
// Strip trailing slashes before composing the tenant-specific authority URL.
String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId;
try {
ConfidentialClientApplication.Builder applicationBuilder =
ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.createFromSecret(clientSecret))
.authority(authorityUrl);
// Transport precedence mirrors the constructor: adapter (pipeline/client) first, then proxy.
if (httpPipelineAdapter != null) {
applicationBuilder.httpClient(httpPipelineAdapter);
} else if (options.getProxyOptions() != null) {
applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
}
if (options.getExecutorService() != null) {
applicationBuilder.executorService(options.getExecutorService());
}
// NOTE: the application is built and the token future started eagerly, before subscription.
ConfidentialClientApplication application = applicationBuilder.build();
return Mono.fromFuture(application.acquireToken(
ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
.build()))
.map(ar -> new MsalToken(ar, options));
} catch (MalformedURLException e) {
// A malformed authority host surfaces as an error signal rather than a throw.
return Mono.error(e);
}
}
/**
 * Assembles the HTTP pipeline used for MSAL requests over the supplied client:
 * before-retry policies, a retry policy, after-retry policies, then logging.
 */
private HttpPipeline setupPipeline(HttpClient httpClient) {
    List<HttpPipelinePolicy> pipelinePolicies = new ArrayList<>();
    HttpPolicyProviders.addBeforeRetryPolicies(pipelinePolicies);
    pipelinePolicies.add(new RetryPolicy());
    HttpPolicyProviders.addAfterRetryPolicies(pipelinePolicies);
    pipelinePolicies.add(new HttpLoggingPolicy(new HttpLogOptions()));
    HttpPipelineBuilder pipelineBuilder = new HttpPipelineBuilder();
    pipelineBuilder.httpClient(httpClient);
    pipelineBuilder.policies(pipelinePolicies.toArray(new HttpPipelinePolicy[0]));
    return pipelineBuilder.build();
}
/**
* Asynchronously acquire a token from Active Directory with a PKCS12 certificate.
*
* @param pfxCertificatePath the path to the PKCS12 certificate of the application
* @param pfxCertificatePassword the password protecting the PFX certificate
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<AccessToken> authenticateWithPfxCertificate(String pfxCertificatePath, String pfxCertificatePassword,
        TokenRequestContext request) {
    // Strip trailing slashes before composing the tenant-specific authority URL.
    String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId;
    // Defer file access and client construction until subscription time.
    return Mono.fromCallable(() -> {
        ConfidentialClientApplication.Builder applicationBuilder;
        // FIX: close the certificate stream once consumed; the original code leaked
        // the FileInputStream.
        try (FileInputStream pfxCertificateStream = new FileInputStream(pfxCertificatePath)) {
            applicationBuilder =
                ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.createFromCertificate(
                    pfxCertificateStream, pfxCertificatePassword))
                    .authority(authorityUrl);
        }
        // Transport precedence mirrors the constructor: adapter first, then proxy.
        if (httpPipelineAdapter != null) {
            applicationBuilder.httpClient(httpPipelineAdapter);
        } else if (options.getProxyOptions() != null) {
            applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
        }
        if (options.getExecutorService() != null) {
            applicationBuilder.executorService(options.getExecutorService());
        }
        return applicationBuilder.build();
    }).flatMap(application -> Mono.fromFuture(application.acquireToken(
        ClientCredentialParameters.builder(new HashSet<>(request.getScopes())).build())))
        .map(ar -> new MsalToken(ar, options));
}
/**
* Asynchronously acquire a token from Active Directory with a PEM certificate.
*
* @param pemCertificatePath the path to the PEM certificate of the application
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<AccessToken> authenticateWithPemCertificate(String pemCertificatePath, TokenRequestContext request) {
// Strip trailing slashes before composing the tenant-specific authority URL.
String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId;
try {
// NOTE: the PEM file is read (and the token future started) eagerly, before subscription.
byte[] pemCertificateBytes = Files.readAllBytes(Paths.get(pemCertificatePath));
// The same PEM bytes supply both the private key and the public certificate.
ConfidentialClientApplication.Builder applicationBuilder =
ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.createFromCertificate(
CertificateUtil.privateKeyFromPem(pemCertificateBytes),
CertificateUtil.publicKeyFromPem(pemCertificateBytes)))
.authority(authorityUrl);
// Transport precedence mirrors the constructor: adapter first, then proxy.
if (httpPipelineAdapter != null) {
applicationBuilder.httpClient(httpPipelineAdapter);
} else if (options.getProxyOptions() != null) {
applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
}
if (options.getExecutorService() != null) {
applicationBuilder.executorService(options.getExecutorService());
}
ConfidentialClientApplication application = applicationBuilder.build();
return Mono.fromFuture(application.acquireToken(
ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
.build()))
.map(ar -> new MsalToken(ar, options));
} catch (IOException e) {
// File read failures surface as an error signal rather than a throw.
return Mono.error(e);
}
}
/**
* Asynchronously acquire a token from Active Directory with a username and a password.
*
* @param request the details of the token request
* @param username the username of the user
* @param password the password of the user
* @return a Publisher that emits an AccessToken
*/
/**
 * Acquires a token with a username/password pair via MSAL's
 * {@code UserNamePasswordParameters} flow.
 */
public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request,
        String username, String password) {
    // Build the username/password parameters up front for readability.
    UserNamePasswordParameters parameters = UserNamePasswordParameters
        .builder(new HashSet<>(request.getScopes()), username, password.toCharArray())
        .build();
    return Mono.fromFuture(publicClientApplication.acquireToken(parameters))
        .map(authenticationResult -> new MsalToken(authenticationResult, options));
}
/**
* Asynchronously acquire a token from the currently logged in client.
*
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<MsalToken> authenticateWithUserRefreshToken(TokenRequestContext request, MsalToken msalToken) {
// Attempt a silent (cached/refresh-token based) acquisition, scoped to the prior
// token's account when one is available.
SilentParameters parameters;
if (msalToken.getAccount() != null) {
parameters = SilentParameters.builder(new HashSet<>(request.getScopes()), msalToken.getAccount()).build();
} else {
parameters = SilentParameters.builder(new HashSet<>(request.getScopes())).build();
}
// Defer the MSAL call so it runs (and its checked exception is mapped) per subscription.
return Mono.defer(() -> {
try {
return Mono.fromFuture(publicClientApplication.acquireTokenSilently(parameters))
.map(ar -> new MsalToken(ar, options));
} catch (MalformedURLException e) {
return Mono.error(e);
}
});
}
/**
* Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide
* a device code for login and the user must meet the challenge by authenticating in a browser on the current or a
* different device.
*
* @param request the details of the token request
* @param deviceCodeConsumer the user provided closure that will consume the device code challenge
* @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device
* code expires
*/
public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request,
Consumer<DeviceCodeInfo> deviceCodeConsumer) {
// The supplier form defers the MSAL call to subscription time; the consumer is invoked
// with the challenge details (user code, verification URI, computed expiry) when the
// service issues a device code.
return Mono.fromFuture(() -> {
DeviceCodeFlowParameters parameters = DeviceCodeFlowParameters.builder(new HashSet<>(request.getScopes()),
dc -> deviceCodeConsumer.accept(new DeviceCodeInfo(dc.userCode(), dc.deviceCode(),
dc.verificationUri(), OffsetDateTime.now().plusSeconds(dc.expiresIn()), dc.message()))).build();
return publicClientApplication.acquireToken(parameters);
}).map(ar -> new MsalToken(ar, options));
}
/**
* Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow.
*
* @param request the details of the token request
* @param authorizationCode the oauth2 authorization code
* @param redirectUrl the redirectUrl where the authorization code is sent to
* @return a Publisher that emits an AccessToken
*/
/**
 * Exchanges an OAuth2 authorization code for a token; the MSAL call is deferred to
 * subscription time via the supplier overload of {@code Mono.fromFuture}.
 */
public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode,
        URI redirectUrl) {
    return Mono.fromFuture(() -> {
        AuthorizationCodeParameters parameters = AuthorizationCodeParameters
            .builder(authorizationCode, redirectUrl)
            .scopes(new HashSet<>(request.getScopes()))
            .build();
        return publicClientApplication.acquireToken(parameters);
    }).map(authenticationResult -> new MsalToken(authenticationResult, options));
}
/**
* Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The
* credential will run a minimal local HttpServer at the given port, so {@code http:
* listed as a valid reply URL for the application.
*
* @param request the details of the token request
* @param port the port on which the HTTP server is listening
* @return a Publisher that emits an AccessToken
*/
// Starts a local listener on the given port, opens the system browser at the authorize
// endpoint, waits for the redirect carrying the authorization code, then exchanges it
// for a token. The listener is disposed on success, error, and URI-construction failure.
// NOTE(review): the two "http:"-prefixed literals below appear truncated in this copy
// (likely the localhost redirect URI); restore the full strings from version control.
public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, int port) {
String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId;
return AuthorizationCodeListener.create(port)
.flatMap(server -> {
URI redirectUri;
String browserUri;
try {
redirectUri = new URI(String.format("http:
// Compose the /oauth2/v2.0/authorize URL with a random state value.
browserUri =
String.format("%s/oauth2/v2.0/authorize?response_type=code&response_mode=query&prompt"
+ "=select_account&client_id=%s&redirect_uri=%s&state=%s&scope=%s",
authorityUrl,
clientId,
redirectUri.toString(),
UUID.randomUUID(),
String.join(" ", request.getScopes()));
} catch (URISyntaxException e) {
return server.dispose().then(Mono.error(e));
}
// Race the listener against the browser launch on a dedicated thread; the first
// emitted value is the authorization code.
return server.listen()
.mergeWith(Mono.<String>fromRunnable(() -> {
try {
openUrl(browserUri);
} catch (IOException e) {
throw logger.logExceptionAsError(new IllegalStateException(e));
}
}).subscribeOn(Schedulers.newSingle("browser")))
.next()
.flatMap(code -> authenticateWithAuthorizationCode(request, code, redirectUri))
.onErrorResume(t -> server.dispose().then(Mono.error(t)))
.flatMap(msalToken -> server.dispose().then(Mono.just(msalToken)));
});
}
/**
* Asynchronously acquire a token from the App Service Managed Service Identity endpoint.
*
* @param msiEndpoint the endpoint to acquire token from
* @param msiSecret the secret to acquire token with
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String msiEndpoint, String msiSecret,
        TokenRequestContext request) {
    // Defer the blocking HTTP exchange until subscription time.
    return Mono.fromCallable(() -> {
        String resource = ScopeUtil.scopesToResource(request.getScopes());
        HttpURLConnection connection = null;
        // Query string for the MSI endpoint (api-version 2017-09-01).
        StringBuilder payload = new StringBuilder();
        payload.append("resource=");
        payload.append(URLEncoder.encode(resource, "UTF-8"));
        payload.append("&api-version=");
        payload.append(URLEncoder.encode("2017-09-01", "UTF-8"));
        if (clientId != null) {
            // This endpoint takes "clientid" (no underscore), unlike IMDS's "client_id".
            payload.append("&clientid=");
            payload.append(URLEncoder.encode(clientId, "UTF-8"));
        }
        try {
            URL url = new URL(String.format("%s?%s", msiEndpoint, payload));
            connection = (HttpURLConnection) url.openConnection();
            connection.setRequestMethod("GET");
            if (msiSecret != null) {
                connection.setRequestProperty("Secret", msiSecret);
            }
            connection.setRequestProperty("Metadata", "true");
            connection.connect();
            String result;
            // FIX: close the Scanner (and its underlying stream); the original leaked it.
            try (Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name())
                    .useDelimiter("\\A")) {
                result = s.hasNext() ? s.next() : "";
            }
            MSIToken msiToken = SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON);
            return new IdentityToken(msiToken.getToken(), msiToken.getExpiresAt(), options);
        } finally {
            if (connection != null) {
                connection.disconnect();
            }
        }
    });
}
/**
* Asynchronously acquire a token from the Virtual Machine IMDS endpoint.
*
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) {
    String resource = ScopeUtil.scopesToResource(request.getScopes());
    StringBuilder payload = new StringBuilder();
    // Minimum backoff applied when IMDS answers 410 (endpoint upgrading).
    final int imdsUpgradeTimeInMs = 70 * 1000;
    try {
        payload.append("api-version=");
        payload.append(URLEncoder.encode("2018-02-01", "UTF-8"));
        payload.append("&resource=");
        payload.append(URLEncoder.encode(resource, "UTF-8"));
        if (clientId != null) {
            payload.append("&client_id=");
            payload.append(URLEncoder.encode(clientId, "UTF-8"));
        }
    } catch (IOException exception) {
        return Mono.error(exception);
    }
    return checkIMDSAvailable().flatMap(available -> Mono.fromCallable(() -> {
        int retry = 1;
        while (retry <= options.getMaxRetry()) {
            URL url = null;
            HttpURLConnection connection = null;
            try {
                // NOTE(review): this URL literal appears truncated in this copy
                // (presumably the IMDS token endpoint); restore from version control.
                url =
                new URL(String.format("http:
                payload.toString()));
                connection = (HttpURLConnection) url.openConnection();
                connection.setRequestMethod("GET");
                connection.setRequestProperty("Metadata", "true");
                connection.connect();
                String result;
                // FIX: close the Scanner (and its underlying stream); the original leaked it.
                try (Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name())
                        .useDelimiter("\\A")) {
                    result = s.hasNext() ? s.next() : "";
                }
                MSIToken msiToken = SERIALIZER_ADAPTER.deserialize(result,
                    MSIToken.class, SerializerEncoding.JSON);
                return new IdentityToken(msiToken.getToken(), msiToken.getExpiresAt(), options);
            } catch (IOException exception) {
                if (connection == null) {
                    throw logger.logExceptionAsError(new RuntimeException(
                        String.format("Could not connect to the url: %s.", url), exception));
                }
                int responseCode = connection.getResponseCode();
                // Retry transient statuses: 404, 410, 429 and 5xx.
                if (responseCode == 410
                    || responseCode == 429
                    || responseCode == 404
                    || (responseCode >= 500 && responseCode <= 599)) {
                    // FIX: Duration.getNano() exposes only the nanos-of-second field, and
                    // dividing by 1000 yields microseconds — whole-second backoffs computed
                    // to 0 ms. toMillis() gives the intended millisecond backoff.
                    int retryTimeoutInMs = (int) options.getRetryTimeout()
                        .apply(Duration.ofSeconds(RANDOM.nextInt(retry))).toMillis();
                    // 410 responses get at least the imdsUpgradeTimeInMs (70 s) floor.
                    retryTimeoutInMs =
                        (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? imdsUpgradeTimeInMs
                            : retryTimeoutInMs;
                    retry++;
                    if (retry > options.getMaxRetry()) {
                        break;
                    } else {
                        sleep(retryTimeoutInMs);
                    }
                } else {
                    throw logger.logExceptionAsError(new RuntimeException(
                        "Couldn't acquire access token from IMDS, verify your objectId, "
                            + "clientId or msiResourceId", exception));
                }
            } finally {
                if (connection != null) {
                    connection.disconnect();
                }
            }
        }
        throw logger.logExceptionAsError(new RuntimeException(
            String.format("MSI: Failed to acquire tokens after retrying %s times",
                options.getMaxRetry())));
    }));
}
private Mono<Boolean> checkIMDSAvailable() {
StringBuilder payload = new StringBuilder();
try {
payload.append("api-version=");
payload.append(URLEncoder.encode("2018-02-01", "UTF-8"));
} catch (IOException exception) {
return Mono.error(exception);
}
return Mono.fromCallable(() -> {
HttpURLConnection connection = null;
URL url = new URL(String.format("http:
payload.toString()));
try {
connection = (HttpURLConnection) url.openConnection();
connection.setRequestMethod("GET");
connection.setConnectTimeout(500);
connection.connect();
} finally {
if (connection != null) {
connection.disconnect();
}
}
return true;
});
}
private static void sleep(int millis) {
try {
Thread.sleep(millis);
} catch (InterruptedException ex) {
throw new IllegalStateException(ex);
}
}
private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) {
switch (options.getType()) {
case SOCKS4:
case SOCKS5:
return new Proxy(Type.SOCKS, options.getAddress());
case HTTP:
default:
return new Proxy(Type.HTTP, options.getAddress());
}
}
private String getSafeWorkingDirectory() {
if (isWindowsPlatform()) {
if (CoreUtils.isNullOrEmpty(DEFAULT_WINDOWS_SYSTEM_ROOT)) {
return null;
}
return DEFAULT_WINDOWS_SYSTEM_ROOT + "\\system32";
} else {
return DEFAULT_MAC_LINUX_PATH;
}
}
private boolean isWindowsPlatform() {
return System.getProperty("os.name").contains("Windows");
}
private String redactInfo(String regex, String input) {
return input.replaceAll(regex, "****");
}
void openUrl(String url) throws IOException {
Runtime rt = Runtime.getRuntime();
String os = System.getProperty("os.name").toLowerCase(Locale.ROOT);
if (os.contains("win")) {
rt.exec("rundll32 url.dll,FileProtocolHandler " + url);
} else if (os.contains("mac")) {
rt.exec("open " + url);
} else if (os.contains("nix") || os.contains("nux")) {
rt.exec("xdg-open " + url);
} else {
logger.error("Browser could not be opened - please open {} in a browser on this device.", url);
}
}
} | class IdentityClient {
private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter();
private static final Random RANDOM = new Random();
private static final String WINDOWS_STARTER = "cmd.exe";
private static final String LINUX_MAC_STARTER = "/bin/sh";
private static final String WINDOWS_SWITCHER = "/c";
private static final String LINUX_MAC_SWITCHER = "-c";
private static final String WINDOWS_PROCESS_ERROR_MESSAGE = "'az' is not recognized";
private static final String LINUX_MAC_PROCESS_ERROR_MESSAGE = "(.*)az:(.*)not found";
private static final String DEFAULT_WINDOWS_SYSTEM_ROOT = System.getenv("SystemRoot");
private static final String DEFAULT_MAC_LINUX_PATH = "/bin/";
private final ClientLogger logger = new ClientLogger(IdentityClient.class);
private final IdentityClientOptions options;
private final PublicClientApplication publicClientApplication;
private final String tenantId;
private final String clientId;
private HttpPipelineAdapter httpPipelineAdapter;
/**
* Creates an IdentityClient with the given options.
*
* @param tenantId the tenant ID of the application.
* @param clientId the client ID of the application.
* @param options the options configuring the client.
*/
IdentityClient(String tenantId, String clientId, IdentityClientOptions options) {
if (tenantId == null) {
tenantId = "common";
}
if (options == null) {
options = new IdentityClientOptions();
}
this.tenantId = tenantId;
this.clientId = clientId;
this.options = options;
if (clientId == null) {
this.publicClientApplication = null;
} else {
String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/organizations/" + tenantId;
PublicClientApplication.Builder publicClientApplicationBuilder = PublicClientApplication.builder(clientId);
try {
publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl);
} catch (MalformedURLException e) {
throw logger.logExceptionAsWarning(new IllegalStateException(e));
}
HttpPipeline httpPipeline = options.getHttpPipeline();
if (httpPipeline != null) {
httpPipelineAdapter = new HttpPipelineAdapter(httpPipeline);
publicClientApplicationBuilder.httpClient(httpPipelineAdapter);
} else {
HttpClient httpClient = options.getHttpClient();
if (httpClient != null) {
httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(httpClient));
publicClientApplicationBuilder.httpClient(httpPipelineAdapter);
} else if (options.getProxyOptions() != null) {
publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
} else {
httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(HttpClient.createDefault()));
publicClientApplicationBuilder.httpClient(httpPipelineAdapter);
}
}
if (options.getExecutorService() != null) {
publicClientApplicationBuilder.executorService(options.getExecutorService());
}
this.publicClientApplication = publicClientApplicationBuilder.build();
}
}
/**
* Asynchronously acquire a token from Active Directory with Azure CLI.
*
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
/**
* Asynchronously acquire a token from Active Directory with a client secret.
*
* @param clientSecret the client secret of the application
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<AccessToken> authenticateWithClientSecret(String clientSecret, TokenRequestContext request) {
String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId;
try {
ConfidentialClientApplication.Builder applicationBuilder =
ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.createFromSecret(clientSecret))
.authority(authorityUrl);
if (httpPipelineAdapter != null) {
applicationBuilder.httpClient(httpPipelineAdapter);
} else if (options.getProxyOptions() != null) {
applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
}
if (options.getExecutorService() != null) {
applicationBuilder.executorService(options.getExecutorService());
}
ConfidentialClientApplication application = applicationBuilder.build();
return Mono.fromFuture(application.acquireToken(
ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
.build()))
.map(ar -> new MsalToken(ar, options));
} catch (MalformedURLException e) {
return Mono.error(e);
}
}
private HttpPipeline setupPipeline(HttpClient httpClient) {
List<HttpPipelinePolicy> policies = new ArrayList<>();
HttpLogOptions httpLogOptions = new HttpLogOptions();
HttpPolicyProviders.addBeforeRetryPolicies(policies);
policies.add(new RetryPolicy());
HttpPolicyProviders.addAfterRetryPolicies(policies);
policies.add(new HttpLoggingPolicy(httpLogOptions));
return new HttpPipelineBuilder().httpClient(httpClient)
.policies(policies.toArray(new HttpPipelinePolicy[0])).build();
}
/**
* Asynchronously acquire a token from Active Directory with a PKCS12 certificate.
*
* @param pfxCertificatePath the path to the PKCS12 certificate of the application
* @param pfxCertificatePassword the password protecting the PFX certificate
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<AccessToken> authenticateWithPfxCertificate(String pfxCertificatePath, String pfxCertificatePassword,
    TokenRequestContext request) {
    String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId;
    // Deferred so the file read happens per subscription.
    return Mono.fromCallable(() -> {
        ConfidentialClientApplication.Builder applicationBuilder;
        // try-with-resources closes the certificate stream once MSAL has consumed it;
        // the previous code leaked this FileInputStream.
        try (FileInputStream pfxCertificateStream = new FileInputStream(pfxCertificatePath)) {
            applicationBuilder =
                ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.createFromCertificate(
                    pfxCertificateStream, pfxCertificatePassword))
                    .authority(authorityUrl);
        }
        // Transport preference: configured azure-core pipeline adapter first, then optional java.net proxy.
        if (httpPipelineAdapter != null) {
            applicationBuilder.httpClient(httpPipelineAdapter);
        } else if (options.getProxyOptions() != null) {
            applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
        }
        if (options.getExecutorService() != null) {
            applicationBuilder.executorService(options.getExecutorService());
        }
        return applicationBuilder.build();
    }).flatMap(application -> Mono.fromFuture(application.acquireToken(
        ClientCredentialParameters.builder(new HashSet<>(request.getScopes())).build())))
        .map(ar -> new MsalToken(ar, options));
}
/**
* Asynchronously acquire a token from Active Directory with a PEM certificate.
*
* @param pemCertificatePath the path to the PEM certificate of the application
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<AccessToken> authenticateWithPemCertificate(String pemCertificatePath, TokenRequestContext request) {
    String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId;
    try {
        // Read the PEM once; both the private key and the public certificate are parsed
        // from the same byte array.
        byte[] pemCertificateBytes = Files.readAllBytes(Paths.get(pemCertificatePath));
        ConfidentialClientApplication.Builder applicationBuilder =
            ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.createFromCertificate(
                CertificateUtil.privateKeyFromPem(pemCertificateBytes),
                CertificateUtil.publicKeyFromPem(pemCertificateBytes)))
                .authority(authorityUrl);
        // Transport preference: configured azure-core pipeline adapter first, then optional java.net proxy.
        if (httpPipelineAdapter != null) {
            applicationBuilder.httpClient(httpPipelineAdapter);
        } else if (options.getProxyOptions() != null) {
            applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
        }
        if (options.getExecutorService() != null) {
            applicationBuilder.executorService(options.getExecutorService());
        }
        ConfidentialClientApplication application = applicationBuilder.build();
        return Mono.fromFuture(application.acquireToken(
            ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
                .build()))
            .map(ar -> new MsalToken(ar, options));
    } catch (IOException e) {
        // Covers the file read; MalformedURLException from authority(...) is also an IOException.
        return Mono.error(e);
    }
}
/**
* Asynchronously acquire a token from Active Directory with a username and a password.
*
* @param request the details of the token request
* @param username the username of the user
* @param password the password of the user
* @return a Publisher that emits an AccessToken
*/
public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request,
    String username, String password) {
    // Build the resource-owner-password-grant request, then adapt MSAL's future into a Mono.
    // Matching the original, acquireToken is started eagerly; only completion is deferred.
    UserNamePasswordParameters parameters = UserNamePasswordParameters
        .builder(new HashSet<>(request.getScopes()), username, password.toCharArray())
        .build();
    return Mono.fromFuture(publicClientApplication.acquireToken(parameters))
        .map(result -> new MsalToken(result, options));
}
/**
* Asynchronously acquire a token from the currently logged in client.
*
* @param request the details of the token request
* @param msalToken the cached token whose account, if present, scopes the silent acquisition
* @return a Publisher that emits an AccessToken
*/
public Mono<MsalToken> authenticateWithUserRefreshToken(TokenRequestContext request, MsalToken msalToken) {
    SilentParameters parameters;
    // Scope the silent request to the cached account when the previous token carried one;
    // otherwise let MSAL pick from its cache.
    if (msalToken.getAccount() != null) {
        parameters = SilentParameters.builder(new HashSet<>(request.getScopes()), msalToken.getAccount()).build();
    } else {
        parameters = SilentParameters.builder(new HashSet<>(request.getScopes())).build();
    }
    // Deferred so acquireTokenSilently runs (and any MalformedURLException surfaces) per subscription.
    return Mono.defer(() -> {
        try {
            return Mono.fromFuture(publicClientApplication.acquireTokenSilently(parameters))
                .map(ar -> new MsalToken(ar, options));
        } catch (MalformedURLException e) {
            return Mono.error(e);
        }
    });
}
/**
* Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide
* a device code for login and the user must meet the challenge by authenticating in a browser on the current or a
* different device.
*
* @param request the details of the token request
* @param deviceCodeConsumer the user provided closure that will consume the device code challenge
* @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device
* code expires
*/
public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request,
    Consumer<DeviceCodeInfo> deviceCodeConsumer) {
    // Supplier form of fromFuture defers starting the device-code flow until subscription.
    return Mono.fromFuture(() -> {
        DeviceCodeFlowParameters parameters = DeviceCodeFlowParameters.builder(new HashSet<>(request.getScopes()),
            // Re-wrap MSAL's device-code payload into the public DeviceCodeInfo type; expiry is
            // computed relative to "now" plus the advertised expires-in seconds.
            dc -> deviceCodeConsumer.accept(new DeviceCodeInfo(dc.userCode(), dc.deviceCode(),
                dc.verificationUri(), OffsetDateTime.now().plusSeconds(dc.expiresIn()), dc.message()))).build();
        return publicClientApplication.acquireToken(parameters);
    }).map(ar -> new MsalToken(ar, options));
}
/**
* Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow.
*
* @param request the details of the token request
* @param authorizationCode the oauth2 authorization code
* @param redirectUrl the redirectUrl where the authorization code is sent to
* @return a Publisher that emits an AccessToken
*/
public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode,
    URI redirectUrl) {
    // The supplier form keeps the token request lazy: nothing happens until subscription,
    // exactly as in the original chained-builder version.
    return Mono.fromFuture(() -> {
        AuthorizationCodeParameters parameters = AuthorizationCodeParameters
            .builder(authorizationCode, redirectUrl)
            .scopes(new HashSet<>(request.getScopes()))
            .build();
        return publicClientApplication.acquireToken(parameters);
    }).map(result -> new MsalToken(result, options));
}
/**
* Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The
* credential will run a minimal local HttpServer at the given port, so {@code http://localhost:<port>} must be
* listed as a valid reply URL for the application.
*
* @param request the details of the token request
* @param port the port on which the HTTP server is listening
* @return a Publisher that emits an AccessToken
*/
public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, int port) {
    String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId;
    // Start a local listener on the given port to receive the authorization-code redirect.
    return AuthorizationCodeListener.create(port)
        .flatMap(server -> {
            URI redirectUri;
            String browserUri;
            try {
                // NOTE(review): this line appears truncated by extraction — presumably
                // String.format("http://localhost:%s", port); confirm against version control.
                redirectUri = new URI(String.format("http:
                // Hand-built /authorize URL; scopes are space-joined per OAuth2 convention.
                browserUri =
                    String.format("%s/oauth2/v2.0/authorize?response_type=code&response_mode=query&prompt"
                        + "=select_account&client_id=%s&redirect_uri=%s&state=%s&scope=%s",
                        authorityUrl,
                        clientId,
                        redirectUri.toString(),
                        UUID.randomUUID(),
                        String.join(" ", request.getScopes()));
            } catch (URISyntaxException e) {
                // Tear the listener down before propagating the error.
                return server.dispose().then(Mono.error(e));
            }
            // Race listening for the redirect against opening the browser; the browser launch is
            // pushed onto its own scheduler so it never blocks the listener.
            return server.listen()
                .mergeWith(Mono.<String>fromRunnable(() -> {
                    try {
                        openUrl(browserUri);
                    } catch (IOException e) {
                        throw logger.logExceptionAsError(new IllegalStateException(e));
                    }
                }).subscribeOn(Schedulers.newSingle("browser")))
                .next()
                .flatMap(code -> authenticateWithAuthorizationCode(request, code, redirectUri))
                // Dispose the local server on both the failure and success paths.
                .onErrorResume(t -> server.dispose().then(Mono.error(t)))
                .flatMap(msalToken -> server.dispose().then(Mono.just(msalToken)));
        });
}
/**
* Asynchronously acquire a token from the App Service Managed Service Identity endpoint.
*
* @param msiEndpoint the endpoint to acquire token from
* @param msiSecret the secret to acquire token with
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String msiEndpoint, String msiSecret,
    TokenRequestContext request) {
    // Deferred so the blocking HTTP exchange happens on subscription.
    return Mono.fromCallable(() -> {
        String resource = ScopeUtil.scopesToResource(request.getScopes());
        HttpURLConnection connection = null;
        // Query string for the 2017-09-01 App Service MSI protocol.
        StringBuilder payload = new StringBuilder();
        payload.append("resource=");
        payload.append(URLEncoder.encode(resource, "UTF-8"));
        payload.append("&api-version=");
        payload.append(URLEncoder.encode("2017-09-01", "UTF-8"));
        if (clientId != null) {
            payload.append("&clientid=");
            payload.append(URLEncoder.encode(clientId, "UTF-8"));
        }
        try {
            URL url = new URL(String.format("%s?%s", msiEndpoint, payload));
            connection = (HttpURLConnection) url.openConnection();
            connection.setRequestMethod("GET");
            // The endpoint authenticates callers via the Secret header when one is configured.
            if (msiSecret != null) {
                connection.setRequestProperty("Secret", msiSecret);
            }
            connection.setRequestProperty("Metadata", "true");
            connection.connect();
            // "\\A" delimiter makes the Scanner return the whole response body as one token.
            Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name())
                .useDelimiter("\\A");
            String result = s.hasNext() ? s.next() : "";
            MSIToken msiToken = SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON);
            return new IdentityToken(msiToken.getToken(), msiToken.getExpiresAt(), options);
        } finally {
            // Always release the connection, success or failure.
            if (connection != null) {
                connection.disconnect();
            }
        }
    });
}
/**
* Asynchronously acquire a token from the Virtual Machine IMDS endpoint.
*
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) {
    String resource = ScopeUtil.scopesToResource(request.getScopes());
    StringBuilder payload = new StringBuilder();
    // HTTP 410 means IMDS is upgrading; wait at least 70 seconds before retrying in that case.
    final int imdsUpgradeTimeInMs = 70 * 1000;
    try {
        payload.append("api-version=");
        payload.append(URLEncoder.encode("2018-02-01", "UTF-8"));
        payload.append("&resource=");
        payload.append(URLEncoder.encode(resource, "UTF-8"));
        if (clientId != null) {
            payload.append("&client_id=");
            payload.append(URLEncoder.encode(clientId, "UTF-8"));
        }
    } catch (IOException exception) {
        return Mono.error(exception);
    }
    // Probe reachability first, then run the blocking retry loop on subscription.
    return checkIMDSAvailable().flatMap(available -> Mono.fromCallable(() -> {
        int retry = 1;
        while (retry <= options.getMaxRetry()) {
            URL url = null;
            HttpURLConnection connection = null;
            try {
                // NOTE(review): this URL literal appears truncated by extraction — presumably the
                // IMDS token endpoint; confirm against version control.
                url =
                    new URL(String.format("http:
                    payload.toString()));
                connection = (HttpURLConnection) url.openConnection();
                connection.setRequestMethod("GET");
                connection.setRequestProperty("Metadata", "true");
                connection.connect();
                // "\\A" delimiter reads the whole response body as one token.
                Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name())
                    .useDelimiter("\\A");
                String result = s.hasNext() ? s.next() : "";
                MSIToken msiToken = SERIALIZER_ADAPTER.deserialize(result,
                    MSIToken.class, SerializerEncoding.JSON);
                return new IdentityToken(msiToken.getToken(), msiToken.getExpiresAt(), options);
            } catch (IOException exception) {
                if (connection == null) {
                    throw logger.logExceptionAsError(new RuntimeException(
                        String.format("Could not connect to the url: %s.", url), exception));
                }
                int responseCode = connection.getResponseCode();
                // Transient statuses retried: 410 (upgrading), 429 (throttled), 404, and 5xx.
                if (responseCode == 410
                    || responseCode == 429
                    || responseCode == 404
                    || (responseCode >= 500 && responseCode <= 599)) {
                    // NOTE(review): Duration#getNano() returns nanoseconds, so "/ 1000" yields
                    // microseconds, yet the value is passed to sleep(millis) below — this looks
                    // like a unit bug making back-off far shorter than intended; confirm.
                    int retryTimeoutInMs = options.getRetryTimeout()
                        .apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000;
                    retryTimeoutInMs =
                        (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? imdsUpgradeTimeInMs
                            : retryTimeoutInMs;
                    retry++;
                    if (retry > options.getMaxRetry()) {
                        break;
                    } else {
                        sleep(retryTimeoutInMs);
                    }
                } else {
                    // Non-retryable status: surface immediately with guidance.
                    throw logger.logExceptionAsError(new RuntimeException(
                        "Couldn't acquire access token from IMDS, verify your objectId, "
                            + "clientId or msiResourceId", exception));
                }
            } finally {
                if (connection != null) {
                    connection.disconnect();
                }
            }
        }
        // Retry budget exhausted.
        throw logger.logExceptionAsError(new RuntimeException(
            String.format("MSI: Failed to acquire tokens after retrying %s times",
                options.getMaxRetry())));
    }));
}
/**
 * Probes whether the IMDS endpoint is reachable. Emits {@code true} on success and errors with
 * the underlying IOException otherwise; the token itself is not requested here.
 */
private Mono<Boolean> checkIMDSAvailable() {
    StringBuilder payload = new StringBuilder();
    try {
        payload.append("api-version=");
        payload.append(URLEncoder.encode("2018-02-01", "UTF-8"));
    } catch (IOException exception) {
        return Mono.error(exception);
    }
    return Mono.fromCallable(() -> {
        HttpURLConnection connection = null;
        // NOTE(review): this URL literal appears truncated by extraction — presumably the
        // IMDS endpoint; confirm against version control.
        URL url = new URL(String.format("http:
        payload.toString()));
        try {
            connection = (HttpURLConnection) url.openConnection();
            connection.setRequestMethod("GET");
            // Short 500 ms connect timeout: this is only a reachability probe, so fail fast
            // when not running on an Azure VM.
            connection.setConnectTimeout(500);
            connection.connect();
        } finally {
            if (connection != null) {
                connection.disconnect();
            }
        }
        return true;
    });
}
/**
 * Sleeps the current thread for the given number of milliseconds, used between IMDS retries.
 *
 * @param millis how long to sleep
 * @throws IllegalStateException if the sleep is interrupted
 */
private static void sleep(int millis) {
    try {
        Thread.sleep(millis);
    } catch (InterruptedException ex) {
        // Restore the interrupt status so callers further up the stack can still observe
        // the interruption; the previous code silently swallowed the flag.
        Thread.currentThread().interrupt();
        throw new IllegalStateException(ex);
    }
}
/**
 * Converts azure-core proxy options into a {@link java.net.Proxy}: SOCKS4/SOCKS5 map to a
 * SOCKS proxy, everything else (including HTTP) maps to an HTTP proxy at the same address.
 */
private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) {
    Type proxyType;
    switch (options.getType()) {
        case SOCKS4:
        case SOCKS5:
            proxyType = Type.SOCKS;
            break;
        case HTTP:
        default:
            proxyType = Type.HTTP;
            break;
    }
    return new Proxy(proxyType, options.getAddress());
}
/**
 * Returns a trusted working directory from which to launch the Azure CLI process, or
 * {@code null} when none can be determined (only possible on Windows with no %SystemRoot%).
 */
private String getSafeWorkingDirectory() {
    // Non-Windows platforms always use the fixed /bin/ path.
    if (!isWindowsPlatform()) {
        return DEFAULT_MAC_LINUX_PATH;
    }
    // On Windows a valid %SystemRoot% is required to build the system32 path.
    return CoreUtils.isNullOrEmpty(DEFAULT_WINDOWS_SYSTEM_ROOT)
        ? null
        : DEFAULT_WINDOWS_SYSTEM_ROOT + "\\system32";
}
/** Returns whether the JVM reports a Windows operating system (e.g. "Windows 10"). */
private boolean isWindowsPlatform() {
    String osName = System.getProperty("os.name");
    return osName.contains("Windows");
}
/**
 * Masks every match of the caller-supplied pattern with "****" so secrets (e.g. access
 * tokens in CLI output) never reach logs or exception messages.
 */
private String redactInfo(String regex, String input) {
    final String mask = "****";
    return input.replaceAll(regex, mask);
}
/**
 * Opens the given URL in the platform's default browser.
 * <p>
 * The commands are passed as argument arrays so the URL reaches the launcher as a single
 * argument: the previous {@code Runtime.exec(String)} form tokenized on whitespace, which
 * broke authorize URLs containing space-joined scopes.
 *
 * @param url the URL to open
 * @throws IOException if the launcher process cannot be started
 */
void openUrl(String url) throws IOException {
    Runtime rt = Runtime.getRuntime();
    String os = System.getProperty("os.name").toLowerCase(Locale.ROOT);
    if (os.contains("win")) {
        rt.exec(new String[] { "rundll32", "url.dll,FileProtocolHandler", url });
    } else if (os.contains("mac")) {
        rt.exec(new String[] { "open", url });
    } else if (os.contains("nix") || os.contains("nux")) {
        rt.exec(new String[] { "xdg-open", url });
    } else {
        // No known launcher for this platform; ask the user to open the URL manually.
        logger.error("Browser could not be opened - please open {} in a browser on this device.", url);
    }
}
} |
```suggestion } catch (IOException | InterruptedException e) { throw logger.logExceptionAsError(new IllegalStateException(e)); } ``` | public Mono<AccessToken> authenticateWithAzureCli(TokenRequestContext request) {
// Body of authenticateWithAzureCli: shells out to `az account get-access-token` and parses
// the JSON it prints. (Signature and closing brace sit on adjacent dataset-delimiter lines.)
String azCommand = "az account get-access-token --output json --resource ";
StringBuilder command = new StringBuilder();
command.append(azCommand);
String scopes = ScopeUtil.scopesToResource(request.getScopes());
// Reject malformed scopes before anything is executed.
try {
    ScopeUtil.validateScope(scopes);
} catch (IllegalArgumentException ex) {
    return Mono.error(logger.logExceptionAsError(ex));
}
command.append(scopes);
AccessToken token = null;
BufferedReader reader = null;
try {
    // Pick the platform shell ("cmd.exe /c" vs "/bin/sh -c") to run the CLI through.
    String starter;
    String switcher;
    if (isWindowsPlatform()) {
        starter = WINDOWS_STARTER;
        switcher = WINDOWS_SWITCHER;
    } else {
        starter = LINUX_MAC_STARTER;
        switcher = LINUX_MAC_SWITCHER;
    }
    ProcessBuilder builder = new ProcessBuilder(starter, switcher, command.toString());
    // Run from a trusted directory so a malicious `az` in the cwd is not picked up.
    String workingDirectory = getSafeWorkingDirectory();
    if (workingDirectory != null) {
        builder.directory(new File(workingDirectory));
    } else {
        throw logger.logExceptionAsError(new IllegalStateException("A Safe Working directory could not be"
            + " found to execute CLI command from."));
    }
    builder.redirectErrorStream(true);
    Process process = builder.start();
    reader = new BufferedReader(new InputStreamReader(process.getInputStream(), "UTF-8"));
    String line;
    StringBuilder output = new StringBuilder();
    while (true) {
        line = reader.readLine();
        if (line == null) {
            break;
        }
        // Shell-level "command not found" markers mean the CLI is not installed.
        if (line.startsWith(WINDOWS_PROCESS_ERROR_MESSAGE) || line.matches(LINUX_MAC_PROCESS_ERROR_MESSAGE)) {
            throw logger.logExceptionAsError(
                new ClientAuthenticationException("Azure CLI not installed", null));
        }
        output.append(line);
    }
    String processOutput = output.toString();
    // NOTE(review): waitFor() blocks without a timeout; consider a bounded wait.
    process.waitFor();
    if (process.exitValue() != 0) {
        if (processOutput.length() > 0) {
            // Redact the accessToken value before putting CLI output in an exception message.
            String redactedOutput = redactInfo("\"accessToken\": \"(.*?)(\"|$)", processOutput);
            throw logger.logExceptionAsError(new ClientAuthenticationException(redactedOutput, null));
        } else {
            throw logger.logExceptionAsError(
                new ClientAuthenticationException("Failed to invoke Azure CLI ", null));
        }
    }
    Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(processOutput, Map.class,
        SerializerEncoding.JSON);
    String accessToken = objectMap.get("accessToken");
    String time = objectMap.get("expiresOn");
    // "expiresOn" is local time like "2020-01-01 12:00:00.000000"; strip the fraction, join
    // date and time with 'T', then convert from the system zone to UTC.
    String timeToSecond = time.substring(0, time.indexOf("."));
    String timeJoinedWithT = String.join("T", timeToSecond.split(" "));
    OffsetDateTime expiresOn = LocalDateTime.parse(timeJoinedWithT, DateTimeFormatter.ISO_LOCAL_DATE_TIME)
        .atZone(ZoneId.systemDefault())
        .toOffsetDateTime().withOffsetSameInstant(ZoneOffset.UTC);
    token = new IdentityToken(accessToken, expiresOn, options);
} catch (IOException e) {
    // NOTE(review): this catch and the next are identical — merge into a multi-catch
    // (IOException | InterruptedException e).
    throw logger.logExceptionAsError(new IllegalStateException(e));
} catch (InterruptedException e) {
    // NOTE(review): the interrupt flag is swallowed here; call Thread.currentThread().interrupt().
    throw logger.logExceptionAsError(new IllegalStateException(e));
} catch (RuntimeException e) {
    return Mono.error(logger.logExceptionAsError(e));
} finally {
    try {
        if (reader != null) {
            reader.close();
        }
    } catch (IOException ex) {
        // NOTE(review): returning from finally discards any exception already in flight.
        return Mono.error(logger.logExceptionAsError(new IllegalStateException(ex)));
    }
}
return Mono.just(token);
} | throw logger.logExceptionAsError(new IllegalStateException(e)); | public Mono<AccessToken> authenticateWithAzureCli(TokenRequestContext request) {
// Body of the revised authenticateWithAzureCli (the dataset row's "after" cell): same flow as
// the original but with a bounded waitFor and a merged multi-catch.
String azCommand = "az account get-access-token --output json --resource ";
StringBuilder command = new StringBuilder();
command.append(azCommand);
String scopes = ScopeUtil.scopesToResource(request.getScopes());
// Reject malformed scopes before anything is executed.
try {
    ScopeUtil.validateScope(scopes);
} catch (IllegalArgumentException ex) {
    return Mono.error(logger.logExceptionAsError(ex));
}
command.append(scopes);
AccessToken token = null;
BufferedReader reader = null;
try {
    // Pick the platform shell ("cmd.exe /c" vs "/bin/sh -c") to run the CLI through.
    String starter;
    String switcher;
    if (isWindowsPlatform()) {
        starter = WINDOWS_STARTER;
        switcher = WINDOWS_SWITCHER;
    } else {
        starter = LINUX_MAC_STARTER;
        switcher = LINUX_MAC_SWITCHER;
    }
    ProcessBuilder builder = new ProcessBuilder(starter, switcher, command.toString());
    // Run from a trusted directory so a malicious `az` in the cwd is not picked up.
    String workingDirectory = getSafeWorkingDirectory();
    if (workingDirectory != null) {
        builder.directory(new File(workingDirectory));
    } else {
        throw logger.logExceptionAsError(new IllegalStateException("A Safe Working directory could not be"
            + " found to execute CLI command from."));
    }
    builder.redirectErrorStream(true);
    Process process = builder.start();
    reader = new BufferedReader(new InputStreamReader(process.getInputStream(), "UTF-8"));
    String line;
    StringBuilder output = new StringBuilder();
    while (true) {
        line = reader.readLine();
        if (line == null) {
            break;
        }
        // Shell-level "command not found" markers mean the CLI is not installed.
        if (line.startsWith(WINDOWS_PROCESS_ERROR_MESSAGE) || line.matches(LINUX_MAC_PROCESS_ERROR_MESSAGE)) {
            throw logger.logExceptionAsError(
                new ClientAuthenticationException("Azure CLI not installed", null));
        }
        output.append(line);
    }
    String processOutput = output.toString();
    // NOTE(review): the boolean result of the bounded wait is ignored; if the process is still
    // running after 10s, exitValue() below throws IllegalThreadStateException — confirm intended.
    process.waitFor(10, TimeUnit.SECONDS);
    if (process.exitValue() != 0) {
        if (processOutput.length() > 0) {
            // Redact the accessToken value before putting CLI output in an exception message.
            String redactedOutput = redactInfo("\"accessToken\": \"(.*?)(\"|$)", processOutput);
            throw logger.logExceptionAsError(new ClientAuthenticationException(redactedOutput, null));
        } else {
            throw logger.logExceptionAsError(
                new ClientAuthenticationException("Failed to invoke Azure CLI ", null));
        }
    }
    Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(processOutput, Map.class,
        SerializerEncoding.JSON);
    String accessToken = objectMap.get("accessToken");
    String time = objectMap.get("expiresOn");
    // "expiresOn" is local time; strip the fraction, join date and time with 'T', convert to UTC.
    String timeToSecond = time.substring(0, time.indexOf("."));
    String timeJoinedWithT = String.join("T", timeToSecond.split(" "));
    OffsetDateTime expiresOn = LocalDateTime.parse(timeJoinedWithT, DateTimeFormatter.ISO_LOCAL_DATE_TIME)
        .atZone(ZoneId.systemDefault())
        .toOffsetDateTime().withOffsetSameInstant(ZoneOffset.UTC);
    token = new IdentityToken(accessToken, expiresOn, options);
} catch (IOException | InterruptedException e) {
    // NOTE(review): on InterruptedException the interrupt flag should be restored with
    // Thread.currentThread().interrupt() before rethrowing.
    throw logger.logExceptionAsError(new IllegalStateException(e));
} catch (RuntimeException e) {
    return Mono.error(logger.logExceptionAsError(e));
} finally {
    try {
        if (reader != null) {
            reader.close();
        }
    } catch (IOException ex) {
        // NOTE(review): returning from finally discards any exception already in flight.
        return Mono.error(logger.logExceptionAsError(new IllegalStateException(ex)));
    }
}
return Mono.just(token);
} | class IdentityClient {
// Shared JSON serializer for decoding token-endpoint and CLI responses.
private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter();
// Adds jitter to the IMDS retry back-off.
private static final Random RANDOM = new Random();
// Platform shell launchers used to invoke the Azure CLI.
private static final String WINDOWS_STARTER = "cmd.exe";
private static final String LINUX_MAC_STARTER = "/bin/sh";
private static final String WINDOWS_SWITCHER = "/c";
private static final String LINUX_MAC_SWITCHER = "-c";
// Output markers indicating the Azure CLI is not installed.
private static final String WINDOWS_PROCESS_ERROR_MESSAGE = "'az' is not recognized";
private static final String LINUX_MAC_PROCESS_ERROR_MESSAGE = "(.*)az:(.*)not found";
// Trusted working directories for spawning the CLI process.
private static final String DEFAULT_WINDOWS_SYSTEM_ROOT = System.getenv("SystemRoot");
private static final String DEFAULT_MAC_LINUX_PATH = "/bin/";
private final ClientLogger logger = new ClientLogger(IdentityClient.class);
private final IdentityClientOptions options;
// Null when no clientId was supplied (public-client flows unavailable).
private final PublicClientApplication publicClientApplication;
private final String tenantId;
private final String clientId;
// Set when an azure-core pipeline/client backs MSAL's HTTP transport.
private HttpPipelineAdapter httpPipelineAdapter;
/**
* Creates an IdentityClient with the given options.
*
* @param tenantId the tenant ID of the application.
* @param clientId the client ID of the application.
* @param options the options configuring the client.
*/
IdentityClient(String tenantId, String clientId, IdentityClientOptions options) {
    // Fall back to the multi-tenant "common" endpoint when no tenant was supplied.
    if (tenantId == null) {
        tenantId = "common";
    }
    if (options == null) {
        options = new IdentityClientOptions();
    }
    this.tenantId = tenantId;
    this.clientId = clientId;
    this.options = options;
    if (clientId == null) {
        // No client id means no public-client (user) flows; leave the application null.
        this.publicClientApplication = null;
    } else {
        // Public-client flows authenticate against the "organizations" authority.
        String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/organizations/" + tenantId;
        PublicClientApplication.Builder publicClientApplicationBuilder = PublicClientApplication.builder(clientId);
        try {
            publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl);
        } catch (MalformedURLException e) {
            throw logger.logExceptionAsWarning(new IllegalStateException(e));
        }
        // Transport preference: explicit pipeline > explicit HttpClient > java.net proxy > default client.
        HttpPipeline httpPipeline = options.getHttpPipeline();
        if (httpPipeline != null) {
            httpPipelineAdapter = new HttpPipelineAdapter(httpPipeline);
            publicClientApplicationBuilder.httpClient(httpPipelineAdapter);
        } else {
            HttpClient httpClient = options.getHttpClient();
            if (httpClient != null) {
                httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(httpClient));
                publicClientApplicationBuilder.httpClient(httpPipelineAdapter);
            } else if (options.getProxyOptions() != null) {
                publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
            } else {
                httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(HttpClient.createDefault()));
                publicClientApplicationBuilder.httpClient(httpPipelineAdapter);
            }
        }
        if (options.getExecutorService() != null) {
            publicClientApplicationBuilder.executorService(options.getExecutorService());
        }
        this.publicClientApplication = publicClientApplicationBuilder.build();
    }
}
/**
* Asynchronously acquire a token from Active Directory with Azure CLI.
*
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
/**
* Asynchronously acquire a token from Active Directory with a client secret.
*
* @param clientSecret the client secret of the application
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<AccessToken> authenticateWithClientSecret(String clientSecret, TokenRequestContext request) {
    // Authority is "<host>/<tenant>"; trailing slashes on the configured host are stripped first.
    String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId;
    try {
        ConfidentialClientApplication.Builder applicationBuilder =
            ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.createFromSecret(clientSecret))
                .authority(authorityUrl);
        // Transport preference: configured azure-core pipeline adapter first, then optional java.net proxy.
        if (httpPipelineAdapter != null) {
            applicationBuilder.httpClient(httpPipelineAdapter);
        } else if (options.getProxyOptions() != null) {
            applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
        }
        if (options.getExecutorService() != null) {
            applicationBuilder.executorService(options.getExecutorService());
        }
        ConfidentialClientApplication application = applicationBuilder.build();
        // acquireToken is invoked eagerly; only the future's completion is deferred.
        return Mono.fromFuture(application.acquireToken(
            ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
                .build()))
            .map(ar -> new MsalToken(ar, options));
    } catch (MalformedURLException e) {
        // Thrown by authority(...) when the composed authority URL is malformed.
        return Mono.error(e);
    }
}
// Builds a minimal azure-core HTTP pipeline (retry + logging policies) around the given client,
// for adapting into MSAL's HTTP transport.
private HttpPipeline setupPipeline(HttpClient httpClient) {
    List<HttpPipelinePolicy> policies = new ArrayList<>();
    HttpLogOptions httpLogOptions = new HttpLogOptions();
    // Standard ordering: provider hooks around the retry policy, logging last.
    HttpPolicyProviders.addBeforeRetryPolicies(policies);
    policies.add(new RetryPolicy());
    HttpPolicyProviders.addAfterRetryPolicies(policies);
    policies.add(new HttpLoggingPolicy(httpLogOptions));
    return new HttpPipelineBuilder().httpClient(httpClient)
        .policies(policies.toArray(new HttpPipelinePolicy[0])).build();
}
/**
* Asynchronously acquire a token from Active Directory with a PKCS12 certificate.
*
* @param pfxCertificatePath the path to the PKCS12 certificate of the application
* @param pfxCertificatePassword the password protecting the PFX certificate
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<AccessToken> authenticateWithPfxCertificate(String pfxCertificatePath, String pfxCertificatePassword,
    TokenRequestContext request) {
    String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId;
    // Deferred so the file read happens per subscription.
    return Mono.fromCallable(() -> {
        ConfidentialClientApplication.Builder applicationBuilder;
        // try-with-resources closes the certificate stream once MSAL has consumed it;
        // the previous code leaked this FileInputStream.
        try (FileInputStream pfxCertificateStream = new FileInputStream(pfxCertificatePath)) {
            applicationBuilder =
                ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.createFromCertificate(
                    pfxCertificateStream, pfxCertificatePassword))
                    .authority(authorityUrl);
        }
        // Transport preference: configured azure-core pipeline adapter first, then optional java.net proxy.
        if (httpPipelineAdapter != null) {
            applicationBuilder.httpClient(httpPipelineAdapter);
        } else if (options.getProxyOptions() != null) {
            applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
        }
        if (options.getExecutorService() != null) {
            applicationBuilder.executorService(options.getExecutorService());
        }
        return applicationBuilder.build();
    }).flatMap(application -> Mono.fromFuture(application.acquireToken(
        ClientCredentialParameters.builder(new HashSet<>(request.getScopes())).build())))
        .map(ar -> new MsalToken(ar, options));
}
/**
* Asynchronously acquire a token from Active Directory with a PEM certificate.
*
* @param pemCertificatePath the path to the PEM certificate of the application
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<AccessToken> authenticateWithPemCertificate(String pemCertificatePath, TokenRequestContext request) {
    String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId;
    try {
        // Read the PEM once; private key and certificate are both parsed from the same bytes.
        byte[] pemCertificateBytes = Files.readAllBytes(Paths.get(pemCertificatePath));
        ConfidentialClientApplication.Builder applicationBuilder =
            ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.createFromCertificate(
                CertificateUtil.privateKeyFromPem(pemCertificateBytes),
                CertificateUtil.publicKeyFromPem(pemCertificateBytes)))
                .authority(authorityUrl);
        // Transport preference: configured azure-core pipeline adapter first, then optional java.net proxy.
        if (httpPipelineAdapter != null) {
            applicationBuilder.httpClient(httpPipelineAdapter);
        } else if (options.getProxyOptions() != null) {
            applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
        }
        if (options.getExecutorService() != null) {
            applicationBuilder.executorService(options.getExecutorService());
        }
        ConfidentialClientApplication application = applicationBuilder.build();
        return Mono.fromFuture(application.acquireToken(
            ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
                .build()))
            .map(ar -> new MsalToken(ar, options));
    } catch (IOException e) {
        // Covers the file read; MalformedURLException from authority(...) is also an IOException.
        return Mono.error(e);
    }
}
/**
* Asynchronously acquire a token from Active Directory with a username and a password.
*
* @param request the details of the token request
* @param username the username of the user
* @param password the password of the user
* @return a Publisher that emits an AccessToken
*/
public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request,
    String username, String password) {
    // Resource-owner-password grant; acquireToken is started eagerly, completion is deferred.
    return Mono.fromFuture(publicClientApplication.acquireToken(
        UserNamePasswordParameters.builder(new HashSet<>(request.getScopes()), username, password.toCharArray())
            .build()))
        .map(ar -> new MsalToken(ar, options));
}
/**
* Asynchronously acquire a token from the currently logged in client.
*
* @param request the details of the token request
* @param msalToken the cached token whose account, if present, scopes the silent acquisition
* @return a Publisher that emits an AccessToken
*/
public Mono<MsalToken> authenticateWithUserRefreshToken(TokenRequestContext request, MsalToken msalToken) {
    SilentParameters parameters;
    // Scope the silent request to the cached account when the previous token carried one.
    if (msalToken.getAccount() != null) {
        parameters = SilentParameters.builder(new HashSet<>(request.getScopes()), msalToken.getAccount()).build();
    } else {
        parameters = SilentParameters.builder(new HashSet<>(request.getScopes())).build();
    }
    // Deferred so acquireTokenSilently runs per subscription.
    return Mono.defer(() -> {
        try {
            return Mono.fromFuture(publicClientApplication.acquireTokenSilently(parameters))
                .map(ar -> new MsalToken(ar, options));
        } catch (MalformedURLException e) {
            return Mono.error(e);
        }
    });
}
/**
* Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide
* a device code for login and the user must meet the challenge by authenticating in a browser on the current or a
* different device.
*
* @param request the details of the token request
* @param deviceCodeConsumer the user provided closure that will consume the device code challenge
* @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device
* code expires
*/
public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request,
    Consumer<DeviceCodeInfo> deviceCodeConsumer) {
    // Supplier form defers starting the device-code flow until subscription.
    return Mono.fromFuture(() -> {
        DeviceCodeFlowParameters parameters = DeviceCodeFlowParameters.builder(new HashSet<>(request.getScopes()),
            // Re-wrap MSAL's device-code payload into the public DeviceCodeInfo type.
            dc -> deviceCodeConsumer.accept(new DeviceCodeInfo(dc.userCode(), dc.deviceCode(),
                dc.verificationUri(), OffsetDateTime.now().plusSeconds(dc.expiresIn()), dc.message()))).build();
        return publicClientApplication.acquireToken(parameters);
    }).map(ar -> new MsalToken(ar, options));
}
/**
* Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow.
*
* @param request the details of the token request
* @param authorizationCode the oauth2 authorization code
* @param redirectUrl the redirectUrl where the authorization code is sent to
* @return a Publisher that emits an AccessToken
*/
public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode,
    URI redirectUrl) {
    // Supplier form keeps the code-exchange lazy until subscription.
    return Mono.fromFuture(() -> publicClientApplication.acquireToken(
        AuthorizationCodeParameters.builder(authorizationCode, redirectUrl)
            .scopes(new HashSet<>(request.getScopes()))
            .build()))
        .map(ar -> new MsalToken(ar, options));
}
/**
* Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The
* credential will run a minimal local HttpServer at the given port, so {@code http://localhost:<port>} must be
* listed as a valid reply URL for the application.
*
* @param request the details of the token request
* @param port the port on which the HTTP server is listening
* @return a Publisher that emits an AccessToken
*/
public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, int port) {
    String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId;
    // Start a local listener on the given port to receive the authorization-code redirect.
    return AuthorizationCodeListener.create(port)
        .flatMap(server -> {
            URI redirectUri;
            String browserUri;
            try {
                // NOTE(review): this line appears truncated by extraction — presumably
                // String.format("http://localhost:%s", port); confirm against version control.
                redirectUri = new URI(String.format("http:
                // Hand-built /authorize URL; scopes are space-joined per OAuth2 convention.
                browserUri =
                    String.format("%s/oauth2/v2.0/authorize?response_type=code&response_mode=query&prompt"
                        + "=select_account&client_id=%s&redirect_uri=%s&state=%s&scope=%s",
                        authorityUrl,
                        clientId,
                        redirectUri.toString(),
                        UUID.randomUUID(),
                        String.join(" ", request.getScopes()));
            } catch (URISyntaxException e) {
                // Tear the listener down before propagating the error.
                return server.dispose().then(Mono.error(e));
            }
            // Race listening for the redirect against opening the browser on its own scheduler.
            return server.listen()
                .mergeWith(Mono.<String>fromRunnable(() -> {
                    try {
                        openUrl(browserUri);
                    } catch (IOException e) {
                        throw logger.logExceptionAsError(new IllegalStateException(e));
                    }
                }).subscribeOn(Schedulers.newSingle("browser")))
                .next()
                .flatMap(code -> authenticateWithAuthorizationCode(request, code, redirectUri))
                // Dispose the local server on both failure and success paths.
                .onErrorResume(t -> server.dispose().then(Mono.error(t)))
                .flatMap(msalToken -> server.dispose().then(Mono.just(msalToken)));
        });
}
/**
* Asynchronously acquire a token from the App Service Managed Service Identity endpoint.
*
* @param msiEndpoint the endpoint to acquire token from
* @param msiSecret the secret to acquire token with
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String msiEndpoint, String msiSecret,
TokenRequestContext request) {
// NOTE(review): blocking HTTP inside fromCallable runs on the subscribing thread;
// callers should subscribe on a scheduler suited to blocking work.
return Mono.fromCallable(() -> {
// Translate the requested scopes into the single AAD resource the 2017-09-01 API expects.
String resource = ScopeUtil.scopesToResource(request.getScopes());
HttpURLConnection connection = null;
StringBuilder payload = new StringBuilder();
payload.append("resource=");
payload.append(URLEncoder.encode(resource, "UTF-8"));
payload.append("&api-version=");
payload.append(URLEncoder.encode("2017-09-01", "UTF-8"));
if (clientId != null) {
// App Service MSI uses 'clientid' (no underscore), unlike the IMDS endpoint.
payload.append("&clientid=");
payload.append(URLEncoder.encode(clientId, "UTF-8"));
}
try {
URL url = new URL(String.format("%s?%s", msiEndpoint, payload));
connection = (HttpURLConnection) url.openConnection();
connection.setRequestMethod("GET");
if (msiSecret != null) {
connection.setRequestProperty("Secret", msiSecret);
}
connection.setRequestProperty("Metadata", "true");
connection.connect();
// Read the whole response body as one token ("\\A" anchors at beginning of input).
Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name())
.useDelimiter("\\A");
String result = s.hasNext() ? s.next() : "";
MSIToken msiToken = SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON);
return new IdentityToken(msiToken.getToken(), msiToken.getExpiresAt(), options);
} finally {
if (connection != null) {
connection.disconnect();
}
}
});
}
/**
* Asynchronously acquire a token from the Virtual Machine IMDS endpoint.
*
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) {
String resource = ScopeUtil.scopesToResource(request.getScopes());
StringBuilder payload = new StringBuilder();
final int imdsUpgradeTimeInMs = 70 * 1000;
try {
payload.append("api-version=");
payload.append(URLEncoder.encode("2018-02-01", "UTF-8"));
payload.append("&resource=");
payload.append(URLEncoder.encode(resource, "UTF-8"));
if (clientId != null) {
payload.append("&client_id=");
payload.append(URLEncoder.encode(clientId, "UTF-8"));
}
} catch (IOException exception) {
return Mono.error(exception);
}
return checkIMDSAvailable().flatMap(available -> Mono.fromCallable(() -> {
int retry = 1;
while (retry <= options.getMaxRetry()) {
URL url = null;
HttpURLConnection connection = null;
try {
url =
new URL(String.format("http:
payload.toString()));
connection = (HttpURLConnection) url.openConnection();
connection.setRequestMethod("GET");
connection.setRequestProperty("Metadata", "true");
connection.connect();
Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name())
.useDelimiter("\\A");
String result = s.hasNext() ? s.next() : "";
MSIToken msiToken = SERIALIZER_ADAPTER.deserialize(result,
MSIToken.class, SerializerEncoding.JSON);
return new IdentityToken(msiToken.getToken(), msiToken.getExpiresAt(), options);
} catch (IOException exception) {
if (connection == null) {
throw logger.logExceptionAsError(new RuntimeException(
String.format("Could not connect to the url: %s.", url), exception));
}
int responseCode = connection.getResponseCode();
if (responseCode == 410
|| responseCode == 429
|| responseCode == 404
|| (responseCode >= 500 && responseCode <= 599)) {
int retryTimeoutInMs = options.getRetryTimeout()
.apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000;
retryTimeoutInMs =
(responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? imdsUpgradeTimeInMs
: retryTimeoutInMs;
retry++;
if (retry > options.getMaxRetry()) {
break;
} else {
sleep(retryTimeoutInMs);
}
} else {
throw logger.logExceptionAsError(new RuntimeException(
"Couldn't acquire access token from IMDS, verify your objectId, "
+ "clientId or msiResourceId", exception));
}
} finally {
if (connection != null) {
connection.disconnect();
}
}
}
throw logger.logExceptionAsError(new RuntimeException(
String.format("MSI: Failed to acquire tokens after retrying %s times",
options.getMaxRetry())));
}));
}
private Mono<Boolean> checkIMDSAvailable() {
StringBuilder payload = new StringBuilder();
try {
payload.append("api-version=");
payload.append(URLEncoder.encode("2018-02-01", "UTF-8"));
} catch (IOException exception) {
return Mono.error(exception);
}
return Mono.fromCallable(() -> {
HttpURLConnection connection = null;
URL url = new URL(String.format("http:
payload.toString()));
try {
connection = (HttpURLConnection) url.openConnection();
connection.setRequestMethod("GET");
connection.setConnectTimeout(500);
connection.connect();
} finally {
if (connection != null) {
connection.disconnect();
}
}
return true;
});
}
/** Blocks the current thread for the given number of milliseconds. */
private static void sleep(int millis) {
    try {
        Thread.sleep(millis);
    } catch (InterruptedException ex) {
        // FIX: restore the interrupt flag so callers up the stack can observe the interruption.
        Thread.currentThread().interrupt();
        throw new IllegalStateException(ex);
    }
}
/** Converts azure-core proxy options into the equivalent {@link java.net.Proxy}. */
private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) {
    // SOCKS4/SOCKS5 map to java.net SOCKS; everything else (including HTTP) maps to HTTP.
    Type proxyType;
    switch (options.getType()) {
        case SOCKS4:
        case SOCKS5:
            proxyType = Type.SOCKS;
            break;
        case HTTP:
        default:
            proxyType = Type.HTTP;
            break;
    }
    return new Proxy(proxyType, options.getAddress());
}
/**
 * Returns a trusted directory to launch the Azure CLI from, or null when none can be
 * determined (Windows without %SystemRoot%).
 */
private String getSafeWorkingDirectory() {
    // On non-Windows platforms the fixed system binary path is always safe.
    if (!isWindowsPlatform()) {
        return DEFAULT_MAC_LINUX_PATH;
    }
    // On Windows, bail out with null when %SystemRoot% is unset so callers can fail fast.
    return CoreUtils.isNullOrEmpty(DEFAULT_WINDOWS_SYSTEM_ROOT)
        ? null
        : DEFAULT_WINDOWS_SYSTEM_ROOT + "\\system32";
}
/** Whether the current JVM is running on Windows, per the os.name system property. */
private boolean isWindowsPlatform() {
    String osName = System.getProperty("os.name");
    return osName.contains("Windows");
}
/** Masks every match of the given pattern so secrets never reach logs or exception messages. */
private String redactInfo(String regex, String input) {
    String redacted = input.replaceAll(regex, "****");
    return redacted;
}
/**
 * Opens the given URL in the platform's default browser.
 *
 * @param url the URL to open
 * @throws IOException if launching the browser process fails
 */
void openUrl(String url) throws IOException {
    Runtime rt = Runtime.getRuntime();
    String os = System.getProperty("os.name").toLowerCase(Locale.ROOT);
    // FIX: use the argument-array form of exec so the URL is passed as a single argument
    // instead of being re-tokenized on whitespace (and cannot smuggle extra arguments).
    if (os.contains("win")) {
        rt.exec(new String[] { "rundll32", "url.dll,FileProtocolHandler", url });
    } else if (os.contains("mac")) {
        rt.exec(new String[] { "open", url });
    } else if (os.contains("nix") || os.contains("nux")) {
        rt.exec(new String[] { "xdg-open", url });
    } else {
        logger.error("Browser could not be opened - please open {} in a browser on this device.", url);
    }
}
} | class IdentityClient {
// Shared JSON serializer for parsing CLI / MSI token responses.
private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter();
// Used only to jitter IMDS retry backoff; not security-sensitive.
private static final Random RANDOM = new Random();
// Shell launchers used to invoke the Azure CLI per platform.
private static final String WINDOWS_STARTER = "cmd.exe";
private static final String LINUX_MAC_STARTER = "/bin/sh";
// Flags telling the respective shell to run the following string as a command.
private static final String WINDOWS_SWITCHER = "/c";
private static final String LINUX_MAC_SWITCHER = "-c";
// Output markers indicating the 'az' executable is missing.
private static final String WINDOWS_PROCESS_ERROR_MESSAGE = "'az' is not recognized";
private static final String LINUX_MAC_PROCESS_ERROR_MESSAGE = "(.*)az:(.*)not found";
// Trusted working directories for launching the CLI (avoids picking up a rogue local 'az').
private static final String DEFAULT_WINDOWS_SYSTEM_ROOT = System.getenv("SystemRoot");
private static final String DEFAULT_MAC_LINUX_PATH = "/bin/";
private final ClientLogger logger = new ClientLogger(IdentityClient.class);
private final IdentityClientOptions options;
// MSAL public client; null when no clientId was supplied.
private final PublicClientApplication publicClientApplication;
private final String tenantId;
private final String clientId;
// Routes MSAL's HTTP traffic through an azure-core pipeline; may stay null when a raw proxy is used.
private HttpPipelineAdapter httpPipelineAdapter;
/**
* Creates an IdentityClient with the given options.
*
* @param tenantId the tenant ID of the application.
* @param clientId the client ID of the application.
* @param options the options configuring the client.
*/
// Builds the MSAL PublicClientApplication up front (when a clientId is present) so the
// per-request authenticate* methods only need to supply request parameters.
IdentityClient(String tenantId, String clientId, IdentityClientOptions options) {
if (tenantId == null) {
// "common" lets AAD resolve the tenant from the signing-in account.
tenantId = "common";
}
if (options == null) {
options = new IdentityClientOptions();
}
this.tenantId = tenantId;
this.clientId = clientId;
this.options = options;
if (clientId == null) {
// No client id means public-client flows are unavailable; MSI/CLI flows still work.
this.publicClientApplication = null;
} else {
String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/organizations/" + tenantId;
PublicClientApplication.Builder publicClientApplicationBuilder = PublicClientApplication.builder(clientId);
try {
publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl);
} catch (MalformedURLException e) {
throw logger.logExceptionAsWarning(new IllegalStateException(e));
}
// HTTP transport preference: explicit pipeline > explicit client > raw proxy > default client.
HttpPipeline httpPipeline = options.getHttpPipeline();
if (httpPipeline != null) {
httpPipelineAdapter = new HttpPipelineAdapter(httpPipeline);
publicClientApplicationBuilder.httpClient(httpPipelineAdapter);
} else {
HttpClient httpClient = options.getHttpClient();
if (httpClient != null) {
httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(httpClient));
publicClientApplicationBuilder.httpClient(httpPipelineAdapter);
} else if (options.getProxyOptions() != null) {
publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
} else {
httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(HttpClient.createDefault()));
publicClientApplicationBuilder.httpClient(httpPipelineAdapter);
}
}
if (options.getExecutorService() != null) {
publicClientApplicationBuilder.executorService(options.getExecutorService());
}
this.publicClientApplication = publicClientApplicationBuilder.build();
}
}
/**
* Asynchronously acquire a token from Active Directory with Azure CLI.
*
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
/**
* Asynchronously acquire a token from Active Directory with a client secret.
*
* @param clientSecret the client secret of the application
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
/**
 * Asynchronously acquires a token from Active Directory using the application's client secret.
 *
 * @param clientSecret the client secret of the application
 * @param request the details of the token request
 * @return a Publisher that emits an AccessToken
 */
public Mono<AccessToken> authenticateWithClientSecret(String clientSecret, TokenRequestContext request) {
    // Tenant-scoped authority, e.g. https://login.microsoftonline.com/<tenant>.
    String authority = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId;
    try {
        ConfidentialClientApplication.Builder builder = ConfidentialClientApplication
            .builder(clientId, ClientCredentialFactory.createFromSecret(clientSecret))
            .authority(authority);
        // Route MSAL's HTTP traffic through the configured adapter, or fall back to a raw proxy.
        if (httpPipelineAdapter != null) {
            builder.httpClient(httpPipelineAdapter);
        } else if (options.getProxyOptions() != null) {
            builder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
        }
        if (options.getExecutorService() != null) {
            builder.executorService(options.getExecutorService());
        }
        ConfidentialClientApplication confidentialClient = builder.build();
        ClientCredentialParameters parameters =
            ClientCredentialParameters.builder(new HashSet<>(request.getScopes())).build();
        return Mono.fromFuture(confidentialClient.acquireToken(parameters))
            .map(result -> new MsalToken(result, options));
    } catch (MalformedURLException e) {
        return Mono.error(e);
    }
}
/** Wraps the given HTTP client in the standard azure-core policy chain: retry then logging. */
private HttpPipeline setupPipeline(HttpClient httpClient) {
    List<HttpPipelinePolicy> policyChain = new ArrayList<>();
    HttpLogOptions logOptions = new HttpLogOptions();
    HttpPolicyProviders.addBeforeRetryPolicies(policyChain);
    policyChain.add(new RetryPolicy());
    HttpPolicyProviders.addAfterRetryPolicies(policyChain);
    policyChain.add(new HttpLoggingPolicy(logOptions));
    HttpPipelinePolicy[] policyArray = policyChain.toArray(new HttpPipelinePolicy[0]);
    return new HttpPipelineBuilder()
        .httpClient(httpClient)
        .policies(policyArray)
        .build();
}
/**
* Asynchronously acquire a token from Active Directory with a PKCS12 certificate.
*
* @param pfxCertificatePath the path to the PKCS12 certificate of the application
* @param pfxCertificatePassword the password protecting the PFX certificate
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<AccessToken> authenticateWithPfxCertificate(String pfxCertificatePath, String pfxCertificatePassword,
    TokenRequestContext request) {
    String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId;
    return Mono.fromCallable(() -> {
        // FIX: close the certificate stream once MSAL has read the PFX; it previously leaked.
        try (FileInputStream pfxCertificateStream = new FileInputStream(pfxCertificatePath)) {
            ConfidentialClientApplication.Builder applicationBuilder =
                ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.createFromCertificate(
                    pfxCertificateStream, pfxCertificatePassword))
                    .authority(authorityUrl);
            if (httpPipelineAdapter != null) {
                applicationBuilder.httpClient(httpPipelineAdapter);
            } else if (options.getProxyOptions() != null) {
                applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
            }
            if (options.getExecutorService() != null) {
                applicationBuilder.executorService(options.getExecutorService());
            }
            return applicationBuilder.build();
        }
    }).flatMap(application -> Mono.fromFuture(application.acquireToken(
        ClientCredentialParameters.builder(new HashSet<>(request.getScopes())).build())))
        .map(ar -> new MsalToken(ar, options));
}
/**
* Asynchronously acquire a token from Active Directory with a PEM certificate.
*
* @param pemCertificatePath the path to the PEM certificate of the application
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<AccessToken> authenticateWithPemCertificate(String pemCertificatePath, TokenRequestContext request) {
String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId;
try {
// The PEM file must contain both the private key and the public certificate.
byte[] pemCertificateBytes = Files.readAllBytes(Paths.get(pemCertificatePath));
ConfidentialClientApplication.Builder applicationBuilder =
ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.createFromCertificate(
CertificateUtil.privateKeyFromPem(pemCertificateBytes),
CertificateUtil.publicKeyFromPem(pemCertificateBytes)))
.authority(authorityUrl);
// Same transport preference as the constructor: adapter first, raw proxy as fallback.
if (httpPipelineAdapter != null) {
applicationBuilder.httpClient(httpPipelineAdapter);
} else if (options.getProxyOptions() != null) {
applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
}
if (options.getExecutorService() != null) {
applicationBuilder.executorService(options.getExecutorService());
}
ConfidentialClientApplication application = applicationBuilder.build();
return Mono.fromFuture(application.acquireToken(
ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
.build()))
.map(ar -> new MsalToken(ar, options));
} catch (IOException e) {
// File-read failures surface through the Mono instead of being thrown synchronously.
return Mono.error(e);
}
}
/**
* Asynchronously acquire a token from Active Directory with a username and a password.
*
* @param request the details of the token request
* @param username the username of the user
* @param password the password of the user
* @return a Publisher that emits an AccessToken
*/
/**
 * Asynchronously acquires a token via the MSAL username/password (ROPC) flow.
 *
 * @param request the details of the token request
 * @param username the username of the user
 * @param password the password of the user
 * @return a Publisher that emits an AccessToken
 */
public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request,
    String username, String password) {
    UserNamePasswordParameters parameters = UserNamePasswordParameters
        .builder(new HashSet<>(request.getScopes()), username, password.toCharArray())
        .build();
    return Mono.fromFuture(publicClientApplication.acquireToken(parameters))
        .map(result -> new MsalToken(result, options));
}
/**
* Asynchronously acquire a token silently from the MSAL cache, refreshing it via the cached
* refresh token when it has expired.
*
* @param request the details of the token request
* @param msalToken the cached token holding the account to authenticate silently with
* @return a Publisher that emits an AccessToken
*/
public Mono<MsalToken> authenticateWithUserRefreshToken(TokenRequestContext request, MsalToken msalToken) {
SilentParameters parameters;
if (msalToken.getAccount() != null) {
// Scope the silent request to the cached account when one is available.
parameters = SilentParameters.builder(new HashSet<>(request.getScopes()), msalToken.getAccount()).build();
} else {
parameters = SilentParameters.builder(new HashSet<>(request.getScopes())).build();
}
// defer() so acquireTokenSilently's checked exception surfaces through the Mono at
// subscription time instead of being thrown synchronously.
return Mono.defer(() -> {
try {
return Mono.fromFuture(publicClientApplication.acquireTokenSilently(parameters))
.map(ar -> new MsalToken(ar, options));
} catch (MalformedURLException e) {
return Mono.error(e);
}
});
}
/**
* Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide
* a device code for login and the user must meet the challenge by authenticating in a browser on the current or a
* different device.
*
* @param request the details of the token request
* @param deviceCodeConsumer the user provided closure that will consume the device code challenge
* @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device
* code expires
*/
public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request,
Consumer<DeviceCodeInfo> deviceCodeConsumer) {
// fromFuture(Supplier) defers starting the device-code flow until subscription.
return Mono.fromFuture(() -> {
DeviceCodeFlowParameters parameters = DeviceCodeFlowParameters.builder(new HashSet<>(request.getScopes()),
// Surface the user code / verification URI to the caller-supplied consumer.
dc -> deviceCodeConsumer.accept(new DeviceCodeInfo(dc.userCode(), dc.deviceCode(),
dc.verificationUri(), OffsetDateTime.now().plusSeconds(dc.expiresIn()), dc.message()))).build();
return publicClientApplication.acquireToken(parameters);
}).map(ar -> new MsalToken(ar, options));
}
/**
* Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow.
*
* @param request the details of the token request
* @param authorizationCode the oauth2 authorization code
* @param redirectUrl the redirectUrl where the authorization code is sent to
* @return a Publisher that emits an AccessToken
*/
/**
 * Asynchronously exchanges an OAuth2 authorization code for a token.
 *
 * @param request the details of the token request
 * @param authorizationCode the oauth2 authorization code
 * @param redirectUrl the redirect URL the authorization code was sent to
 * @return a Publisher that emits an AccessToken
 */
public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode,
    URI redirectUrl) {
    return Mono.fromFuture(() -> {
        AuthorizationCodeParameters parameters = AuthorizationCodeParameters
            .builder(authorizationCode, redirectUrl)
            .scopes(new HashSet<>(request.getScopes()))
            .build();
        return publicClientApplication.acquireToken(parameters);
    }).map(result -> new MsalToken(result, options));
}
/**
* Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The
* credential will run a minimal local HttpServer at the given port, so {@code http://localhost:<port>} must be
* listed as a valid reply URL for the application.
*
* @param request the details of the token request
* @param port the port on which the HTTP server is listening
* @return a Publisher that emits an AccessToken
*/
// Spins up a loopback listener on the given port, opens the system browser at the authorize
// endpoint, waits for the auth-code redirect, then exchanges the code for a token.
public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, int port) {
String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId;
return AuthorizationCodeListener.create(port)
.flatMap(server -> {
URI redirectUri;
String browserUri;
try {
// NOTE(review): this line is truncated in this copy of the source - presumably
// String.format("http://localhost:%s", port); confirm against the original file.
redirectUri = new URI(String.format("http:
browserUri =
String.format("%s/oauth2/v2.0/authorize?response_type=code&response_mode=query&prompt"
+ "=select_account&client_id=%s&redirect_uri=%s&state=%s&scope=%s",
authorityUrl,
clientId,
redirectUri.toString(),
UUID.randomUUID(),
String.join(" ", request.getScopes()));
} catch (URISyntaxException e) {
// Release the port before surfacing the error.
return server.dispose().then(Mono.error(e));
}
// Merge the redirect listener with the browser launch; next() takes the first auth code.
return server.listen()
.mergeWith(Mono.<String>fromRunnable(() -> {
try {
openUrl(browserUri);
} catch (IOException e) {
throw logger.logExceptionAsError(new IllegalStateException(e));
}
}).subscribeOn(Schedulers.newSingle("browser")))
.next()
.flatMap(code -> authenticateWithAuthorizationCode(request, code, redirectUri))
.onErrorResume(t -> server.dispose().then(Mono.error(t)))
.flatMap(msalToken -> server.dispose().then(Mono.just(msalToken)));
});
}
/**
* Asynchronously acquire a token from the App Service Managed Service Identity endpoint.
*
* @param msiEndpoint the endpoint to acquire token from
* @param msiSecret the secret to acquire token with
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String msiEndpoint, String msiSecret,
TokenRequestContext request) {
// NOTE(review): blocking HTTP inside fromCallable runs on the subscribing thread;
// callers should subscribe on a scheduler suited to blocking work.
return Mono.fromCallable(() -> {
// Translate the requested scopes into the single AAD resource the 2017-09-01 API expects.
String resource = ScopeUtil.scopesToResource(request.getScopes());
HttpURLConnection connection = null;
StringBuilder payload = new StringBuilder();
payload.append("resource=");
payload.append(URLEncoder.encode(resource, "UTF-8"));
payload.append("&api-version=");
payload.append(URLEncoder.encode("2017-09-01", "UTF-8"));
if (clientId != null) {
// App Service MSI uses 'clientid' (no underscore), unlike the IMDS endpoint.
payload.append("&clientid=");
payload.append(URLEncoder.encode(clientId, "UTF-8"));
}
try {
URL url = new URL(String.format("%s?%s", msiEndpoint, payload));
connection = (HttpURLConnection) url.openConnection();
connection.setRequestMethod("GET");
if (msiSecret != null) {
connection.setRequestProperty("Secret", msiSecret);
}
connection.setRequestProperty("Metadata", "true");
connection.connect();
// Read the whole response body as one token ("\\A" anchors at beginning of input).
Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name())
.useDelimiter("\\A");
String result = s.hasNext() ? s.next() : "";
MSIToken msiToken = SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON);
return new IdentityToken(msiToken.getToken(), msiToken.getExpiresAt(), options);
} finally {
if (connection != null) {
connection.disconnect();
}
}
});
}
/**
* Asynchronously acquire a token from the Virtual Machine IMDS endpoint.
*
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) {
String resource = ScopeUtil.scopesToResource(request.getScopes());
StringBuilder payload = new StringBuilder();
final int imdsUpgradeTimeInMs = 70 * 1000;
try {
payload.append("api-version=");
payload.append(URLEncoder.encode("2018-02-01", "UTF-8"));
payload.append("&resource=");
payload.append(URLEncoder.encode(resource, "UTF-8"));
if (clientId != null) {
payload.append("&client_id=");
payload.append(URLEncoder.encode(clientId, "UTF-8"));
}
} catch (IOException exception) {
return Mono.error(exception);
}
return checkIMDSAvailable().flatMap(available -> Mono.fromCallable(() -> {
int retry = 1;
while (retry <= options.getMaxRetry()) {
URL url = null;
HttpURLConnection connection = null;
try {
url =
new URL(String.format("http:
payload.toString()));
connection = (HttpURLConnection) url.openConnection();
connection.setRequestMethod("GET");
connection.setRequestProperty("Metadata", "true");
connection.connect();
Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name())
.useDelimiter("\\A");
String result = s.hasNext() ? s.next() : "";
MSIToken msiToken = SERIALIZER_ADAPTER.deserialize(result,
MSIToken.class, SerializerEncoding.JSON);
return new IdentityToken(msiToken.getToken(), msiToken.getExpiresAt(), options);
} catch (IOException exception) {
if (connection == null) {
throw logger.logExceptionAsError(new RuntimeException(
String.format("Could not connect to the url: %s.", url), exception));
}
int responseCode = connection.getResponseCode();
if (responseCode == 410
|| responseCode == 429
|| responseCode == 404
|| (responseCode >= 500 && responseCode <= 599)) {
int retryTimeoutInMs = options.getRetryTimeout()
.apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000;
retryTimeoutInMs =
(responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? imdsUpgradeTimeInMs
: retryTimeoutInMs;
retry++;
if (retry > options.getMaxRetry()) {
break;
} else {
sleep(retryTimeoutInMs);
}
} else {
throw logger.logExceptionAsError(new RuntimeException(
"Couldn't acquire access token from IMDS, verify your objectId, "
+ "clientId or msiResourceId", exception));
}
} finally {
if (connection != null) {
connection.disconnect();
}
}
}
throw logger.logExceptionAsError(new RuntimeException(
String.format("MSI: Failed to acquire tokens after retrying %s times",
options.getMaxRetry())));
}));
}
private Mono<Boolean> checkIMDSAvailable() {
StringBuilder payload = new StringBuilder();
try {
payload.append("api-version=");
payload.append(URLEncoder.encode("2018-02-01", "UTF-8"));
} catch (IOException exception) {
return Mono.error(exception);
}
return Mono.fromCallable(() -> {
HttpURLConnection connection = null;
URL url = new URL(String.format("http:
payload.toString()));
try {
connection = (HttpURLConnection) url.openConnection();
connection.setRequestMethod("GET");
connection.setConnectTimeout(500);
connection.connect();
} finally {
if (connection != null) {
connection.disconnect();
}
}
return true;
});
}
/** Blocks the current thread for the given number of milliseconds. */
private static void sleep(int millis) {
    try {
        Thread.sleep(millis);
    } catch (InterruptedException ex) {
        // FIX: restore the interrupt flag so callers up the stack can observe the interruption.
        Thread.currentThread().interrupt();
        throw new IllegalStateException(ex);
    }
}
/** Converts azure-core proxy options into the equivalent {@link java.net.Proxy}. */
private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) {
    // SOCKS4/SOCKS5 map to java.net SOCKS; everything else (including HTTP) maps to HTTP.
    Type proxyType;
    switch (options.getType()) {
        case SOCKS4:
        case SOCKS5:
            proxyType = Type.SOCKS;
            break;
        case HTTP:
        default:
            proxyType = Type.HTTP;
            break;
    }
    return new Proxy(proxyType, options.getAddress());
}
/**
 * Returns a trusted directory to launch the Azure CLI from, or null when none can be
 * determined (Windows without %SystemRoot%).
 */
private String getSafeWorkingDirectory() {
    // On non-Windows platforms the fixed system binary path is always safe.
    if (!isWindowsPlatform()) {
        return DEFAULT_MAC_LINUX_PATH;
    }
    // On Windows, bail out with null when %SystemRoot% is unset so callers can fail fast.
    return CoreUtils.isNullOrEmpty(DEFAULT_WINDOWS_SYSTEM_ROOT)
        ? null
        : DEFAULT_WINDOWS_SYSTEM_ROOT + "\\system32";
}
/** Whether the current JVM is running on Windows, per the os.name system property. */
private boolean isWindowsPlatform() {
    String osName = System.getProperty("os.name");
    return osName.contains("Windows");
}
/** Masks every match of the given pattern so secrets never reach logs or exception messages. */
private String redactInfo(String regex, String input) {
    String redacted = input.replaceAll(regex, "****");
    return redacted;
}
/**
 * Opens the given URL in the platform's default browser.
 *
 * @param url the URL to open
 * @throws IOException if launching the browser process fails
 */
void openUrl(String url) throws IOException {
    Runtime rt = Runtime.getRuntime();
    String os = System.getProperty("os.name").toLowerCase(Locale.ROOT);
    // FIX: use the argument-array form of exec so the URL is passed as a single argument
    // instead of being re-tokenized on whitespace (and cannot smuggle extra arguments).
    if (os.contains("win")) {
        rt.exec(new String[] { "rundll32", "url.dll,FileProtocolHandler", url });
    } else if (os.contains("mac")) {
        rt.exec(new String[] { "open", url });
    } else if (os.contains("nix") || os.contains("nux")) {
        rt.exec(new String[] { "xdg-open", url });
    } else {
        logger.error("Browser could not be opened - please open {} in a browser on this device.", url);
    }
}
} |
added 10 seconds timeout | public Mono<AccessToken> authenticateWithAzureCli(TokenRequestContext request) {
String azCommand = "az account get-access-token --output json --resource ";
StringBuilder command = new StringBuilder();
command.append(azCommand);
String scopes = ScopeUtil.scopesToResource(request.getScopes());
try {
ScopeUtil.validateScope(scopes);
} catch (IllegalArgumentException ex) {
return Mono.error(logger.logExceptionAsError(ex));
}
command.append(scopes);
AccessToken token = null;
BufferedReader reader = null;
try {
String starter;
String switcher;
if (isWindowsPlatform()) {
starter = WINDOWS_STARTER;
switcher = WINDOWS_SWITCHER;
} else {
starter = LINUX_MAC_STARTER;
switcher = LINUX_MAC_SWITCHER;
}
ProcessBuilder builder = new ProcessBuilder(starter, switcher, command.toString());
String workingDirectory = getSafeWorkingDirectory();
if (workingDirectory != null) {
builder.directory(new File(workingDirectory));
} else {
throw logger.logExceptionAsError(new IllegalStateException("A Safe Working directory could not be"
+ " found to execute CLI command from."));
}
builder.redirectErrorStream(true);
Process process = builder.start();
reader = new BufferedReader(new InputStreamReader(process.getInputStream(), "UTF-8"));
String line;
StringBuilder output = new StringBuilder();
while (true) {
line = reader.readLine();
if (line == null) {
break;
}
if (line.startsWith(WINDOWS_PROCESS_ERROR_MESSAGE) || line.matches(LINUX_MAC_PROCESS_ERROR_MESSAGE)) {
throw logger.logExceptionAsError(
new ClientAuthenticationException("Azure CLI not installed", null));
}
output.append(line);
}
String processOutput = output.toString();
process.waitFor();
if (process.exitValue() != 0) {
if (processOutput.length() > 0) {
String redactedOutput = redactInfo("\"accessToken\": \"(.*?)(\"|$)", processOutput);
throw logger.logExceptionAsError(new ClientAuthenticationException(redactedOutput, null));
} else {
throw logger.logExceptionAsError(
new ClientAuthenticationException("Failed to invoke Azure CLI ", null));
}
}
Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(processOutput, Map.class,
SerializerEncoding.JSON);
String accessToken = objectMap.get("accessToken");
String time = objectMap.get("expiresOn");
String timeToSecond = time.substring(0, time.indexOf("."));
String timeJoinedWithT = String.join("T", timeToSecond.split(" "));
OffsetDateTime expiresOn = LocalDateTime.parse(timeJoinedWithT, DateTimeFormatter.ISO_LOCAL_DATE_TIME)
.atZone(ZoneId.systemDefault())
.toOffsetDateTime().withOffsetSameInstant(ZoneOffset.UTC);
token = new IdentityToken(accessToken, expiresOn, options);
} catch (IOException e) {
throw logger.logExceptionAsError(new IllegalStateException(e));
} catch (InterruptedException e) {
throw logger.logExceptionAsError(new IllegalStateException(e));
} catch (RuntimeException e) {
return Mono.error(logger.logExceptionAsError(e));
} finally {
try {
if (reader != null) {
reader.close();
}
} catch (IOException ex) {
return Mono.error(logger.logExceptionAsError(new IllegalStateException(ex)));
}
}
return Mono.just(token);
} | process.waitFor(); | public Mono<AccessToken> authenticateWithAzureCli(TokenRequestContext request) {
String azCommand = "az account get-access-token --output json --resource ";
StringBuilder command = new StringBuilder();
command.append(azCommand);
String scopes = ScopeUtil.scopesToResource(request.getScopes());
try {
ScopeUtil.validateScope(scopes);
} catch (IllegalArgumentException ex) {
return Mono.error(logger.logExceptionAsError(ex));
}
command.append(scopes);
AccessToken token = null;
BufferedReader reader = null;
try {
String starter;
String switcher;
if (isWindowsPlatform()) {
starter = WINDOWS_STARTER;
switcher = WINDOWS_SWITCHER;
} else {
starter = LINUX_MAC_STARTER;
switcher = LINUX_MAC_SWITCHER;
}
ProcessBuilder builder = new ProcessBuilder(starter, switcher, command.toString());
String workingDirectory = getSafeWorkingDirectory();
if (workingDirectory != null) {
builder.directory(new File(workingDirectory));
} else {
throw logger.logExceptionAsError(new IllegalStateException("A Safe Working directory could not be"
+ " found to execute CLI command from."));
}
builder.redirectErrorStream(true);
Process process = builder.start();
reader = new BufferedReader(new InputStreamReader(process.getInputStream(), "UTF-8"));
String line;
StringBuilder output = new StringBuilder();
while (true) {
line = reader.readLine();
if (line == null) {
break;
}
if (line.startsWith(WINDOWS_PROCESS_ERROR_MESSAGE) || line.matches(LINUX_MAC_PROCESS_ERROR_MESSAGE)) {
throw logger.logExceptionAsError(
new ClientAuthenticationException("Azure CLI not installed", null));
}
output.append(line);
}
String processOutput = output.toString();
process.waitFor(10, TimeUnit.SECONDS);
if (process.exitValue() != 0) {
if (processOutput.length() > 0) {
String redactedOutput = redactInfo("\"accessToken\": \"(.*?)(\"|$)", processOutput);
throw logger.logExceptionAsError(new ClientAuthenticationException(redactedOutput, null));
} else {
throw logger.logExceptionAsError(
new ClientAuthenticationException("Failed to invoke Azure CLI ", null));
}
}
Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(processOutput, Map.class,
SerializerEncoding.JSON);
String accessToken = objectMap.get("accessToken");
String time = objectMap.get("expiresOn");
String timeToSecond = time.substring(0, time.indexOf("."));
String timeJoinedWithT = String.join("T", timeToSecond.split(" "));
OffsetDateTime expiresOn = LocalDateTime.parse(timeJoinedWithT, DateTimeFormatter.ISO_LOCAL_DATE_TIME)
.atZone(ZoneId.systemDefault())
.toOffsetDateTime().withOffsetSameInstant(ZoneOffset.UTC);
token = new IdentityToken(accessToken, expiresOn, options);
} catch (IOException | InterruptedException e) {
throw logger.logExceptionAsError(new IllegalStateException(e));
} catch (RuntimeException e) {
return Mono.error(logger.logExceptionAsError(e));
} finally {
try {
if (reader != null) {
reader.close();
}
} catch (IOException ex) {
return Mono.error(logger.logExceptionAsError(new IllegalStateException(ex)));
}
}
return Mono.just(token);
} | class IdentityClient {
// Shared JSON serializer for parsing CLI / MSI token responses.
private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter();
// Used only to jitter IMDS retry backoff; not security-sensitive.
private static final Random RANDOM = new Random();
// Shell launchers used to invoke the Azure CLI per platform.
private static final String WINDOWS_STARTER = "cmd.exe";
private static final String LINUX_MAC_STARTER = "/bin/sh";
// Flags telling the respective shell to run the following string as a command.
private static final String WINDOWS_SWITCHER = "/c";
private static final String LINUX_MAC_SWITCHER = "-c";
// Output markers indicating the 'az' executable is missing.
private static final String WINDOWS_PROCESS_ERROR_MESSAGE = "'az' is not recognized";
private static final String LINUX_MAC_PROCESS_ERROR_MESSAGE = "(.*)az:(.*)not found";
// Trusted working directories for launching the CLI (avoids picking up a rogue local 'az').
private static final String DEFAULT_WINDOWS_SYSTEM_ROOT = System.getenv("SystemRoot");
private static final String DEFAULT_MAC_LINUX_PATH = "/bin/";
private final ClientLogger logger = new ClientLogger(IdentityClient.class);
private final IdentityClientOptions options;
// MSAL public client; null when no clientId was supplied.
private final PublicClientApplication publicClientApplication;
private final String tenantId;
private final String clientId;
// Routes MSAL's HTTP traffic through an azure-core pipeline; may stay null when a raw proxy is used.
private HttpPipelineAdapter httpPipelineAdapter;
/**
 * Creates an IdentityClient with the given options.
 *
 * @param tenantId the tenant ID of the application; {@code null} defaults to "common".
 * @param clientId the client ID of the application; when {@code null}, no MSAL
 *     public client application is created.
 * @param options the options configuring the client; {@code null} defaults to
 *     a fresh {@link IdentityClientOptions}.
 */
IdentityClient(String tenantId, String clientId, IdentityClientOptions options) {
    // Fall back to the multi-tenant "common" endpoint when no tenant is supplied.
    if (tenantId == null) {
        tenantId = "common";
    }
    if (options == null) {
        options = new IdentityClientOptions();
    }
    this.tenantId = tenantId;
    this.clientId = clientId;
    this.options = options;
    if (clientId == null) {
        // Without a client id no MSAL public client can be built.
        this.publicClientApplication = null;
    } else {
        // Authority of the form <host>/organizations/<tenantId>, with trailing
        // slashes on the configured host normalized away first.
        String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/organizations/" + tenantId;
        PublicClientApplication.Builder publicClientApplicationBuilder = PublicClientApplication.builder(clientId);
        try {
            publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl);
        } catch (MalformedURLException e) {
            throw logger.logExceptionAsWarning(new IllegalStateException(e));
        }
        // HTTP transport selection, in priority order: explicit pipeline, explicit
        // HttpClient, bare proxy options, then a default azure-core client.
        HttpPipeline httpPipeline = options.getHttpPipeline();
        if (httpPipeline != null) {
            httpPipelineAdapter = new HttpPipelineAdapter(httpPipeline);
            publicClientApplicationBuilder.httpClient(httpPipelineAdapter);
        } else {
            HttpClient httpClient = options.getHttpClient();
            if (httpClient != null) {
                httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(httpClient));
                publicClientApplicationBuilder.httpClient(httpPipelineAdapter);
            } else if (options.getProxyOptions() != null) {
                publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
            } else {
                httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(HttpClient.createDefault()));
                publicClientApplicationBuilder.httpClient(httpPipelineAdapter);
            }
        }
        if (options.getExecutorService() != null) {
            publicClientApplicationBuilder.executorService(options.getExecutorService());
        }
        this.publicClientApplication = publicClientApplicationBuilder.build();
    }
}
/**
 * Asynchronously acquire a token from Active Directory with a client secret.
 *
 * @param clientSecret the client secret of the application
 * @param request the details of the token request
 * @return a Publisher that emits an AccessToken
 */
public Mono<AccessToken> authenticateWithClientSecret(String clientSecret, TokenRequestContext request) {
    // Tenant-specific authority; trailing slashes on the host are normalized away.
    // (Removed a stale Javadoc block above this method that documented a deleted
    // "Azure CLI" authentication method - it was attached to the wrong member.)
    String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId;
    try {
        ConfidentialClientApplication.Builder applicationBuilder =
            ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.createFromSecret(clientSecret))
                .authority(authorityUrl);
        // Prefer the azure-core pipeline adapter when configured, otherwise fall
        // back to MSAL's HTTP stack with an optional proxy.
        if (httpPipelineAdapter != null) {
            applicationBuilder.httpClient(httpPipelineAdapter);
        } else if (options.getProxyOptions() != null) {
            applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
        }
        if (options.getExecutorService() != null) {
            applicationBuilder.executorService(options.getExecutorService());
        }
        ConfidentialClientApplication application = applicationBuilder.build();
        return Mono.fromFuture(application.acquireToken(
            ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
                .build()))
            .map(ar -> new MsalToken(ar, options));
    } catch (MalformedURLException e) {
        return Mono.error(e);
    }
}
/**
 * Builds the default azure-core HTTP pipeline (retry + logging policies) around the
 * supplied HTTP client, for use when the caller did not provide a pipeline.
 */
private HttpPipeline setupPipeline(HttpClient httpClient) {
    final List<HttpPipelinePolicy> pipelinePolicies = new ArrayList<>();
    final HttpLogOptions logOptions = new HttpLogOptions();
    // Policy order matters: pre-retry policies, retry, post-retry policies, logging.
    HttpPolicyProviders.addBeforeRetryPolicies(pipelinePolicies);
    pipelinePolicies.add(new RetryPolicy());
    HttpPolicyProviders.addAfterRetryPolicies(pipelinePolicies);
    pipelinePolicies.add(new HttpLoggingPolicy(logOptions));
    return new HttpPipelineBuilder()
        .httpClient(httpClient)
        .policies(pipelinePolicies.toArray(new HttpPipelinePolicy[0]))
        .build();
}
/**
 * Asynchronously acquire a token from Active Directory with a PKCS12 certificate.
 *
 * @param pfxCertificatePath the path to the PKCS12 certificate of the application
 * @param pfxCertificatePassword the password protecting the PFX certificate
 * @param request the details of the token request
 * @return a Publisher that emits an AccessToken
 */
public Mono<AccessToken> authenticateWithPfxCertificate(String pfxCertificatePath, String pfxCertificatePassword,
    TokenRequestContext request) {
    String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId;
    return Mono.fromCallable(() -> {
        // Close the certificate stream once MSAL has consumed it; the previous
        // implementation opened the FileInputStream without ever closing it.
        try (FileInputStream pfxCertificate = new FileInputStream(pfxCertificatePath)) {
            ConfidentialClientApplication.Builder applicationBuilder =
                ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.createFromCertificate(
                    pfxCertificate, pfxCertificatePassword))
                    .authority(authorityUrl);
            if (httpPipelineAdapter != null) {
                applicationBuilder.httpClient(httpPipelineAdapter);
            } else if (options.getProxyOptions() != null) {
                applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
            }
            if (options.getExecutorService() != null) {
                applicationBuilder.executorService(options.getExecutorService());
            }
            return applicationBuilder.build();
        }
    }).flatMap(application -> Mono.fromFuture(application.acquireToken(
        ClientCredentialParameters.builder(new HashSet<>(request.getScopes())).build())))
        .map(ar -> new MsalToken(ar, options));
}
/**
 * Asynchronously acquire a token from Active Directory with a PEM certificate.
 *
 * @param pemCertificatePath the path to the PEM certificate of the application
 * @param request the details of the token request
 * @return a Publisher that emits an AccessToken
 */
public Mono<AccessToken> authenticateWithPemCertificate(String pemCertificatePath, TokenRequestContext request) {
    String authority = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId;
    try {
        // A single PEM file carries both the private key and the public certificate.
        byte[] pemBytes = Files.readAllBytes(Paths.get(pemCertificatePath));
        ConfidentialClientApplication.Builder builder = ConfidentialClientApplication
            .builder(clientId, ClientCredentialFactory.createFromCertificate(
                CertificateUtil.privateKeyFromPem(pemBytes),
                CertificateUtil.publicKeyFromPem(pemBytes)))
            .authority(authority);
        if (httpPipelineAdapter != null) {
            builder.httpClient(httpPipelineAdapter);
        } else if (options.getProxyOptions() != null) {
            builder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
        }
        if (options.getExecutorService() != null) {
            builder.executorService(options.getExecutorService());
        }
        return Mono.fromFuture(builder.build().acquireToken(
                ClientCredentialParameters.builder(new HashSet<>(request.getScopes())).build()))
            .map(result -> new MsalToken(result, options));
    } catch (IOException e) {
        return Mono.error(e);
    }
}
/**
 * Asynchronously acquire a token from Active Directory with a username and a password.
 *
 * @param request the details of the token request
 * @param username the username of the user
 * @param password the password of the user
 * @return a Publisher that emits an AccessToken
 */
public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request,
    String username, String password) {
    UserNamePasswordParameters parameters = UserNamePasswordParameters
        .builder(new HashSet<>(request.getScopes()), username, password.toCharArray())
        .build();
    return Mono.fromFuture(publicClientApplication.acquireToken(parameters))
        .map(result -> new MsalToken(result, options));
}
/**
 * Asynchronously acquire a token silently from the currently logged in client, using
 * the account cached from a previous authentication.
 *
 * @param request the details of the token request
 * @param msalToken the token obtained from a previous authentication; its account,
 *     when present, scopes the silent acquisition
 * @return a Publisher that emits an AccessToken
 */
public Mono<MsalToken> authenticateWithUserRefreshToken(TokenRequestContext request, MsalToken msalToken) {
    SilentParameters parameters;
    if (msalToken.getAccount() != null) {
        parameters = SilentParameters.builder(new HashSet<>(request.getScopes()), msalToken.getAccount()).build();
    } else {
        parameters = SilentParameters.builder(new HashSet<>(request.getScopes())).build();
    }
    // Defer so the silent acquisition (which may consult/refresh the cache) runs
    // per subscription rather than at assembly time.
    return Mono.defer(() -> {
        try {
            return Mono.fromFuture(publicClientApplication.acquireTokenSilently(parameters))
                .map(ar -> new MsalToken(ar, options));
        } catch (MalformedURLException e) {
            return Mono.error(e);
        }
    });
}
/**
 * Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide
 * a device code for login and the user must meet the challenge by authenticating in a browser on the current or a
 * different device.
 *
 * @param request the details of the token request
 * @param deviceCodeConsumer the user provided closure that will consume the device code challenge
 * @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device
 * code expires
 */
public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request,
    Consumer<DeviceCodeInfo> deviceCodeConsumer) {
    return Mono.fromFuture(() -> {
        // Relay each MSAL device-code callback to the caller's consumer, translated
        // into the azure-identity DeviceCodeInfo shape with an absolute expiry.
        DeviceCodeFlowParameters flowParameters = DeviceCodeFlowParameters
            .builder(new HashSet<>(request.getScopes()), challenge -> deviceCodeConsumer.accept(
                new DeviceCodeInfo(challenge.userCode(), challenge.deviceCode(), challenge.verificationUri(),
                    OffsetDateTime.now().plusSeconds(challenge.expiresIn()), challenge.message())))
            .build();
        return publicClientApplication.acquireToken(flowParameters);
    }).map(result -> new MsalToken(result, options));
}
/**
 * Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow.
 *
 * @param request the details of the token request
 * @param authorizationCode the oauth2 authorization code
 * @param redirectUrl the redirectUrl where the authorization code is sent to
 * @return a Publisher that emits an AccessToken
 */
public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode,
    URI redirectUrl) {
    return Mono.fromFuture(() -> {
        AuthorizationCodeParameters parameters = AuthorizationCodeParameters
            .builder(authorizationCode, redirectUrl)
            .scopes(new HashSet<>(request.getScopes()))
            .build();
        return publicClientApplication.acquireToken(parameters);
    }).map(result -> new MsalToken(result, options));
}
/**
 * Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The
 * credential will run a minimal local HttpServer at the given port, so the loopback redirect URL for that port
 * must be listed as a valid reply URL for the application.
 *
 * @param request the details of the token request
 * @param port the port on which the HTTP server is listening
 * @return a Publisher that emits an AccessToken
 */
public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, int port) {
    String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId;
    return AuthorizationCodeListener.create(port)
        .flatMap(server -> {
            URI redirectUri;
            String browserUri;
            try {
                // NOTE(review): the URL string literal below was truncated in this copy
                // of the source ("http:"); restore the full loopback redirect URL
                // (presumably http://localhost:<port>) from the original file.
                redirectUri = new URI(String.format("http:
                browserUri =
                    String.format("%s/oauth2/v2.0/authorize?response_type=code&response_mode=query&prompt"
                        + "=select_account&client_id=%s&redirect_uri=%s&state=%s&scope=%s",
                        authorityUrl,
                        clientId,
                        redirectUri.toString(),
                        UUID.randomUUID(),
                        String.join(" ", request.getScopes()));
            } catch (URISyntaxException e) {
                return server.dispose().then(Mono.error(e));
            }
            // Race the local listener against opening the browser on its own thread;
            // the first emitted value is the authorization code.
            return server.listen()
                .mergeWith(Mono.<String>fromRunnable(() -> {
                    try {
                        openUrl(browserUri);
                    } catch (IOException e) {
                        throw logger.logExceptionAsError(new IllegalStateException(e));
                    }
                }).subscribeOn(Schedulers.newSingle("browser")))
                .next()
                .flatMap(code -> authenticateWithAuthorizationCode(request, code, redirectUri))
                // Dispose the listener on both the failure and success paths.
                .onErrorResume(t -> server.dispose().then(Mono.error(t)))
                .flatMap(msalToken -> server.dispose().then(Mono.just(msalToken)));
        });
}
/**
 * Asynchronously acquire a token from the App Service Managed Service Identity endpoint.
 *
 * @param msiEndpoint the endpoint to acquire token from
 * @param msiSecret the secret to acquire token with
 * @param request the details of the token request
 * @return a Publisher that emits an AccessToken
 */
public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String msiEndpoint, String msiSecret,
    TokenRequestContext request) {
    return Mono.fromCallable(() -> {
        String resource = ScopeUtil.scopesToResource(request.getScopes());
        HttpURLConnection connection = null;
        // Query string for the 2017-09-01 App Service MSI protocol.
        StringBuilder payload = new StringBuilder();
        payload.append("resource=");
        payload.append(URLEncoder.encode(resource, "UTF-8"));
        payload.append("&api-version=");
        payload.append(URLEncoder.encode("2017-09-01", "UTF-8"));
        if (clientId != null) {
            payload.append("&clientid=");
            payload.append(URLEncoder.encode(clientId, "UTF-8"));
        }
        try {
            URL url = new URL(String.format("%s?%s", msiEndpoint, payload));
            connection = (HttpURLConnection) url.openConnection();
            connection.setRequestMethod("GET");
            if (msiSecret != null) {
                connection.setRequestProperty("Secret", msiSecret);
            }
            connection.setRequestProperty("Metadata", "true");
            connection.connect();
            // Drain the whole response with a "\A"-delimited Scanner, closing it
            // (and the underlying stream) afterwards - the original leaked it.
            String result;
            try (Scanner scanner = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name())
                .useDelimiter("\\A")) {
                result = scanner.hasNext() ? scanner.next() : "";
            }
            MSIToken msiToken = SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON);
            return new IdentityToken(msiToken.getToken(), msiToken.getExpiresAt(), options);
        } finally {
            if (connection != null) {
                connection.disconnect();
            }
        }
    });
}
/**
 * Asynchronously acquire a token from the Virtual Machine IMDS endpoint.
 *
 * @param request the details of the token request
 * @return a Publisher that emits an AccessToken
 */
public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) {
    String resource = ScopeUtil.scopesToResource(request.getScopes());
    StringBuilder payload = new StringBuilder();
    // Floor for the retry delay while IMDS itself is upgrading (HTTP 410).
    final int imdsUpgradeTimeInMs = 70 * 1000;
    try {
        // Query string for the 2018-02-01 IMDS protocol.
        payload.append("api-version=");
        payload.append(URLEncoder.encode("2018-02-01", "UTF-8"));
        payload.append("&resource=");
        payload.append(URLEncoder.encode(resource, "UTF-8"));
        if (clientId != null) {
            payload.append("&client_id=");
            payload.append(URLEncoder.encode(clientId, "UTF-8"));
        }
    } catch (IOException exception) {
        return Mono.error(exception);
    }
    // Probe the endpoint first, then poll it with bounded, jittered retries.
    return checkIMDSAvailable().flatMap(available -> Mono.fromCallable(() -> {
        int retry = 1;
        while (retry <= options.getMaxRetry()) {
            URL url = null;
            HttpURLConnection connection = null;
            try {
                // NOTE(review): the URL string literal below was truncated in this
                // copy of the source ("http:"); restore the full IMDS token URL
                // (presumably http://169.254.169.254/metadata/identity/oauth2/token)
                // from the original file.
                url =
                    new URL(String.format("http:
                    payload.toString()));
                connection = (HttpURLConnection) url.openConnection();
                connection.setRequestMethod("GET");
                connection.setRequestProperty("Metadata", "true");
                connection.connect();
                // NOTE(review): this Scanner is never closed - consider
                // try-with-resources when editing the original file.
                Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name())
                    .useDelimiter("\\A");
                String result = s.hasNext() ? s.next() : "";
                MSIToken msiToken = SERIALIZER_ADAPTER.deserialize(result,
                    MSIToken.class, SerializerEncoding.JSON);
                return new IdentityToken(msiToken.getToken(), msiToken.getExpiresAt(), options);
            } catch (IOException exception) {
                if (connection == null) {
                    throw logger.logExceptionAsError(new RuntimeException(
                        String.format("Could not connect to the url: %s.", url), exception));
                }
                int responseCode = connection.getResponseCode();
                // Retry on throttling (429), gone-during-upgrade (410), not-yet-ready
                // (404) and server errors; anything else is terminal.
                if (responseCode == 410
                    || responseCode == 429
                    || responseCode == 404
                    || (responseCode >= 500 && responseCode <= 599)) {
                    int retryTimeoutInMs = options.getRetryTimeout()
                        .apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000;
                    // A 410 means IMDS is upgrading; wait at least the upgrade window.
                    retryTimeoutInMs =
                        (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? imdsUpgradeTimeInMs
                            : retryTimeoutInMs;
                    retry++;
                    if (retry > options.getMaxRetry()) {
                        break;
                    } else {
                        sleep(retryTimeoutInMs);
                    }
                } else {
                    throw logger.logExceptionAsError(new RuntimeException(
                        "Couldn't acquire access token from IMDS, verify your objectId, "
                            + "clientId or msiResourceId", exception));
                }
            } finally {
                if (connection != null) {
                    connection.disconnect();
                }
            }
        }
        throw logger.logExceptionAsError(new RuntimeException(
            String.format("MSI: Failed to acquire tokens after retrying %s times",
                options.getMaxRetry())));
    }));
}
/**
 * Probes the IMDS endpoint with a short connect timeout so callers can fail fast
 * when no Azure VM metadata service is reachable.
 *
 * @return a Mono emitting {@code true} on a successful connect, or an error signal
 *     when the connection attempt fails.
 */
private Mono<Boolean> checkIMDSAvailable() {
    StringBuilder payload = new StringBuilder();
    try {
        payload.append("api-version=");
        payload.append(URLEncoder.encode("2018-02-01", "UTF-8"));
    } catch (IOException exception) {
        return Mono.error(exception);
    }
    return Mono.fromCallable(() -> {
        HttpURLConnection connection = null;
        // NOTE(review): the URL string literal below was truncated in this copy of
        // the source ("http:"); restore the full IMDS probe URL (presumably the
        // http://169.254.169.254 metadata endpoint) from the original file.
        URL url = new URL(String.format("http:
        payload.toString()));
        try {
            connection = (HttpURLConnection) url.openConnection();
            connection.setRequestMethod("GET");
            // Short timeout: only checking reachability, not fetching a token.
            connection.setConnectTimeout(500);
            connection.connect();
        } finally {
            if (connection != null) {
                connection.disconnect();
            }
        }
        return true;
    });
}
/**
 * Blocks the current thread for the given number of milliseconds.
 *
 * @param millis how long to sleep, in milliseconds
 * @throws IllegalStateException if the thread is interrupted while sleeping; the
 *     interrupt status is restored first so callers can still observe it.
 */
private static void sleep(int millis) {
    try {
        Thread.sleep(millis);
    } catch (InterruptedException ex) {
        // Re-assert the interrupt flag - swallowing it would hide the cancellation
        // request from code further up the stack.
        Thread.currentThread().interrupt();
        throw new IllegalStateException(ex);
    }
}
/**
 * Converts azure-core proxy options to a {@link java.net.Proxy}. SOCKS4 and SOCKS5
 * both map to java.net's single SOCKS type; every other value is treated as HTTP.
 */
private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions proxyOptions) {
    switch (proxyOptions.getType()) {
        case SOCKS4:
        case SOCKS5:
            return new Proxy(Type.SOCKS, proxyOptions.getAddress());
        case HTTP:
        default:
            return new Proxy(Type.HTTP, proxyOptions.getAddress());
    }
}
/**
 * Returns a trusted working directory for launching external processes:
 * %SystemRoot%\system32 on Windows (or {@code null} when SystemRoot is unset),
 * /bin/ elsewhere.
 */
private String getSafeWorkingDirectory() {
    if (!isWindowsPlatform()) {
        return DEFAULT_MAC_LINUX_PATH;
    }
    // Without SystemRoot there is no known-safe directory to anchor to.
    return CoreUtils.isNullOrEmpty(DEFAULT_WINDOWS_SYSTEM_ROOT)
        ? null
        : DEFAULT_WINDOWS_SYSTEM_ROOT + "\\system32";
}
/**
 * Detects Windows hosts via the capitalized "Windows" token in the os.name property.
 */
private boolean isWindowsPlatform() {
    String osName = System.getProperty("os.name");
    return osName.contains("Windows");
}
/**
 * Replaces every match of the given regex in the input with "****", e.g. to scrub
 * secrets before logging.
 *
 * @param regex the pattern identifying the sensitive spans
 * @param input the text to redact
 * @return the input with all matches masked
 */
private String redactInfo(String regex, String input) {
    return input.replaceAll(regex, "****");
}
/**
 * Opens the given URL in the system's default browser.
 *
 * @param url the URL to open
 * @throws IOException if the launcher process cannot be started
 */
void openUrl(String url) throws IOException {
    Runtime rt = Runtime.getRuntime();
    String os = System.getProperty("os.name").toLowerCase(Locale.ROOT);
    // Pass an argv array so the URL reaches the launcher as a single argument;
    // Runtime.exec(String) tokenizes on whitespace and would split URLs containing
    // spaces (e.g. the unencoded scope list built for browser authentication).
    if (os.contains("win")) {
        rt.exec(new String[] { "rundll32", "url.dll,FileProtocolHandler", url });
    } else if (os.contains("mac")) {
        rt.exec(new String[] { "open", url });
    } else if (os.contains("nix") || os.contains("nux")) {
        rt.exec(new String[] { "xdg-open", url });
    } else {
        logger.error("Browser could not be opened - please open {} in a browser on this device.", url);
    }
}
} | class IdentityClient {
private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter();
private static final Random RANDOM = new Random();
private static final String WINDOWS_STARTER = "cmd.exe";
private static final String LINUX_MAC_STARTER = "/bin/sh";
private static final String WINDOWS_SWITCHER = "/c";
private static final String LINUX_MAC_SWITCHER = "-c";
private static final String WINDOWS_PROCESS_ERROR_MESSAGE = "'az' is not recognized";
private static final String LINUX_MAC_PROCESS_ERROR_MESSAGE = "(.*)az:(.*)not found";
private static final String DEFAULT_WINDOWS_SYSTEM_ROOT = System.getenv("SystemRoot");
private static final String DEFAULT_MAC_LINUX_PATH = "/bin/";
private final ClientLogger logger = new ClientLogger(IdentityClient.class);
private final IdentityClientOptions options;
private final PublicClientApplication publicClientApplication;
private final String tenantId;
private final String clientId;
private HttpPipelineAdapter httpPipelineAdapter;
/**
* Creates an IdentityClient with the given options.
*
* @param tenantId the tenant ID of the application.
* @param clientId the client ID of the application.
* @param options the options configuring the client.
*/
IdentityClient(String tenantId, String clientId, IdentityClientOptions options) {
if (tenantId == null) {
tenantId = "common";
}
if (options == null) {
options = new IdentityClientOptions();
}
this.tenantId = tenantId;
this.clientId = clientId;
this.options = options;
if (clientId == null) {
this.publicClientApplication = null;
} else {
String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/organizations/" + tenantId;
PublicClientApplication.Builder publicClientApplicationBuilder = PublicClientApplication.builder(clientId);
try {
publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl);
} catch (MalformedURLException e) {
throw logger.logExceptionAsWarning(new IllegalStateException(e));
}
HttpPipeline httpPipeline = options.getHttpPipeline();
if (httpPipeline != null) {
httpPipelineAdapter = new HttpPipelineAdapter(httpPipeline);
publicClientApplicationBuilder.httpClient(httpPipelineAdapter);
} else {
HttpClient httpClient = options.getHttpClient();
if (httpClient != null) {
httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(httpClient));
publicClientApplicationBuilder.httpClient(httpPipelineAdapter);
} else if (options.getProxyOptions() != null) {
publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
} else {
httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(HttpClient.createDefault()));
publicClientApplicationBuilder.httpClient(httpPipelineAdapter);
}
}
if (options.getExecutorService() != null) {
publicClientApplicationBuilder.executorService(options.getExecutorService());
}
this.publicClientApplication = publicClientApplicationBuilder.build();
}
}
/**
* Asynchronously acquire a token from Active Directory with Azure CLI.
*
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
/**
* Asynchronously acquire a token from Active Directory with a client secret.
*
* @param clientSecret the client secret of the application
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<AccessToken> authenticateWithClientSecret(String clientSecret, TokenRequestContext request) {
String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId;
try {
ConfidentialClientApplication.Builder applicationBuilder =
ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.createFromSecret(clientSecret))
.authority(authorityUrl);
if (httpPipelineAdapter != null) {
applicationBuilder.httpClient(httpPipelineAdapter);
} else if (options.getProxyOptions() != null) {
applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
}
if (options.getExecutorService() != null) {
applicationBuilder.executorService(options.getExecutorService());
}
ConfidentialClientApplication application = applicationBuilder.build();
return Mono.fromFuture(application.acquireToken(
ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
.build()))
.map(ar -> new MsalToken(ar, options));
} catch (MalformedURLException e) {
return Mono.error(e);
}
}
private HttpPipeline setupPipeline(HttpClient httpClient) {
List<HttpPipelinePolicy> policies = new ArrayList<>();
HttpLogOptions httpLogOptions = new HttpLogOptions();
HttpPolicyProviders.addBeforeRetryPolicies(policies);
policies.add(new RetryPolicy());
HttpPolicyProviders.addAfterRetryPolicies(policies);
policies.add(new HttpLoggingPolicy(httpLogOptions));
return new HttpPipelineBuilder().httpClient(httpClient)
.policies(policies.toArray(new HttpPipelinePolicy[0])).build();
}
/**
* Asynchronously acquire a token from Active Directory with a PKCS12 certificate.
*
* @param pfxCertificatePath the path to the PKCS12 certificate of the application
* @param pfxCertificatePassword the password protecting the PFX certificate
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<AccessToken> authenticateWithPfxCertificate(String pfxCertificatePath, String pfxCertificatePassword,
TokenRequestContext request) {
String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId;
return Mono.fromCallable(() -> {
ConfidentialClientApplication.Builder applicationBuilder =
ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.createFromCertificate(
new FileInputStream(pfxCertificatePath), pfxCertificatePassword))
.authority(authorityUrl);
if (httpPipelineAdapter != null) {
applicationBuilder.httpClient(httpPipelineAdapter);
} else if (options.getProxyOptions() != null) {
applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
}
if (options.getExecutorService() != null) {
applicationBuilder.executorService(options.getExecutorService());
}
return applicationBuilder.build();
}).flatMap(application -> Mono.fromFuture(application.acquireToken(
ClientCredentialParameters.builder(new HashSet<>(request.getScopes())).build())))
.map(ar -> new MsalToken(ar, options));
}
/**
* Asynchronously acquire a token from Active Directory with a PEM certificate.
*
* @param pemCertificatePath the path to the PEM certificate of the application
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<AccessToken> authenticateWithPemCertificate(String pemCertificatePath, TokenRequestContext request) {
String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId;
try {
byte[] pemCertificateBytes = Files.readAllBytes(Paths.get(pemCertificatePath));
ConfidentialClientApplication.Builder applicationBuilder =
ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.createFromCertificate(
CertificateUtil.privateKeyFromPem(pemCertificateBytes),
CertificateUtil.publicKeyFromPem(pemCertificateBytes)))
.authority(authorityUrl);
if (httpPipelineAdapter != null) {
applicationBuilder.httpClient(httpPipelineAdapter);
} else if (options.getProxyOptions() != null) {
applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
}
if (options.getExecutorService() != null) {
applicationBuilder.executorService(options.getExecutorService());
}
ConfidentialClientApplication application = applicationBuilder.build();
return Mono.fromFuture(application.acquireToken(
ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
.build()))
.map(ar -> new MsalToken(ar, options));
} catch (IOException e) {
return Mono.error(e);
}
}
/**
* Asynchronously acquire a token from Active Directory with a username and a password.
*
* @param request the details of the token request
* @param username the username of the user
* @param password the password of the user
* @return a Publisher that emits an AccessToken
*/
public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request,
String username, String password) {
return Mono.fromFuture(publicClientApplication.acquireToken(
UserNamePasswordParameters.builder(new HashSet<>(request.getScopes()), username, password.toCharArray())
.build()))
.map(ar -> new MsalToken(ar, options));
}
/**
* Asynchronously acquire a token from the currently logged in client.
*
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<MsalToken> authenticateWithUserRefreshToken(TokenRequestContext request, MsalToken msalToken) {
SilentParameters parameters;
if (msalToken.getAccount() != null) {
parameters = SilentParameters.builder(new HashSet<>(request.getScopes()), msalToken.getAccount()).build();
} else {
parameters = SilentParameters.builder(new HashSet<>(request.getScopes())).build();
}
return Mono.defer(() -> {
try {
return Mono.fromFuture(publicClientApplication.acquireTokenSilently(parameters))
.map(ar -> new MsalToken(ar, options));
} catch (MalformedURLException e) {
return Mono.error(e);
}
});
}
/**
* Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide
* a device code for login and the user must meet the challenge by authenticating in a browser on the current or a
* different device.
*
* @param request the details of the token request
* @param deviceCodeConsumer the user provided closure that will consume the device code challenge
* @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device
* code expires
*/
public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request,
Consumer<DeviceCodeInfo> deviceCodeConsumer) {
return Mono.fromFuture(() -> {
DeviceCodeFlowParameters parameters = DeviceCodeFlowParameters.builder(new HashSet<>(request.getScopes()),
dc -> deviceCodeConsumer.accept(new DeviceCodeInfo(dc.userCode(), dc.deviceCode(),
dc.verificationUri(), OffsetDateTime.now().plusSeconds(dc.expiresIn()), dc.message()))).build();
return publicClientApplication.acquireToken(parameters);
}).map(ar -> new MsalToken(ar, options));
}
/**
* Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow.
*
* @param request the details of the token request
* @param authorizationCode the oauth2 authorization code
* @param redirectUrl the redirectUrl where the authorization code is sent to
* @return a Publisher that emits an AccessToken
*/
public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode,
URI redirectUrl) {
return Mono.fromFuture(() -> publicClientApplication.acquireToken(
AuthorizationCodeParameters.builder(authorizationCode, redirectUrl)
.scopes(new HashSet<>(request.getScopes()))
.build()))
.map(ar -> new MsalToken(ar, options));
}
/**
* Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The
* credential will run a minimal local HttpServer at the given port, so {@code http:
* listed as a valid reply URL for the application.
*
* @param request the details of the token request
* @param port the port on which the HTTP server is listening
* @return a Publisher that emits an AccessToken
*/
public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, int port) {
String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId;
return AuthorizationCodeListener.create(port)
.flatMap(server -> {
URI redirectUri;
String browserUri;
try {
redirectUri = new URI(String.format("http:
browserUri =
String.format("%s/oauth2/v2.0/authorize?response_type=code&response_mode=query&prompt"
+ "=select_account&client_id=%s&redirect_uri=%s&state=%s&scope=%s",
authorityUrl,
clientId,
redirectUri.toString(),
UUID.randomUUID(),
String.join(" ", request.getScopes()));
} catch (URISyntaxException e) {
return server.dispose().then(Mono.error(e));
}
return server.listen()
.mergeWith(Mono.<String>fromRunnable(() -> {
try {
openUrl(browserUri);
} catch (IOException e) {
throw logger.logExceptionAsError(new IllegalStateException(e));
}
}).subscribeOn(Schedulers.newSingle("browser")))
.next()
.flatMap(code -> authenticateWithAuthorizationCode(request, code, redirectUri))
.onErrorResume(t -> server.dispose().then(Mono.error(t)))
.flatMap(msalToken -> server.dispose().then(Mono.just(msalToken)));
});
}
/**
* Asynchronously acquire a token from the App Service Managed Service Identity endpoint.
*
* @param msiEndpoint the endpoint to acquire token from
* @param msiSecret the secret to acquire token with
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String msiEndpoint, String msiSecret,
TokenRequestContext request) {
return Mono.fromCallable(() -> {
String resource = ScopeUtil.scopesToResource(request.getScopes());
HttpURLConnection connection = null;
StringBuilder payload = new StringBuilder();
payload.append("resource=");
payload.append(URLEncoder.encode(resource, "UTF-8"));
payload.append("&api-version=");
payload.append(URLEncoder.encode("2017-09-01", "UTF-8"));
if (clientId != null) {
payload.append("&clientid=");
payload.append(URLEncoder.encode(clientId, "UTF-8"));
}
try {
URL url = new URL(String.format("%s?%s", msiEndpoint, payload));
connection = (HttpURLConnection) url.openConnection();
connection.setRequestMethod("GET");
if (msiSecret != null) {
connection.setRequestProperty("Secret", msiSecret);
}
connection.setRequestProperty("Metadata", "true");
connection.connect();
Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name())
.useDelimiter("\\A");
String result = s.hasNext() ? s.next() : "";
MSIToken msiToken = SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON);
return new IdentityToken(msiToken.getToken(), msiToken.getExpiresAt(), options);
} finally {
if (connection != null) {
connection.disconnect();
}
}
});
}
/**
 * Asynchronously acquire a token from the Virtual Machine IMDS endpoint.
 *
 * <p>Builds the query payload up front, probes the endpoint via
 * {@code checkIMDSAvailable()}, then polls IMDS in a bounded retry loop for
 * transient failures.</p>
 *
 * @param request the details of the token request
 * @return a Publisher that emits an AccessToken
 */
public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) {
String resource = ScopeUtil.scopesToResource(request.getScopes());
StringBuilder payload = new StringBuilder();
// Floor for the retry delay on HTTP 410: an IMDS upgrade can take up to 70 s to clear.
final int imdsUpgradeTimeInMs = 70 * 1000;
try {
payload.append("api-version=");
payload.append(URLEncoder.encode("2018-02-01", "UTF-8"));
payload.append("&resource=");
payload.append(URLEncoder.encode(resource, "UTF-8"));
if (clientId != null) {
// Selects which identity to use when several user-assigned identities exist.
payload.append("&client_id=");
payload.append(URLEncoder.encode(clientId, "UTF-8"));
}
} catch (IOException exception) {
// URLEncoder.encode declares UnsupportedEncodingException; surface it reactively.
return Mono.error(exception);
}
return checkIMDSAvailable().flatMap(available -> Mono.fromCallable(() -> {
int retry = 1;
while (retry <= options.getMaxRetry()) {
URL url = null;
HttpURLConnection connection = null;
try {
// NOTE(review): the endpoint URL literal appears truncated ("http:") in this copy —
// confirm the full IMDS address against the original source.
url =
new URL(String.format("http:
payload.toString()));
connection = (HttpURLConnection) url.openConnection();
connection.setRequestMethod("GET");
// Required by IMDS; rejects requests forwarded by proxies (SSRF mitigation).
connection.setRequestProperty("Metadata", "true");
connection.connect();
// Read the whole body ("\\A" delimiter = entire stream).
Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name())
.useDelimiter("\\A");
String result = s.hasNext() ? s.next() : "";
MSIToken msiToken = SERIALIZER_ADAPTER.deserialize(result,
MSIToken.class, SerializerEncoding.JSON);
return new IdentityToken(msiToken.getToken(), msiToken.getExpiresAt(), options);
} catch (IOException exception) {
if (connection == null) {
// Connection was never established; nothing further to inspect.
throw logger.logExceptionAsError(new RuntimeException(
String.format("Could not connect to the url: %s.", url), exception));
}
int responseCode = connection.getResponseCode();
// Retriable statuses: 410 (IMDS upgrading), 429 (throttled), 404, and any 5xx.
if (responseCode == 410
|| responseCode == 429
|| responseCode == 404
|| (responseCode >= 500 && responseCode <= 599)) {
// NOTE(review): Duration.getNano() / 1000 yields microseconds, not milliseconds,
// so this likely under-sleeps — confirm the intended unit.
int retryTimeoutInMs = options.getRetryTimeout()
.apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000;
retryTimeoutInMs =
(responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? imdsUpgradeTimeInMs
: retryTimeoutInMs;
retry++;
if (retry > options.getMaxRetry()) {
break;
} else {
sleep(retryTimeoutInMs);
}
} else {
// Non-retriable status: fail immediately with configuration guidance.
throw logger.logExceptionAsError(new RuntimeException(
"Couldn't acquire access token from IMDS, verify your objectId, "
+ "clientId or msiResourceId", exception));
}
} finally {
if (connection != null) {
connection.disconnect();
}
}
}
// Retry budget exhausted.
throw logger.logExceptionAsError(new RuntimeException(
String.format("MSI: Failed to acquire tokens after retrying %s times",
options.getMaxRetry())));
}));
}
/**
 * Probes the IMDS endpoint to determine whether this host exposes it at all.
 *
 * @return a Mono emitting {@code true} once a connection succeeds; otherwise it errors
 */
private Mono<Boolean> checkIMDSAvailable() {
StringBuilder payload = new StringBuilder();
try {
payload.append("api-version=");
payload.append(URLEncoder.encode("2018-02-01", "UTF-8"));
} catch (IOException exception) {
// URLEncoder.encode declares UnsupportedEncodingException; surface it reactively.
return Mono.error(exception);
}
return Mono.fromCallable(() -> {
HttpURLConnection connection = null;
// NOTE(review): the endpoint URL literal appears truncated ("http:") in this copy —
// confirm the full IMDS address against the original source.
URL url = new URL(String.format("http:
payload.toString()));
try {
connection = (HttpURLConnection) url.openConnection();
connection.setRequestMethod("GET");
// Fail fast (500 ms) when not running on an Azure VM so callers can fall back quickly.
connection.setConnectTimeout(500);
connection.connect();
} finally {
if (connection != null) {
connection.disconnect();
}
}
return true;
});
}
/**
 * Blocks the current thread for the given number of milliseconds.
 *
 * @param millis the sleep duration in milliseconds
 * @throws IllegalStateException if the thread is interrupted while sleeping
 */
private static void sleep(int millis) {
    try {
        Thread.sleep(millis);
    } catch (InterruptedException ex) {
        // Restore the interrupt flag so code further up the stack can still observe
        // the interruption (Thread.sleep clears the flag when it throws).
        Thread.currentThread().interrupt();
        throw new IllegalStateException(ex);
    }
}
/**
 * Translates SDK proxy configuration into a {@link java.net.Proxy} usable by the
 * JDK networking stack.
 *
 * @param options the SDK proxy configuration to convert
 * @return a SOCKS proxy for SOCKS4/SOCKS5 configurations; an HTTP proxy otherwise
 */
private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) {
    switch (options.getType()) {
        case SOCKS4:
        case SOCKS5:
            return new Proxy(Type.SOCKS, options.getAddress());
        default:
            // HTTP and any future types fall back to an HTTP proxy.
            return new Proxy(Type.HTTP, options.getAddress());
    }
}
/**
 * Resolves a safe working directory for spawning helper processes.
 *
 * @return {@code %SystemRoot%\system32} on Windows (or {@code null} when the system
 *     root is not configured); the default Mac/Linux path otherwise
 */
private String getSafeWorkingDirectory() {
    if (!isWindowsPlatform()) {
        return DEFAULT_MAC_LINUX_PATH;
    }
    // On Windows we require a configured system root; otherwise no safe directory exists.
    return CoreUtils.isNullOrEmpty(DEFAULT_WINDOWS_SYSTEM_ROOT)
        ? null
        : DEFAULT_WINDOWS_SYSTEM_ROOT + "\\system32";
}
/**
 * Determines whether the current process is running on Windows.
 *
 * @return {@code true} when the {@code os.name} system property contains "Windows"
 */
private boolean isWindowsPlatform() {
    String osName = System.getProperty("os.name");
    return osName.contains("Windows");
}
/**
 * Masks every substring of {@code input} matching {@code regex}.
 *
 * @param regex the regular expression identifying sensitive spans
 * @param input the text to redact
 * @return the input with every match replaced by a mask
 */
private String redactInfo(String regex, String input) {
    final String mask = "****";
    return input.replaceAll(regex, mask);
}
/**
 * Opens the given URL in the platform's default browser, choosing the launcher
 * command based on the operating system; logs an error on unsupported platforms.
 *
 * @param url the URL to open
 * @throws IOException if the browser process cannot be started
 */
void openUrl(String url) throws IOException {
    Runtime rt = Runtime.getRuntime();
    String os = System.getProperty("os.name").toLowerCase(Locale.ROOT);
    // Use the exec(String[]) overload: exec(String) is deprecated and re-tokenizes
    // the command line on whitespace, which would split URLs containing spaces or
    // allow metacharacter-bearing URLs to become extra arguments.
    if (os.contains("win")) {
        rt.exec(new String[]{"rundll32", "url.dll,FileProtocolHandler", url});
    } else if (os.contains("mac")) {
        rt.exec(new String[]{"open", url});
    } else if (os.contains("nix") || os.contains("nux")) {
        rt.exec(new String[]{"xdg-open", url});
    } else {
        logger.error("Browser could not be opened - please open {} in a browser on this device.", url);
    }
}
} |
Is there a need to have a short lived clone of the global configuration? If anything it may be better to use it directly as it could load into static memory the environment setting and will be accessed quicker in the future. | public IdentityClientOptions() {
Configuration configuration = Configuration.getGlobalConfiguration().clone();
authorityHost = configuration.contains(configuration.PROPERTY_AZURE_AUTHORITY_HOST)
? configuration.get(configuration.PROPERTY_AZURE_AUTHORITY_HOST) : DEFAULT_AUTHORITY_HOST;
maxRetry = MAX_RETRY_DEFAULT_LIMIT;
retryTimeout = i -> Duration.ofSeconds((long) Math.pow(2, i.getSeconds() - 1));
} | Configuration configuration = Configuration.getGlobalConfiguration().clone(); | public IdentityClientOptions() {
Configuration configuration = Configuration.getGlobalConfiguration();
authorityHost = configuration.get(Configuration.PROPERTY_AZURE_AUTHORITY_HOST, KnownAuthorityHosts.AZURE_CLOUD);
maxRetry = MAX_RETRY_DEFAULT_LIMIT;
retryTimeout = i -> Duration.ofSeconds((long) Math.pow(2, i.getSeconds() - 1));
} | class IdentityClientOptions {
private static final String DEFAULT_AUTHORITY_HOST = "https:
private static final int MAX_RETRY_DEFAULT_LIMIT = 3;
private String authorityHost;
private int maxRetry;
private Function<Duration, Duration> retryTimeout;
private ProxyOptions proxyOptions;
private HttpPipeline httpPipeline;
/**
* Creates an instance of IdentityClientOptions with default settings.
*/
/**
* @return the Azure Active Directory endpoint to acquire tokens.
*/
public String getAuthorityHost() {
return authorityHost;
}
/**
* Specifies the Azure Active Directory endpoint to acquire tokens.
* @param authorityHost the Azure Active Directory endpoint
* @return IdentityClientOptions
*/
public IdentityClientOptions setAuthorityHost(String authorityHost) {
this.authorityHost = authorityHost;
return this;
}
/**
* @return the max number of retries when an authentication request fails.
*/
public int getMaxRetry() {
return maxRetry;
}
/**
* Specifies the max number of retries when an authentication request fails.
* @param maxRetry the number of retries
* @return IdentityClientOptions
*/
public IdentityClientOptions setMaxRetry(int maxRetry) {
this.maxRetry = maxRetry;
return this;
}
/**
* @return a Function to calculate seconds of timeout on every retried request.
*/
public Function<Duration, Duration> getRetryTimeout() {
return retryTimeout;
}
/**
* Specifies a Function to calculate seconds of timeout on every retried request.
* @param retryTimeout the Function that returns a timeout in seconds given the number of retry
* @return IdentityClientOptions
*/
public IdentityClientOptions setRetryTimeout(Function<Duration, Duration> retryTimeout) {
this.retryTimeout = retryTimeout;
return this;
}
/**
* @return the options for proxy configuration.
*/
public ProxyOptions getProxyOptions() {
return proxyOptions;
}
/**
* Specifies the options for proxy configuration.
* @param proxyOptions the options for proxy configuration
* @return IdentityClientOptions
*/
public IdentityClientOptions setProxyOptions(ProxyOptions proxyOptions) {
this.proxyOptions = proxyOptions;
return this;
}
/**
* @return the HttpPipeline to send all requests
*/
public HttpPipeline getHttpPipeline() {
return httpPipeline;
}
/**
* Specifies the HttpPipeline to send all requests. This setting overrides the others.
* @param httpPipeline the HttpPipeline to send all requests
* @return IdentityClientOptions
*/
public IdentityClientOptions setHttpPipeline(HttpPipeline httpPipeline) {
this.httpPipeline = httpPipeline;
return this;
}
} | class IdentityClientOptions {
private static final int MAX_RETRY_DEFAULT_LIMIT = 3;
private String authorityHost;
private int maxRetry;
private Function<Duration, Duration> retryTimeout;
private ProxyOptions proxyOptions;
private HttpPipeline httpPipeline;
private ExecutorService executorService;
private Duration tokenRefreshOffset = Duration.ofMinutes(2);
private HttpClient httpClient;
/**
* Creates an instance of IdentityClientOptions with default settings.
*/
/**
* @return the Azure Active Directory endpoint to acquire tokens.
*/
public String getAuthorityHost() {
return authorityHost;
}
/**
* Specifies the Azure Active Directory endpoint to acquire tokens.
* @param authorityHost the Azure Active Directory endpoint
* @return IdentityClientOptions
*/
public IdentityClientOptions setAuthorityHost(String authorityHost) {
this.authorityHost = authorityHost;
return this;
}
/**
* @return the max number of retries when an authentication request fails.
*/
public int getMaxRetry() {
return maxRetry;
}
/**
* Specifies the max number of retries when an authentication request fails.
* @param maxRetry the number of retries
* @return IdentityClientOptions
*/
public IdentityClientOptions setMaxRetry(int maxRetry) {
this.maxRetry = maxRetry;
return this;
}
/**
* @return a Function to calculate seconds of timeout on every retried request.
*/
public Function<Duration, Duration> getRetryTimeout() {
return retryTimeout;
}
/**
* Specifies a Function to calculate seconds of timeout on every retried request.
* @param retryTimeout the Function that returns a timeout in seconds given the number of retry
* @return IdentityClientOptions
*/
public IdentityClientOptions setRetryTimeout(Function<Duration, Duration> retryTimeout) {
this.retryTimeout = retryTimeout;
return this;
}
/**
* @return the options for proxy configuration.
*/
public ProxyOptions getProxyOptions() {
return proxyOptions;
}
/**
* Specifies the options for proxy configuration.
* @param proxyOptions the options for proxy configuration
* @return IdentityClientOptions
*/
public IdentityClientOptions setProxyOptions(ProxyOptions proxyOptions) {
this.proxyOptions = proxyOptions;
return this;
}
/**
* @return the HttpPipeline to send all requests
*/
public HttpPipeline getHttpPipeline() {
return httpPipeline;
}
/**
* @return the HttpClient to use for requests
*/
public HttpClient getHttpClient() {
return httpClient;
}
/**
* Specifies the HttpPipeline to send all requests. This setting overrides the others.
* @param httpPipeline the HttpPipeline to send all requests
* @return IdentityClientOptions
*/
public IdentityClientOptions setHttpPipeline(HttpPipeline httpPipeline) {
this.httpPipeline = httpPipeline;
return this;
}
/**
* Specifies the ExecutorService to be used to execute the authentication requests.
* Developer is responsible for maintaining the lifecycle of the ExecutorService.
*
* <p>
* If this is not configured, the {@link ForkJoinPool
* also shared with other application tasks. If the common pool is heavily used for other tasks, authentication
* requests might starve and setting up this executor service should be considered.
* </p>
*
* <p> The executor service and can be safely shutdown if the TokenCredential is no longer being used by the
* Azure SDK clients and should be shutdown before the application exits. </p>
*
* @param executorService the executor service to use for executing authentication requests.
* @return IdentityClientOptions
*/
public IdentityClientOptions setExecutorService(ExecutorService executorService) {
this.executorService = executorService;
return this;
}
/**
* @return the ExecutorService to execute authentication requests.
*/
public ExecutorService getExecutorService() {
return executorService;
}
/**
* @return how long before the actual token expiry to refresh the token.
*/
public Duration getTokenRefreshOffset() {
return tokenRefreshOffset;
}
/**
* Sets how long before the actual token expiry to refresh the token. The
* token will be considered expired at and after the time of (actual
* expiry - token refresh offset). The default offset is 2 minutes.
*
* This is useful when network is congested and a request containing the
* token takes longer than normal to get to the server.
*
* @param tokenRefreshOffset the duration before the actual expiry of a token to refresh it
* @return IdentityClientOptions
* @throws NullPointerException If {@code tokenRefreshOffset} is null.
*/
public IdentityClientOptions setTokenRefreshOffset(Duration tokenRefreshOffset) {
Objects.requireNonNull(tokenRefreshOffset, "The token refresh offset cannot be null.");
this.tokenRefreshOffset = tokenRefreshOffset;
return this;
}
/**
* Specifies the HttpClient to send use for requests.
* @param httpClient the http client to use for requests
* @return IdentityClientOptions
*/
public IdentityClientOptions setHttpClient(HttpClient httpClient) {
this.httpClient = httpClient;
return this;
}
} |
`Configuration` has a `T get(String configurationName, T defaultValue)` method that could simplify this logic. ```java authorityHost = configuration.get(Configuration.PROPERTY_AZURE_AUTHORITY_HOST, DEFAULT_AUTHORITY_HOST); ``` | public IdentityClientOptions() {
Configuration configuration = Configuration.getGlobalConfiguration().clone();
authorityHost = configuration.contains(configuration.PROPERTY_AZURE_AUTHORITY_HOST)
? configuration.get(configuration.PROPERTY_AZURE_AUTHORITY_HOST) : DEFAULT_AUTHORITY_HOST;
maxRetry = MAX_RETRY_DEFAULT_LIMIT;
retryTimeout = i -> Duration.ofSeconds((long) Math.pow(2, i.getSeconds() - 1));
} | ? configuration.get(configuration.PROPERTY_AZURE_AUTHORITY_HOST) : DEFAULT_AUTHORITY_HOST; | public IdentityClientOptions() {
Configuration configuration = Configuration.getGlobalConfiguration();
authorityHost = configuration.get(Configuration.PROPERTY_AZURE_AUTHORITY_HOST, KnownAuthorityHosts.AZURE_CLOUD);
maxRetry = MAX_RETRY_DEFAULT_LIMIT;
retryTimeout = i -> Duration.ofSeconds((long) Math.pow(2, i.getSeconds() - 1));
} | class IdentityClientOptions {
private static final String DEFAULT_AUTHORITY_HOST = "https:
private static final int MAX_RETRY_DEFAULT_LIMIT = 3;
private String authorityHost;
private int maxRetry;
private Function<Duration, Duration> retryTimeout;
private ProxyOptions proxyOptions;
private HttpPipeline httpPipeline;
/**
* Creates an instance of IdentityClientOptions with default settings.
*/
/**
* @return the Azure Active Directory endpoint to acquire tokens.
*/
public String getAuthorityHost() {
return authorityHost;
}
/**
* Specifies the Azure Active Directory endpoint to acquire tokens.
* @param authorityHost the Azure Active Directory endpoint
* @return IdentityClientOptions
*/
public IdentityClientOptions setAuthorityHost(String authorityHost) {
this.authorityHost = authorityHost;
return this;
}
/**
* @return the max number of retries when an authentication request fails.
*/
public int getMaxRetry() {
return maxRetry;
}
/**
* Specifies the max number of retries when an authentication request fails.
* @param maxRetry the number of retries
* @return IdentityClientOptions
*/
public IdentityClientOptions setMaxRetry(int maxRetry) {
this.maxRetry = maxRetry;
return this;
}
/**
* @return a Function to calculate seconds of timeout on every retried request.
*/
public Function<Duration, Duration> getRetryTimeout() {
return retryTimeout;
}
/**
* Specifies a Function to calculate seconds of timeout on every retried request.
* @param retryTimeout the Function that returns a timeout in seconds given the number of retry
* @return IdentityClientOptions
*/
public IdentityClientOptions setRetryTimeout(Function<Duration, Duration> retryTimeout) {
this.retryTimeout = retryTimeout;
return this;
}
/**
* @return the options for proxy configuration.
*/
public ProxyOptions getProxyOptions() {
return proxyOptions;
}
/**
* Specifies the options for proxy configuration.
* @param proxyOptions the options for proxy configuration
* @return IdentityClientOptions
*/
public IdentityClientOptions setProxyOptions(ProxyOptions proxyOptions) {
this.proxyOptions = proxyOptions;
return this;
}
/**
* @return the HttpPipeline to send all requests
*/
public HttpPipeline getHttpPipeline() {
return httpPipeline;
}
/**
* Specifies the HttpPipeline to send all requests. This setting overrides the others.
* @param httpPipeline the HttpPipeline to send all requests
* @return IdentityClientOptions
*/
public IdentityClientOptions setHttpPipeline(HttpPipeline httpPipeline) {
this.httpPipeline = httpPipeline;
return this;
}
} | class IdentityClientOptions {
private static final int MAX_RETRY_DEFAULT_LIMIT = 3;
private String authorityHost;
private int maxRetry;
private Function<Duration, Duration> retryTimeout;
private ProxyOptions proxyOptions;
private HttpPipeline httpPipeline;
private ExecutorService executorService;
private Duration tokenRefreshOffset = Duration.ofMinutes(2);
private HttpClient httpClient;
/**
* Creates an instance of IdentityClientOptions with default settings.
*/
/**
* @return the Azure Active Directory endpoint to acquire tokens.
*/
public String getAuthorityHost() {
return authorityHost;
}
/**
* Specifies the Azure Active Directory endpoint to acquire tokens.
* @param authorityHost the Azure Active Directory endpoint
* @return IdentityClientOptions
*/
public IdentityClientOptions setAuthorityHost(String authorityHost) {
this.authorityHost = authorityHost;
return this;
}
/**
* @return the max number of retries when an authentication request fails.
*/
public int getMaxRetry() {
return maxRetry;
}
/**
* Specifies the max number of retries when an authentication request fails.
* @param maxRetry the number of retries
* @return IdentityClientOptions
*/
public IdentityClientOptions setMaxRetry(int maxRetry) {
this.maxRetry = maxRetry;
return this;
}
/**
* @return a Function to calculate seconds of timeout on every retried request.
*/
public Function<Duration, Duration> getRetryTimeout() {
return retryTimeout;
}
/**
* Specifies a Function to calculate seconds of timeout on every retried request.
* @param retryTimeout the Function that returns a timeout in seconds given the number of retry
* @return IdentityClientOptions
*/
public IdentityClientOptions setRetryTimeout(Function<Duration, Duration> retryTimeout) {
this.retryTimeout = retryTimeout;
return this;
}
/**
* @return the options for proxy configuration.
*/
public ProxyOptions getProxyOptions() {
return proxyOptions;
}
/**
* Specifies the options for proxy configuration.
* @param proxyOptions the options for proxy configuration
* @return IdentityClientOptions
*/
public IdentityClientOptions setProxyOptions(ProxyOptions proxyOptions) {
this.proxyOptions = proxyOptions;
return this;
}
/**
* @return the HttpPipeline to send all requests
*/
public HttpPipeline getHttpPipeline() {
return httpPipeline;
}
/**
* @return the HttpClient to use for requests
*/
public HttpClient getHttpClient() {
return httpClient;
}
/**
* Specifies the HttpPipeline to send all requests. This setting overrides the others.
* @param httpPipeline the HttpPipeline to send all requests
* @return IdentityClientOptions
*/
public IdentityClientOptions setHttpPipeline(HttpPipeline httpPipeline) {
this.httpPipeline = httpPipeline;
return this;
}
/**
* Specifies the ExecutorService to be used to execute the authentication requests.
* Developer is responsible for maintaining the lifecycle of the ExecutorService.
*
* <p>
* If this is not configured, the {@link ForkJoinPool
* also shared with other application tasks. If the common pool is heavily used for other tasks, authentication
* requests might starve and setting up this executor service should be considered.
* </p>
*
* <p> The executor service and can be safely shutdown if the TokenCredential is no longer being used by the
* Azure SDK clients and should be shutdown before the application exits. </p>
*
* @param executorService the executor service to use for executing authentication requests.
* @return IdentityClientOptions
*/
public IdentityClientOptions setExecutorService(ExecutorService executorService) {
this.executorService = executorService;
return this;
}
/**
* @return the ExecutorService to execute authentication requests.
*/
public ExecutorService getExecutorService() {
return executorService;
}
/**
* @return how long before the actual token expiry to refresh the token.
*/
public Duration getTokenRefreshOffset() {
return tokenRefreshOffset;
}
/**
* Sets how long before the actual token expiry to refresh the token. The
* token will be considered expired at and after the time of (actual
* expiry - token refresh offset). The default offset is 2 minutes.
*
* This is useful when network is congested and a request containing the
* token takes longer than normal to get to the server.
*
* @param tokenRefreshOffset the duration before the actual expiry of a token to refresh it
* @return IdentityClientOptions
* @throws NullPointerException If {@code tokenRefreshOffset} is null.
*/
public IdentityClientOptions setTokenRefreshOffset(Duration tokenRefreshOffset) {
Objects.requireNonNull(tokenRefreshOffset, "The token refresh offset cannot be null.");
this.tokenRefreshOffset = tokenRefreshOffset;
return this;
}
/**
* Specifies the HttpClient to send use for requests.
* @param httpClient the http client to use for requests
* @return IdentityClientOptions
*/
public IdentityClientOptions setHttpClient(HttpClient httpClient) {
this.httpClient = httpClient;
return this;
}
} |
In Arrange, Act, and Assert terms, this line would be the "Act" step, and line 135 would be the "Assert" step. | void createsMessageBatchWithSize() {
int batchSize = 1024;
final CreateBatchOptions options = new CreateBatchOptions().setMaximumSizeInBytes(batchSize);
ServiceBusMessageBatch batch = new ServiceBusMessageBatch(batchSize, null, null,
null);
when(asyncSender.createBatch(options)).thenReturn(Mono.just(batch));
ServiceBusMessageBatch messageBatch = sender.createBatch(options);
Assertions.assertEquals(batch, messageBatch);
} | ServiceBusMessageBatch messageBatch = sender.createBatch(options); | void createsMessageBatchWithSize() {
int batchSize = 1024;
final CreateBatchOptions options = new CreateBatchOptions().setMaximumSizeInBytes(batchSize);
final ServiceBusMessageBatch batch = new ServiceBusMessageBatch(batchSize, null, null,
null);
when(asyncSender.createBatch(options)).thenReturn(Mono.just(batch));
ServiceBusMessageBatch messageBatch = sender.createBatch(options);
Assertions.assertEquals(batch, messageBatch);
} | class ServiceBusSenderClientTest {
private static final String NAMESPACE = "my-namespace";
private static final String ENTITY_NAME = "my-servicebus-entity";
@Mock
private ServiceBusSenderAsyncClient asyncSender;
@Captor
private ArgumentCaptor<ServiceBusMessage> singleMessageCaptor;
private ServiceBusSenderClient sender;
private static final Duration RETRY_TIMEOUT = Duration.ofSeconds(10);
private static final String TEST_CONTENTS = "My message for service bus queue!";
@BeforeAll
static void beforeAll() {
StepVerifier.setDefaultTimeout(Duration.ofSeconds(30));
}
@AfterAll
static void afterAll() {
StepVerifier.resetDefaultTimeout();
}
@BeforeEach
void setup() {
MockitoAnnotations.initMocks(this);
when(asyncSender.getEntityPath()).thenReturn(ENTITY_NAME);
when(asyncSender.getFullyQualifiedNamespace()).thenReturn(NAMESPACE);
sender = new ServiceBusSenderClient(asyncSender, RETRY_TIMEOUT);
}
@AfterEach
void teardown() {
sender.close();
singleMessageCaptor = null;
Mockito.framework().clearInlineMocks();
}
@Test
void verifyProperties() {
Assertions.assertEquals(ENTITY_NAME, sender.getEntityPath());
Assertions.assertEquals(NAMESPACE, sender.getFullyQualifiedNamespace());
}
/**
* Verifies that an exception is thrown when we create a batch with null options.
*/
@Test
void createBatchNull() {
Assertions.assertThrows(NullPointerException.class, () -> sender.createBatch(null));
}
/**
* Verifies that the default batch is the same size as the message link.
*/
@Test
void createBatchDefault() {
ServiceBusMessageBatch batch = new ServiceBusMessageBatch(MAX_MESSAGE_LENGTH_BYTES, null, null,
null);
when(asyncSender.createBatch()).thenReturn(Mono.just(batch));
ServiceBusMessageBatch batchMessage = sender.createBatch();
Assertions.assertEquals(MAX_MESSAGE_LENGTH_BYTES, batchMessage.getMaxSizeInBytes());
Assertions.assertEquals(0, batchMessage.getCount());
verify(asyncSender).createBatch();
}
/**
* Verifies we cannot create a batch if the options size is larger than the link.
*/
@Test
void createBatchWhenSizeTooBigThanOnSendLink() {
int maxLinkSize = 1024;
int batchSize = maxLinkSize + 10;
final CreateBatchOptions options = new CreateBatchOptions().setMaximumSizeInBytes(batchSize);
when(asyncSender.createBatch(options)).thenThrow(new IllegalArgumentException("too large size"));
Assertions.assertThrows(IllegalArgumentException.class, () -> sender.createBatch(options));
verify(asyncSender, times(1)).createBatch(options);
}
/**
* Verifies that the producer can create a batch with a given {@link CreateBatchOptions
*/
@Test
/**
* Verifies that sending a single message will result in calling sender.send(Message).
*/
@Test
void sendSingleMessage() {
final ServiceBusMessage testData =
new ServiceBusMessage(TEST_CONTENTS.getBytes(UTF_8));
when(asyncSender.send(testData)).thenReturn(Mono.empty());
sender.send(testData);
verify(asyncSender, times(1)).send(testData);
verify(asyncSender).send(singleMessageCaptor.capture());
final ServiceBusMessage message = singleMessageCaptor.getValue();
Assertions.assertArrayEquals(testData.getBody(), message.getBody());
}
} | class ServiceBusSenderClientTest {
private static final String NAMESPACE = "my-namespace";
private static final String ENTITY_NAME = "my-servicebus-entity";
@Mock
private ServiceBusSenderAsyncClient asyncSender;
@Captor
private ArgumentCaptor<ServiceBusMessage> singleMessageCaptor;
private ServiceBusSenderClient sender;
private static final Duration RETRY_TIMEOUT = Duration.ofSeconds(10);
private static final String TEST_CONTENTS = "My message for service bus queue!";
@BeforeAll
static void beforeAll() {
StepVerifier.setDefaultTimeout(Duration.ofSeconds(30));
}
@AfterAll
static void afterAll() {
StepVerifier.resetDefaultTimeout();
}
@BeforeEach
void setup() {
MockitoAnnotations.initMocks(this);
when(asyncSender.getEntityPath()).thenReturn(ENTITY_NAME);
when(asyncSender.getFullyQualifiedNamespace()).thenReturn(NAMESPACE);
sender = new ServiceBusSenderClient(asyncSender, RETRY_TIMEOUT);
}
@AfterEach
void teardown() {
sender.close();
singleMessageCaptor = null;
Mockito.framework().clearInlineMocks();
}
@Test
void verifyProperties() {
Assertions.assertEquals(ENTITY_NAME, sender.getEntityPath());
Assertions.assertEquals(NAMESPACE, sender.getFullyQualifiedNamespace());
}
/**
* Verifies that an exception is thrown when we create a batch with null options.
*/
@Test
void createBatchNull() {
Assertions.assertThrows(NullPointerException.class, () -> sender.createBatch(null));
}
/**
* Verifies that the default batch is the same size as the message link.
*/
@Test
void createBatchDefault() {
ServiceBusMessageBatch batch = new ServiceBusMessageBatch(MAX_MESSAGE_LENGTH_BYTES, null, null,
null);
when(asyncSender.createBatch()).thenReturn(Mono.just(batch));
ServiceBusMessageBatch batchMessage = sender.createBatch();
Assertions.assertEquals(MAX_MESSAGE_LENGTH_BYTES, batchMessage.getMaxSizeInBytes());
Assertions.assertEquals(0, batchMessage.getCount());
verify(asyncSender).createBatch();
}
/**
* Verifies we cannot create a batch if the options size is larger than the link.
*/
@Test
void createBatchWhenSizeTooBigThanOnSendLink() {
int maxLinkSize = 1024;
int batchSize = maxLinkSize + 10;
final CreateBatchOptions options = new CreateBatchOptions().setMaximumSizeInBytes(batchSize);
when(asyncSender.createBatch(options)).thenThrow(new IllegalArgumentException("too large size"));
Assertions.assertThrows(IllegalArgumentException.class, () -> sender.createBatch(options));
verify(asyncSender, times(1)).createBatch(options);
}
/**
* Verifies that the producer can create a batch with a given {@link CreateBatchOptions
*/
@Test
/**
* Verifies that sending a single message will result in calling sender.send(Message).
*/
@Test
void sendSingleMessage() {
final ServiceBusMessage testData =
new ServiceBusMessage(TEST_CONTENTS.getBytes(UTF_8));
when(asyncSender.send(testData)).thenReturn(Mono.empty());
sender.send(testData);
verify(asyncSender, times(1)).send(testData);
verify(asyncSender).send(singleMessageCaptor.capture());
final ServiceBusMessage message = singleMessageCaptor.getValue();
Assertions.assertArrayEquals(testData.getBody(), message.getBody());
}
} |
I followed the approach in SharedTokenCacheCredential; it cloned a configuration in its constructor. | public IdentityClientOptions() {
Configuration configuration = Configuration.getGlobalConfiguration().clone();
authorityHost = configuration.contains(configuration.PROPERTY_AZURE_AUTHORITY_HOST)
? configuration.get(configuration.PROPERTY_AZURE_AUTHORITY_HOST) : DEFAULT_AUTHORITY_HOST;
maxRetry = MAX_RETRY_DEFAULT_LIMIT;
retryTimeout = i -> Duration.ofSeconds((long) Math.pow(2, i.getSeconds() - 1));
} | Configuration configuration = Configuration.getGlobalConfiguration().clone(); | public IdentityClientOptions() {
Configuration configuration = Configuration.getGlobalConfiguration();
authorityHost = configuration.get(Configuration.PROPERTY_AZURE_AUTHORITY_HOST, KnownAuthorityHosts.AZURE_CLOUD);
maxRetry = MAX_RETRY_DEFAULT_LIMIT;
retryTimeout = i -> Duration.ofSeconds((long) Math.pow(2, i.getSeconds() - 1));
} | class IdentityClientOptions {
private static final String DEFAULT_AUTHORITY_HOST = "https:
private static final int MAX_RETRY_DEFAULT_LIMIT = 3;
private String authorityHost;
private int maxRetry;
private Function<Duration, Duration> retryTimeout;
private ProxyOptions proxyOptions;
private HttpPipeline httpPipeline;
/**
* Creates an instance of IdentityClientOptions with default settings.
*/
/**
* @return the Azure Active Directory endpoint to acquire tokens.
*/
public String getAuthorityHost() {
return authorityHost;
}
/**
* Specifies the Azure Active Directory endpoint to acquire tokens.
* @param authorityHost the Azure Active Directory endpoint
* @return IdentityClientOptions
*/
public IdentityClientOptions setAuthorityHost(String authorityHost) {
this.authorityHost = authorityHost;
return this;
}
/**
* @return the max number of retries when an authentication request fails.
*/
public int getMaxRetry() {
return maxRetry;
}
/**
* Specifies the max number of retries when an authentication request fails.
* @param maxRetry the number of retries
* @return IdentityClientOptions
*/
public IdentityClientOptions setMaxRetry(int maxRetry) {
this.maxRetry = maxRetry;
return this;
}
/**
* @return a Function to calculate seconds of timeout on every retried request.
*/
public Function<Duration, Duration> getRetryTimeout() {
return retryTimeout;
}
/**
* Specifies a Function to calculate seconds of timeout on every retried request.
* @param retryTimeout the Function that returns a timeout in seconds given the number of retry
* @return IdentityClientOptions
*/
public IdentityClientOptions setRetryTimeout(Function<Duration, Duration> retryTimeout) {
this.retryTimeout = retryTimeout;
return this;
}
/**
* @return the options for proxy configuration.
*/
public ProxyOptions getProxyOptions() {
return proxyOptions;
}
/**
* Specifies the options for proxy configuration.
* @param proxyOptions the options for proxy configuration
* @return IdentityClientOptions
*/
public IdentityClientOptions setProxyOptions(ProxyOptions proxyOptions) {
this.proxyOptions = proxyOptions;
return this;
}
/**
* @return the HttpPipeline to send all requests
*/
public HttpPipeline getHttpPipeline() {
return httpPipeline;
}
/**
* Specifies the HttpPipeline to send all requests. This setting overrides the others.
* @param httpPipeline the HttpPipeline to send all requests
* @return IdentityClientOptions
*/
public IdentityClientOptions setHttpPipeline(HttpPipeline httpPipeline) {
this.httpPipeline = httpPipeline;
return this;
}
} | class IdentityClientOptions {
private static final int MAX_RETRY_DEFAULT_LIMIT = 3;
private String authorityHost;
private int maxRetry;
private Function<Duration, Duration> retryTimeout;
private ProxyOptions proxyOptions;
private HttpPipeline httpPipeline;
private ExecutorService executorService;
private Duration tokenRefreshOffset = Duration.ofMinutes(2);
private HttpClient httpClient;
/**
* Creates an instance of IdentityClientOptions with default settings.
*/
/**
* @return the Azure Active Directory endpoint to acquire tokens.
*/
public String getAuthorityHost() {
return authorityHost;
}
/**
* Specifies the Azure Active Directory endpoint to acquire tokens.
* @param authorityHost the Azure Active Directory endpoint
* @return IdentityClientOptions
*/
public IdentityClientOptions setAuthorityHost(String authorityHost) {
this.authorityHost = authorityHost;
return this;
}
/**
* @return the max number of retries when an authentication request fails.
*/
public int getMaxRetry() {
return maxRetry;
}
/**
* Specifies the max number of retries when an authentication request fails.
* @param maxRetry the number of retries
* @return IdentityClientOptions
*/
public IdentityClientOptions setMaxRetry(int maxRetry) {
this.maxRetry = maxRetry;
return this;
}
/**
* @return a Function to calculate seconds of timeout on every retried request.
*/
public Function<Duration, Duration> getRetryTimeout() {
return retryTimeout;
}
/**
* Specifies a Function to calculate seconds of timeout on every retried request.
* @param retryTimeout the Function that returns a timeout in seconds given the number of retry
* @return IdentityClientOptions
*/
public IdentityClientOptions setRetryTimeout(Function<Duration, Duration> retryTimeout) {
this.retryTimeout = retryTimeout;
return this;
}
/**
* @return the options for proxy configuration.
*/
public ProxyOptions getProxyOptions() {
return proxyOptions;
}
/**
* Specifies the options for proxy configuration.
* @param proxyOptions the options for proxy configuration
* @return IdentityClientOptions
*/
public IdentityClientOptions setProxyOptions(ProxyOptions proxyOptions) {
this.proxyOptions = proxyOptions;
return this;
}
/**
* @return the HttpPipeline to send all requests
*/
public HttpPipeline getHttpPipeline() {
return httpPipeline;
}
/**
* @return the HttpClient to use for requests
*/
public HttpClient getHttpClient() {
return httpClient;
}
/**
* Specifies the HttpPipeline to send all requests. This setting overrides the others.
* @param httpPipeline the HttpPipeline to send all requests
* @return IdentityClientOptions
*/
public IdentityClientOptions setHttpPipeline(HttpPipeline httpPipeline) {
this.httpPipeline = httpPipeline;
return this;
}
/**
* Specifies the ExecutorService to be used to execute the authentication requests.
* Developer is responsible for maintaining the lifecycle of the ExecutorService.
*
* <p>
* If this is not configured, the {@link ForkJoinPool
* also shared with other application tasks. If the common pool is heavily used for other tasks, authentication
* requests might starve and setting up this executor service should be considered.
* </p>
*
* <p> The executor service and can be safely shutdown if the TokenCredential is no longer being used by the
* Azure SDK clients and should be shutdown before the application exits. </p>
*
* @param executorService the executor service to use for executing authentication requests.
* @return IdentityClientOptions
*/
public IdentityClientOptions setExecutorService(ExecutorService executorService) {
this.executorService = executorService;
return this;
}
/**
* @return the ExecutorService to execute authentication requests.
*/
public ExecutorService getExecutorService() {
return executorService;
}
/**
* @return how long before the actual token expiry to refresh the token.
*/
public Duration getTokenRefreshOffset() {
return tokenRefreshOffset;
}
/**
* Sets how long before the actual token expiry to refresh the token. The
* token will be considered expired at and after the time of (actual
* expiry - token refresh offset). The default offset is 2 minutes.
*
* This is useful when network is congested and a request containing the
* token takes longer than normal to get to the server.
*
* @param tokenRefreshOffset the duration before the actual expiry of a token to refresh it
* @return IdentityClientOptions
* @throws NullPointerException If {@code tokenRefreshOffset} is null.
*/
public IdentityClientOptions setTokenRefreshOffset(Duration tokenRefreshOffset) {
Objects.requireNonNull(tokenRefreshOffset, "The token refresh offset cannot be null.");
this.tokenRefreshOffset = tokenRefreshOffset;
return this;
}
/**
* Specifies the HttpClient to send use for requests.
* @param httpClient the http client to use for requests
* @return IdentityClientOptions
*/
public IdentityClientOptions setHttpClient(HttpClient httpClient) {
this.httpClient = httpClient;
return this;
}
} |
Removed clone. | public IdentityClientOptions() {
Configuration configuration = Configuration.getGlobalConfiguration().clone();
authorityHost = configuration.contains(configuration.PROPERTY_AZURE_AUTHORITY_HOST)
? configuration.get(configuration.PROPERTY_AZURE_AUTHORITY_HOST) : DEFAULT_AUTHORITY_HOST;
maxRetry = MAX_RETRY_DEFAULT_LIMIT;
retryTimeout = i -> Duration.ofSeconds((long) Math.pow(2, i.getSeconds() - 1));
} | Configuration configuration = Configuration.getGlobalConfiguration().clone(); | public IdentityClientOptions() {
Configuration configuration = Configuration.getGlobalConfiguration();
authorityHost = configuration.get(Configuration.PROPERTY_AZURE_AUTHORITY_HOST, KnownAuthorityHosts.AZURE_CLOUD);
maxRetry = MAX_RETRY_DEFAULT_LIMIT;
retryTimeout = i -> Duration.ofSeconds((long) Math.pow(2, i.getSeconds() - 1));
} | class IdentityClientOptions {
private static final String DEFAULT_AUTHORITY_HOST = "https:
private static final int MAX_RETRY_DEFAULT_LIMIT = 3;
private String authorityHost;
private int maxRetry;
private Function<Duration, Duration> retryTimeout;
private ProxyOptions proxyOptions;
private HttpPipeline httpPipeline;
/**
* Creates an instance of IdentityClientOptions with default settings.
*/
/**
* @return the Azure Active Directory endpoint to acquire tokens.
*/
public String getAuthorityHost() {
return authorityHost;
}
/**
* Specifies the Azure Active Directory endpoint to acquire tokens.
* @param authorityHost the Azure Active Directory endpoint
* @return IdentityClientOptions
*/
public IdentityClientOptions setAuthorityHost(String authorityHost) {
this.authorityHost = authorityHost;
return this;
}
/**
* @return the max number of retries when an authentication request fails.
*/
public int getMaxRetry() {
return maxRetry;
}
/**
* Specifies the max number of retries when an authentication request fails.
* @param maxRetry the number of retries
* @return IdentityClientOptions
*/
public IdentityClientOptions setMaxRetry(int maxRetry) {
this.maxRetry = maxRetry;
return this;
}
/**
* @return a Function to calculate seconds of timeout on every retried request.
*/
public Function<Duration, Duration> getRetryTimeout() {
return retryTimeout;
}
/**
* Specifies a Function to calculate seconds of timeout on every retried request.
* @param retryTimeout the Function that returns a timeout in seconds given the number of retry
* @return IdentityClientOptions
*/
public IdentityClientOptions setRetryTimeout(Function<Duration, Duration> retryTimeout) {
this.retryTimeout = retryTimeout;
return this;
}
/**
* @return the options for proxy configuration.
*/
public ProxyOptions getProxyOptions() {
return proxyOptions;
}
/**
* Specifies the options for proxy configuration.
* @param proxyOptions the options for proxy configuration
* @return IdentityClientOptions
*/
public IdentityClientOptions setProxyOptions(ProxyOptions proxyOptions) {
this.proxyOptions = proxyOptions;
return this;
}
/**
* @return the HttpPipeline to send all requests
*/
public HttpPipeline getHttpPipeline() {
return httpPipeline;
}
/**
* Specifies the HttpPipeline to send all requests. This setting overrides the others.
* @param httpPipeline the HttpPipeline to send all requests
* @return IdentityClientOptions
*/
public IdentityClientOptions setHttpPipeline(HttpPipeline httpPipeline) {
this.httpPipeline = httpPipeline;
return this;
}
} | class IdentityClientOptions {
private static final int MAX_RETRY_DEFAULT_LIMIT = 3;
private String authorityHost;
private int maxRetry;
private Function<Duration, Duration> retryTimeout;
private ProxyOptions proxyOptions;
private HttpPipeline httpPipeline;
private ExecutorService executorService;
private Duration tokenRefreshOffset = Duration.ofMinutes(2);
private HttpClient httpClient;
/**
* Creates an instance of IdentityClientOptions with default settings.
*/
/**
* @return the Azure Active Directory endpoint to acquire tokens.
*/
public String getAuthorityHost() {
return authorityHost;
}
/**
* Specifies the Azure Active Directory endpoint to acquire tokens.
* @param authorityHost the Azure Active Directory endpoint
* @return IdentityClientOptions
*/
public IdentityClientOptions setAuthorityHost(String authorityHost) {
this.authorityHost = authorityHost;
return this;
}
/**
* @return the max number of retries when an authentication request fails.
*/
public int getMaxRetry() {
return maxRetry;
}
/**
* Specifies the max number of retries when an authentication request fails.
* @param maxRetry the number of retries
* @return IdentityClientOptions
*/
public IdentityClientOptions setMaxRetry(int maxRetry) {
this.maxRetry = maxRetry;
return this;
}
/**
* @return a Function to calculate seconds of timeout on every retried request.
*/
public Function<Duration, Duration> getRetryTimeout() {
return retryTimeout;
}
/**
* Specifies a Function to calculate seconds of timeout on every retried request.
* @param retryTimeout the Function that returns a timeout in seconds given the number of retry
* @return IdentityClientOptions
*/
public IdentityClientOptions setRetryTimeout(Function<Duration, Duration> retryTimeout) {
this.retryTimeout = retryTimeout;
return this;
}
/**
* @return the options for proxy configuration.
*/
public ProxyOptions getProxyOptions() {
return proxyOptions;
}
/**
* Specifies the options for proxy configuration.
* @param proxyOptions the options for proxy configuration
* @return IdentityClientOptions
*/
public IdentityClientOptions setProxyOptions(ProxyOptions proxyOptions) {
this.proxyOptions = proxyOptions;
return this;
}
/**
* @return the HttpPipeline to send all requests
*/
public HttpPipeline getHttpPipeline() {
return httpPipeline;
}
/**
* @return the HttpClient to use for requests
*/
public HttpClient getHttpClient() {
return httpClient;
}
/**
* Specifies the HttpPipeline to send all requests. This setting overrides the others.
* @param httpPipeline the HttpPipeline to send all requests
* @return IdentityClientOptions
*/
public IdentityClientOptions setHttpPipeline(HttpPipeline httpPipeline) {
this.httpPipeline = httpPipeline;
return this;
}
/**
* Specifies the ExecutorService to be used to execute the authentication requests.
* Developer is responsible for maintaining the lifecycle of the ExecutorService.
*
* <p>
* If this is not configured, the {@link ForkJoinPool
* also shared with other application tasks. If the common pool is heavily used for other tasks, authentication
* requests might starve and setting up this executor service should be considered.
* </p>
*
* <p> The executor service and can be safely shutdown if the TokenCredential is no longer being used by the
* Azure SDK clients and should be shutdown before the application exits. </p>
*
* @param executorService the executor service to use for executing authentication requests.
* @return IdentityClientOptions
*/
public IdentityClientOptions setExecutorService(ExecutorService executorService) {
this.executorService = executorService;
return this;
}
/**
* @return the ExecutorService to execute authentication requests.
*/
public ExecutorService getExecutorService() {
return executorService;
}
/**
* @return how long before the actual token expiry to refresh the token.
*/
public Duration getTokenRefreshOffset() {
return tokenRefreshOffset;
}
/**
* Sets how long before the actual token expiry to refresh the token. The
* token will be considered expired at and after the time of (actual
* expiry - token refresh offset). The default offset is 2 minutes.
*
* This is useful when network is congested and a request containing the
* token takes longer than normal to get to the server.
*
* @param tokenRefreshOffset the duration before the actual expiry of a token to refresh it
* @return IdentityClientOptions
* @throws NullPointerException If {@code tokenRefreshOffset} is null.
*/
public IdentityClientOptions setTokenRefreshOffset(Duration tokenRefreshOffset) {
Objects.requireNonNull(tokenRefreshOffset, "The token refresh offset cannot be null.");
this.tokenRefreshOffset = tokenRefreshOffset;
return this;
}
/**
* Specifies the HttpClient to send use for requests.
* @param httpClient the http client to use for requests
* @return IdentityClientOptions
*/
public IdentityClientOptions setHttpClient(HttpClient httpClient) {
this.httpClient = httpClient;
return this;
}
} |
please use logger, not System.out/err. Please do everywhere. | void deleteCollection() {
try {
for (CosmosAsyncClient cosmosAsyncClient : clientDocsMap.keySet()) {
cosmosAsyncClient.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).delete().block();
}
} catch (CosmosClientException e) {
if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) {
} else {
throw e;
}
} finally {
System.out.println("Container on all client have been deleted successfully");
}
} | System.out.println("Container on all client have been deleted successfully"); | void deleteCollection() {
try {
for (CosmosAsyncClient cosmosAsyncClient : clientDocsMap.keySet()) {
cosmosAsyncClient.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).delete().block();
}
} catch (CosmosClientException e) {
if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) {
} else {
throw e;
}
} finally {
logger.info("Container on all client have been deleted successfully");
}
} | class AsynReadWithMultipleClients<T> {
private final static String PARTITION_KEY = "/pk";
private final Semaphore concurrencyControlSemaphore;
private final Logger logger;
private final Configuration configuration;
private MetricRegistry metricsRegistry = new MetricRegistry();
private ScheduledReporter reporter;
private Meter successMeter;
private Meter failureMeter;
private Timer latency;
private Map<CosmosAsyncClient, List<PojoizedJson>> clientDocsMap = new HashMap<>();
AsynReadWithMultipleClients(Configuration cfg) {
logger = LoggerFactory.getLogger(this.getClass());
this.configuration = cfg;
createClients();
if (configuration.getGraphiteEndpoint() != null) {
final Graphite graphite = new Graphite(new InetSocketAddress(configuration.getGraphiteEndpoint(), configuration.getGraphiteEndpointPort()));
reporter = GraphiteReporter.forRegistry(metricsRegistry)
.prefixedWith(configuration.getOperationType().name())
.convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS)
.filter(MetricFilter.ALL)
.build(graphite);
} else {
reporter = ConsoleReporter.forRegistry(metricsRegistry).convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS).build();
}
concurrencyControlSemaphore = new Semaphore(cfg.getConcurrency());
}
void run() throws Exception {
successMeter = metricsRegistry.meter("
failureMeter = metricsRegistry.meter("
latency = metricsRegistry.timer("Latency");
reporter.start(configuration.getPrintingInterval(), TimeUnit.SECONDS);
AtomicLong count = new AtomicLong(0);
long i;
long startTime = System.currentTimeMillis();
for (i = 0; shouldContinue(startTime, i); i++) {
BaseSubscriber<PojoizedJson> baseSubscriber = new BaseSubscriber<PojoizedJson>() {
@Override
protected void hookOnSubscribe(Subscription subscription) {
super.hookOnSubscribe(subscription);
}
@Override
protected void hookOnNext(PojoizedJson value) {
logger.debug("hookOnNext: {}, count:{}", value, count.get());
}
@Override
protected void hookOnCancel() {
this.hookOnError(new CancellationException());
}
@Override
protected void hookOnComplete() {
successMeter.mark();
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onSuccess();
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
@Override
protected void hookOnError(Throwable throwable) {
failureMeter.mark();
logger.error("Encountered failure {} on thread {}",
throwable.getMessage(), Thread.currentThread().getName(), throwable);
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onError(throwable);
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
};
performWorkload(baseSubscriber, i);
}
synchronized (count) {
while (count.get() < i) {
count.wait();
}
}
long endTime = System.currentTimeMillis();
logger.info("[{}] operations performed in [{}] seconds.",
configuration.getNumberOfOperations(), (int) ((endTime - startTime) / 1000));
reporter.report();
reporter.close();
}
void shutdown() {
for (CosmosAsyncClient asyncClient : clientDocsMap.keySet()) {
asyncClient.close();
}
}
protected void onSuccess() {
}
protected void onError(Throwable throwable) {
}
private PojoizedJson generateDocument(String idString, String dataFieldValue, String partitionKey) {
com.azure.cosmos.benchmark.PojoizedJson instance = new com.azure.cosmos.benchmark.PojoizedJson();
Map<String, String> properties = instance.getInstance();
properties.put("id", idString);
properties.put(partitionKey, idString);
for (int i = 0; i < configuration.getDocumentDataFieldCount(); i++) {
properties.put("dataField" + i, dataFieldValue);
}
return instance;
}
private boolean shouldContinue(long startTimeMillis, long iterationCount) {
Duration maxDurationTime = configuration.getMaxRunningTimeDuration();
int maxNumberOfOperations = configuration.getNumberOfOperations();
if (maxDurationTime == null) {
return iterationCount < maxNumberOfOperations;
}
if (startTimeMillis + maxDurationTime.toMillis() < System.currentTimeMillis()) {
return false;
}
if (maxNumberOfOperations < 0) {
return true;
}
return iterationCount < maxNumberOfOperations;
}
private void createClients() {
String csvFile = "clientHostAndKey.txt";
BufferedReader br = null;
String line = "";
String splitBy = ",";
try {
br = new BufferedReader(new FileReader(csvFile));
while ((line = br.readLine()) != null) {
String[] hostAndKey = line.split(splitBy);
if (hostAndKey.length == 2) {
CosmosAsyncClient asyncClient = new CosmosClientBuilder()
.endpoint(hostAndKey[0])
.key(hostAndKey[1])
.connectionPolicy(configuration.getConnectionPolicy())
.consistencyLevel(configuration.getConsistencyLevel())
.connectionReuseAcrossClientsEnabled(true)
.buildAsyncClient();
List<PojoizedJson> docsToRead = new ArrayList<>();
if (!configuration.isDeleteCollections()) {
CosmosAsyncDatabase cosmosAsyncDatabase = asyncClient.createDatabaseIfNotExists(this.configuration.getDatabaseId()).block().getDatabase();
CosmosAsyncContainer cosmosAsyncContainer = cosmosAsyncDatabase.createContainerIfNotExists(configuration.getCollectionId(), PARTITION_KEY, 100000).block().getContainer();
String partitionKey = cosmosAsyncContainer.read().block().getProperties().getPartitionKeyDefinition()
.getPaths().iterator().next().split("/")[1];
String dataFieldValue = RandomStringUtils.randomAlphabetic(this.configuration.getDocumentDataFieldSize());
ArrayList<Flux<PojoizedJson>> createDocumentObservables = new ArrayList<>();
for (int i = 0; i < this.configuration.getNumberOfPreCreatedDocuments(); i++) {
String uuid = UUID.randomUUID().toString();
com.azure.cosmos.benchmark.PojoizedJson newDoc = generateDocument(uuid, dataFieldValue, partitionKey);
Flux<PojoizedJson> obs = cosmosAsyncContainer.createItem(newDoc).map(resp -> {
com.azure.cosmos.benchmark.PojoizedJson x =
resp.getItem();
return x;
}
).flux();
createDocumentObservables.add(obs);
}
docsToRead = Flux.merge(Flux.fromIterable(createDocumentObservables), 100).collectList().block();
MeterRegistry registry = configuration.getAzureMonitorMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
registry = configuration.getGraphiteMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
System.out.println("Client have been initialized with data created for host " + hostAndKey[0]);
} else {
System.out.println("Client have been initialized with host " + hostAndKey[0]);
}
clientDocsMap.put(asyncClient, docsToRead);
}
}
System.out.println("Total number of client created for ReadThroughputWithMultipleClient " + clientDocsMap.size());
} catch (FileNotFoundException e) {
e.printStackTrace();
} catch (IOException e) {
e.printStackTrace();
} finally {
if (br != null) {
try {
br.close();
} catch (IOException e) {
e.printStackTrace();
}
}
}
}
private void performWorkload(BaseSubscriber<PojoizedJson> baseSubscriber, long i) throws InterruptedException {
Mono<PojoizedJson> result;
int clientIndex = (int) (i % clientDocsMap.size());
CosmosAsyncClient client = (CosmosAsyncClient) clientDocsMap.keySet().toArray()[clientIndex];
int docIndex = (int) i % clientDocsMap.get(client).size();
PojoizedJson doc = clientDocsMap.get(client).get(docIndex);
String partitionKeyValue = doc.getId();
result = client.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).readItem(doc.getId(),
new PartitionKey(partitionKeyValue),
PojoizedJson.class).map(CosmosAsyncItemResponse::getItem);
concurrencyControlSemaphore.acquire();
AsyncReadBenchmark.LatencySubscriber<PojoizedJson> latencySubscriber = new AsyncReadBenchmark.LatencySubscriber<>(baseSubscriber);
latencySubscriber.context = latency.time();
result.subscribeOn(Schedulers.parallel()).subscribe(latencySubscriber);
}
} | class AsynReadWithMultipleClients<T> {
private final static String PARTITION_KEY = "/pk";
private final static String ACCOUNT_ENDPOINT_TAG = "AccountEndpoint=";
private final static String ACCOUNT_KEY_TAG = "AccountKey=";
private final Semaphore concurrencyControlSemaphore;
private final Logger logger;
private final Configuration configuration;
private MetricRegistry metricsRegistry = new MetricRegistry();
private ScheduledReporter reporter;
private Meter successMeter;
private Meter failureMeter;
private Timer latency;
private Map<CosmosAsyncClient, List<PojoizedJson>> clientDocsMap = new HashMap<>();
AsynReadWithMultipleClients(Configuration cfg) {
logger = LoggerFactory.getLogger(this.getClass());
this.configuration = cfg;
createClients();
if (configuration.getGraphiteEndpoint() != null) {
final Graphite graphite = new Graphite(new InetSocketAddress(configuration.getGraphiteEndpoint(), configuration.getGraphiteEndpointPort()));
reporter = GraphiteReporter.forRegistry(metricsRegistry)
.prefixedWith(configuration.getOperationType().name())
.convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS)
.filter(MetricFilter.ALL)
.build(graphite);
} else {
reporter = ConsoleReporter.forRegistry(metricsRegistry).convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS).build();
}
concurrencyControlSemaphore = new Semaphore(cfg.getConcurrency());
}
void run() throws Exception {
successMeter = metricsRegistry.meter("
failureMeter = metricsRegistry.meter("
latency = metricsRegistry.timer("Latency");
reporter.start(configuration.getPrintingInterval(), TimeUnit.SECONDS);
AtomicLong count = new AtomicLong(0);
long i;
long startTime = System.currentTimeMillis();
for (i = 0; shouldContinue(startTime, i); i++) {
BaseSubscriber<PojoizedJson> baseSubscriber = new BaseSubscriber<PojoizedJson>() {
@Override
protected void hookOnSubscribe(Subscription subscription) {
super.hookOnSubscribe(subscription);
}
@Override
protected void hookOnNext(PojoizedJson value) {
logger.debug("hookOnNext: {}, count:{}", value, count.get());
}
@Override
protected void hookOnCancel() {
this.hookOnError(new CancellationException());
}
@Override
protected void hookOnComplete() {
successMeter.mark();
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onSuccess();
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
@Override
protected void hookOnError(Throwable throwable) {
failureMeter.mark();
logger.error("Encountered failure {} on thread {}",
throwable.getMessage(), Thread.currentThread().getName(), throwable);
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onError(throwable);
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
};
performWorkload(baseSubscriber, i);
}
synchronized (count) {
while (count.get() < i) {
count.wait();
}
}
long endTime = System.currentTimeMillis();
logger.info("[{}] operations performed in [{}] seconds.",
configuration.getNumberOfOperations(), (int) ((endTime - startTime) / 1000));
reporter.report();
reporter.close();
}
void shutdown() {
for (CosmosAsyncClient asyncClient : clientDocsMap.keySet()) {
asyncClient.close();
}
}
protected void onSuccess() {
}
protected void onError(Throwable throwable) {
}
private PojoizedJson generateDocument(String idString, String dataFieldValue, String partitionKey) {
com.azure.cosmos.benchmark.PojoizedJson instance = new com.azure.cosmos.benchmark.PojoizedJson();
Map<String, String> properties = instance.getInstance();
properties.put("id", idString);
properties.put(partitionKey, idString);
for (int i = 0; i < configuration.getDocumentDataFieldCount(); i++) {
properties.put("dataField" + i, dataFieldValue);
}
return instance;
}
private boolean shouldContinue(long startTimeMillis, long iterationCount) {
Duration maxDurationTime = configuration.getMaxRunningTimeDuration();
int maxNumberOfOperations = configuration.getNumberOfOperations();
if (maxDurationTime == null) {
return iterationCount < maxNumberOfOperations;
}
if (startTimeMillis + maxDurationTime.toMillis() < System.currentTimeMillis()) {
return false;
}
if (maxNumberOfOperations < 0) {
return true;
}
return iterationCount < maxNumberOfOperations;
}
private void createClients() {
String csvFile = "clientHostAndKey.txt";
String line = "";
String splitBy = ";";
try (BufferedReader br = new BufferedReader(new FileReader(csvFile))) {
while ((line = br.readLine()) != null) {
String[] hostAndKey = line.split(splitBy);
if (hostAndKey.length >= 2) {
String endpoint = hostAndKey[0].substring(hostAndKey[0].indexOf(ACCOUNT_ENDPOINT_TAG) + ACCOUNT_ENDPOINT_TAG.length());
String key = hostAndKey[1].substring(hostAndKey[1].indexOf(ACCOUNT_KEY_TAG) + ACCOUNT_KEY_TAG.length());
CosmosAsyncClient asyncClient = new CosmosClientBuilder()
.endpoint(endpoint)
.key(key)
.connectionPolicy(configuration.getConnectionPolicy())
.consistencyLevel(configuration.getConsistencyLevel())
.connectionReuseAcrossClientsEnabled(true)
.buildAsyncClient();
List<PojoizedJson> docsToRead = new ArrayList<>();
if (!configuration.isDeleteCollections()) {
CosmosAsyncDatabase cosmosAsyncDatabase = asyncClient.createDatabaseIfNotExists(this.configuration.getDatabaseId()).block().getDatabase();
CosmosAsyncContainer cosmosAsyncContainer =
cosmosAsyncDatabase.createContainerIfNotExists(configuration.getCollectionId(), PARTITION_KEY, configuration.getThroughput()).block().getContainer();
String partitionKey = cosmosAsyncContainer.read().block().getProperties().getPartitionKeyDefinition()
.getPaths().iterator().next().split("/")[1];
String dataFieldValue = RandomStringUtils.randomAlphabetic(this.configuration.getDocumentDataFieldSize());
ArrayList<Flux<PojoizedJson>> createDocumentObservables = new ArrayList<>();
for (int i = 0; i < this.configuration.getNumberOfPreCreatedDocuments(); i++) {
String uuid = UUID.randomUUID().toString();
com.azure.cosmos.benchmark.PojoizedJson newDoc = generateDocument(uuid, dataFieldValue, partitionKey);
Flux<PojoizedJson> obs = cosmosAsyncContainer.createItem(newDoc).map(resp -> {
com.azure.cosmos.benchmark.PojoizedJson x =
resp.getItem();
return x;
}
).flux();
createDocumentObservables.add(obs);
}
docsToRead = Flux.merge(Flux.fromIterable(createDocumentObservables), 100).collectList().block();
logger.info("Client have been initialized with data created for host {}", hostAndKey[0]);
} else {
logger.info("Client have been initialized with host {}", hostAndKey[0]);
}
clientDocsMap.put(asyncClient, docsToRead);
}
}
logger.info("Total number of client created for ReadThroughputWithMultipleClient {}", clientDocsMap.size());
} catch (FileNotFoundException e) {
logger.error(e.getMessage());
} catch (IOException e) {
logger.error(e.getMessage());
}
MeterRegistry registry = configuration.getAzureMonitorMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
registry = configuration.getGraphiteMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
}
/**
 * Issues one point-read against the client and document chosen round-robin
 * from the iteration number, gated by the concurrency semaphore and timed
 * through the latency metric.
 *
 * @param baseSubscriber downstream subscriber notified of the read outcome
 * @param i              iteration number used to pick the client and document
 * @throws InterruptedException if interrupted while acquiring the semaphore
 */
private void performWorkload(BaseSubscriber<PojoizedJson> baseSubscriber, long i) throws InterruptedException {
    // Round-robin over the configured clients, then over that client's documents.
    int clientIdx = (int) (i % clientDocsMap.size());
    CosmosAsyncClient chosenClient = (CosmosAsyncClient) clientDocsMap.keySet().toArray()[clientIdx];
    List<PojoizedJson> docs = clientDocsMap.get(chosenClient);
    PojoizedJson targetDoc = docs.get((int) i % docs.size());
    // The document id is also used as the partition-key value (see generateDocument).
    String pkValue = targetDoc.getId();
    Mono<PojoizedJson> readMono = chosenClient
        .getDatabase(configuration.getDatabaseId())
        .getContainer(configuration.getCollectionId())
        .readItem(targetDoc.getId(), new PartitionKey(pkValue), PojoizedJson.class)
        .map(CosmosAsyncItemResponse::getItem);
    concurrencyControlSemaphore.acquire();
    AsyncReadBenchmark.LatencySubscriber<PojoizedJson> timedSubscriber = new AsyncReadBenchmark.LatencySubscriber<>(baseSubscriber);
    timedSubscriber.context = latency.time();
    readMono.subscribeOn(Schedulers.parallel()).subscribe(timedSubscriber);
}
} |
please don't use System.out/err. use logger here and elsewhere. | private void createClients() {
// Reads "host,key" pairs (one per line) from the client configuration file,
// builds one CosmosAsyncClient per line and, unless collections are being
// deleted, pre-creates the documents that the read workload will target.
String csvFile = "clientHostAndKey.txt";
String splitBy = ",";
// try-with-resources guarantees the reader is closed on every exit path,
// replacing the manual finally/close block.
try (BufferedReader br = new BufferedReader(new FileReader(csvFile))) {
    String line;
    while ((line = br.readLine()) != null) {
        String[] hostAndKey = line.split(splitBy);
        if (hostAndKey.length == 2) {
            CosmosAsyncClient asyncClient = new CosmosClientBuilder()
                .endpoint(hostAndKey[0])
                .key(hostAndKey[1])
                .connectionPolicy(configuration.getConnectionPolicy())
                .consistencyLevel(configuration.getConsistencyLevel())
                .connectionReuseAcrossClientsEnabled(true)
                .buildAsyncClient();
            List<PojoizedJson> docsToRead = new ArrayList<>();
            if (!configuration.isDeleteCollections()) {
                CosmosAsyncDatabase cosmosAsyncDatabase = asyncClient.createDatabaseIfNotExists(this.configuration.getDatabaseId()).block().getDatabase();
                // NOTE(review): throughput is hard-coded to 100000 here while other
                // copies use configuration.getThroughput() — confirm intended value.
                CosmosAsyncContainer cosmosAsyncContainer = cosmosAsyncDatabase.createContainerIfNotExists(configuration.getCollectionId(), PARTITION_KEY, 100000).block().getContainer();
                String partitionKey = cosmosAsyncContainer.read().block().getProperties().getPartitionKeyDefinition()
                    .getPaths().iterator().next().split("/")[1];
                String dataFieldValue = RandomStringUtils.randomAlphabetic(this.configuration.getDocumentDataFieldSize());
                ArrayList<Flux<PojoizedJson>> createDocumentObservables = new ArrayList<>();
                for (int i = 0; i < this.configuration.getNumberOfPreCreatedDocuments(); i++) {
                    String uuid = UUID.randomUUID().toString();
                    PojoizedJson newDoc = generateDocument(uuid, dataFieldValue, partitionKey);
                    createDocumentObservables.add(
                        cosmosAsyncContainer.createItem(newDoc).map(CosmosAsyncItemResponse::getItem).flux());
                }
                // Bounded merge (100) keeps document creation from overwhelming the account.
                docsToRead = Flux.merge(Flux.fromIterable(createDocumentObservables), 100).collectList().block();
                MeterRegistry registry = configuration.getAzureMonitorMeterRegistry();
                if (registry != null) {
                    BridgeInternal.monitorTelemetry(registry);
                }
                registry = configuration.getGraphiteMeterRegistry();
                if (registry != null) {
                    BridgeInternal.monitorTelemetry(registry);
                }
                logger.info("Client have been initialized with data created for host {}", hostAndKey[0]);
            } else {
                logger.info("Client have been initialized with host {}", hostAndKey[0]);
            }
            clientDocsMap.put(asyncClient, docsToRead);
        }
    }
    logger.info("Total number of client created for ReadThroughputWithMultipleClient {}", clientDocsMap.size());
} catch (IOException e) {
    // FileNotFoundException is an IOException; one catch covers both original
    // branches, and passing the exception preserves the full stack trace
    // instead of printStackTrace().
    logger.error("Failed to read client configuration from {}", csvFile, e);
}
} | e.printStackTrace(); | private void createClients() {
// Reads "AccountEndpoint=...;AccountKey=..." lines from the configuration
// file, builds one CosmosAsyncClient per line and, unless collections are
// being deleted, pre-creates the documents that the read workload will target.
String csvFile = "clientHostAndKey.txt";
String line = "";
String splitBy = ";";
try (BufferedReader br = new BufferedReader(new FileReader(csvFile))) {
    while ((line = br.readLine()) != null) {
        String[] hostAndKey = line.split(splitBy);
        if (hostAndKey.length >= 2) {
            // Strip the connection-string tags to get the raw endpoint and key.
            String endpoint = hostAndKey[0].substring(hostAndKey[0].indexOf(ACCOUNT_ENDPOINT_TAG) + ACCOUNT_ENDPOINT_TAG.length());
            String key = hostAndKey[1].substring(hostAndKey[1].indexOf(ACCOUNT_KEY_TAG) + ACCOUNT_KEY_TAG.length());
            CosmosAsyncClient asyncClient = new CosmosClientBuilder()
                .endpoint(endpoint)
                .key(key)
                .connectionPolicy(configuration.getConnectionPolicy())
                .consistencyLevel(configuration.getConsistencyLevel())
                .connectionReuseAcrossClientsEnabled(true)
                .buildAsyncClient();
            List<PojoizedJson> docsToRead = new ArrayList<>();
            if (!configuration.isDeleteCollections()) {
                CosmosAsyncDatabase cosmosAsyncDatabase = asyncClient.createDatabaseIfNotExists(this.configuration.getDatabaseId()).block().getDatabase();
                CosmosAsyncContainer cosmosAsyncContainer =
                    cosmosAsyncDatabase.createContainerIfNotExists(configuration.getCollectionId(), PARTITION_KEY, configuration.getThroughput()).block().getContainer();
                String partitionKey = cosmosAsyncContainer.read().block().getProperties().getPartitionKeyDefinition()
                    .getPaths().iterator().next().split("/")[1];
                String dataFieldValue = RandomStringUtils.randomAlphabetic(this.configuration.getDocumentDataFieldSize());
                ArrayList<Flux<PojoizedJson>> createDocumentObservables = new ArrayList<>();
                for (int i = 0; i < this.configuration.getNumberOfPreCreatedDocuments(); i++) {
                    String uuid = UUID.randomUUID().toString();
                    PojoizedJson newDoc = generateDocument(uuid, dataFieldValue, partitionKey);
                    createDocumentObservables.add(
                        cosmosAsyncContainer.createItem(newDoc).map(CosmosAsyncItemResponse::getItem).flux());
                }
                // Bounded merge (100) keeps document creation from overwhelming the account.
                docsToRead = Flux.merge(Flux.fromIterable(createDocumentObservables), 100).collectList().block();
                logger.info("Client have been initialized with data created for host {}", hostAndKey[0]);
            } else {
                logger.info("Client have been initialized with host {}", hostAndKey[0]);
            }
            clientDocsMap.put(asyncClient, docsToRead);
        }
    }
    logger.info("Total number of client created for ReadThroughputWithMultipleClient {}", clientDocsMap.size());
} catch (IOException e) {
    // FileNotFoundException is an IOException; one catch covers both original
    // branches, and passing the exception preserves the full stack trace
    // instead of logging only e.getMessage().
    logger.error("Failed to read client configuration from {}", csvFile, e);
}
MeterRegistry registry = configuration.getAzureMonitorMeterRegistry();
if (registry != null) {
    BridgeInternal.monitorTelemetry(registry);
}
registry = configuration.getGraphiteMeterRegistry();
if (registry != null) {
    BridgeInternal.monitorTelemetry(registry);
}
} | class AsynReadWithMultipleClients<T> {
private final static String PARTITION_KEY = "/pk";
private final Semaphore concurrencyControlSemaphore;
private final Logger logger;
private final Configuration configuration;
private MetricRegistry metricsRegistry = new MetricRegistry();
private ScheduledReporter reporter;
private Meter successMeter;
private Meter failureMeter;
private Timer latency;
private Map<CosmosAsyncClient, List<PojoizedJson>> clientDocsMap = new HashMap<>();
// Builds the benchmark: creates one client per configured host (createClients)
// and wires up the metrics reporter (Graphite if an endpoint is configured,
// console otherwise) plus the concurrency-limiting semaphore.
AsynReadWithMultipleClients(Configuration cfg) {
logger = LoggerFactory.getLogger(this.getClass());
this.configuration = cfg;
createClients();
if (configuration.getGraphiteEndpoint() != null) {
// Report metrics to Graphite, prefixed with the operation type.
final Graphite graphite = new Graphite(new InetSocketAddress(configuration.getGraphiteEndpoint(), configuration.getGraphiteEndpointPort()));
reporter = GraphiteReporter.forRegistry(metricsRegistry)
.prefixedWith(configuration.getOperationType().name())
.convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS)
.filter(MetricFilter.ALL)
.build(graphite)&#x3B;
} else {
// No Graphite endpoint configured: fall back to console reporting.
reporter = ConsoleReporter.forRegistry(metricsRegistry).convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS).build();
}
// Caps the number of in-flight read operations (see performWorkload).
concurrencyControlSemaphore = new Semaphore(cfg.getConcurrency());
}
/**
 * Deletes the benchmark container on every created client.
 * A NOTFOUND response (container already gone) is ignored; any other
 * service error is propagated to the caller.
 */
void deleteCollection() {
    try {
        for (CosmosAsyncClient cosmosAsyncClient : clientDocsMap.keySet()) {
            cosmosAsyncClient.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).delete().block();
        }
    } catch (CosmosClientException e) {
        // Inverted the original empty-if/else: only non-NOTFOUND errors are rethrown.
        if (e.getStatusCode() != HttpConstants.StatusCodes.NOTFOUND) {
            throw e;
        }
    } finally {
        // NOTE(review): this logs "successfully" even when an exception was
        // rethrown above — confirm whether that is intended.
        logger.info("Container on all client have been deleted successfully");
    }
}
void run() throws Exception {
successMeter = metricsRegistry.meter("
failureMeter = metricsRegistry.meter("
latency = metricsRegistry.timer("Latency");
reporter.start(configuration.getPrintingInterval(), TimeUnit.SECONDS);
AtomicLong count = new AtomicLong(0);
long i;
long startTime = System.currentTimeMillis();
for (i = 0; shouldContinue(startTime, i); i++) {
BaseSubscriber<PojoizedJson> baseSubscriber = new BaseSubscriber<PojoizedJson>() {
@Override
protected void hookOnSubscribe(Subscription subscription) {
super.hookOnSubscribe(subscription);
}
@Override
protected void hookOnNext(PojoizedJson value) {
logger.debug("hookOnNext: {}, count:{}", value, count.get());
}
@Override
protected void hookOnCancel() {
this.hookOnError(new CancellationException());
}
@Override
protected void hookOnComplete() {
successMeter.mark();
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onSuccess();
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
@Override
protected void hookOnError(Throwable throwable) {
failureMeter.mark();
logger.error("Encountered failure {} on thread {}",
throwable.getMessage(), Thread.currentThread().getName(), throwable);
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onError(throwable);
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
};
performWorkload(baseSubscriber, i);
}
synchronized (count) {
while (count.get() < i) {
count.wait();
}
}
long endTime = System.currentTimeMillis();
logger.info("[{}] operations performed in [{}] seconds.",
configuration.getNumberOfOperations(), (int) ((endTime - startTime) / 1000));
reporter.report();
reporter.close();
}
/**
 * Closes every client created by {@code createClients()}.
 */
void shutdown() {
    clientDocsMap.keySet().forEach(CosmosAsyncClient::close);
}
// Hook invoked after each successfully completed read; no-op by default,
// subclasses may override.
protected void onSuccess() {
}
// Hook invoked after each failed read; no-op by default, subclasses may override.
protected void onError(Throwable throwable) {
}
/**
 * Builds a benchmark document whose "id" and partition-key property both
 * hold {@code idString}, plus the configured number of padding data fields.
 *
 * @param idString       value stored in both the id and partition-key property
 * @param dataFieldValue payload written into every data field
 * @param partitionKey   name of the partition-key property
 * @return the populated document
 */
private PojoizedJson generateDocument(String idString, String dataFieldValue, String partitionKey) {
    PojoizedJson doc = new PojoizedJson();
    Map<String, String> props = doc.getInstance();
    props.put("id", idString);
    props.put(partitionKey, idString);
    for (int fieldIndex = 0; fieldIndex < configuration.getDocumentDataFieldCount(); fieldIndex++) {
        props.put("dataField" + fieldIndex, dataFieldValue);
    }
    return doc;
}
/**
 * Decides whether another workload iteration should run.
 * With no max-duration configured, only the operation count limits the run;
 * otherwise the deadline is checked first, and a negative operation count
 * means "unlimited until the deadline".
 *
 * @param startTimeMillis wall-clock start of the run
 * @param iterationCount  iterations issued so far
 * @return {@code true} if another operation should be scheduled
 */
private boolean shouldContinue(long startTimeMillis, long iterationCount) {
    Duration maxDurationTime = configuration.getMaxRunningTimeDuration();
    int maxNumberOfOperations = configuration.getNumberOfOperations();
    if (maxDurationTime != null) {
        boolean deadlinePassed = System.currentTimeMillis() > startTimeMillis + maxDurationTime.toMillis();
        if (deadlinePassed) {
            return false;
        }
        if (maxNumberOfOperations < 0) {
            // Negative count: run until the deadline only.
            return true;
        }
    }
    return iterationCount < maxNumberOfOperations;
}
// Issues one point-read against the client/document chosen round-robin from
// the iteration number; gated by the concurrency semaphore and timed via the
// latency metric. Throws InterruptedException if semaphore acquisition is
// interrupted.
private void performWorkload(BaseSubscriber<PojoizedJson> baseSubscriber, long i) throws InterruptedException {
Mono<PojoizedJson> result;
// Round-robin across clients, then across that client's pre-created docs.
int clientIndex = (int) (i % clientDocsMap.size());
CosmosAsyncClient client = (CosmosAsyncClient) clientDocsMap.keySet().toArray()[clientIndex];
int docIndex = (int) i % clientDocsMap.get(client).size();
PojoizedJson doc = clientDocsMap.get(client).get(docIndex);
// The document id is also the partition-key value (see generateDocument).
String partitionKeyValue = doc.getId();
result = client.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).readItem(doc.getId(),
new PartitionKey(partitionKeyValue),
PojoizedJson.class).map(CosmosAsyncItemResponse::getItem);
// Bounds the number of in-flight reads; released in the subscriber hooks.
concurrencyControlSemaphore.acquire();
AsyncReadBenchmark.LatencySubscriber<PojoizedJson> latencySubscriber = new AsyncReadBenchmark.LatencySubscriber<>(baseSubscriber);
latencySubscriber.context = latency.time();
result.subscribeOn(Schedulers.parallel()).subscribe(latencySubscriber);
}
} | class AsynReadWithMultipleClients<T> {
private final static String PARTITION_KEY = "/pk";
private final static String ACCOUNT_ENDPOINT_TAG = "AccountEndpoint=";
private final static String ACCOUNT_KEY_TAG = "AccountKey=";
private final Semaphore concurrencyControlSemaphore;
private final Logger logger;
private final Configuration configuration;
private MetricRegistry metricsRegistry = new MetricRegistry();
private ScheduledReporter reporter;
private Meter successMeter;
private Meter failureMeter;
private Timer latency;
private Map<CosmosAsyncClient, List<PojoizedJson>> clientDocsMap = new HashMap<>();
AsynReadWithMultipleClients(Configuration cfg) {
logger = LoggerFactory.getLogger(this.getClass());
this.configuration = cfg;
createClients();
if (configuration.getGraphiteEndpoint() != null) {
final Graphite graphite = new Graphite(new InetSocketAddress(configuration.getGraphiteEndpoint(), configuration.getGraphiteEndpointPort()));
reporter = GraphiteReporter.forRegistry(metricsRegistry)
.prefixedWith(configuration.getOperationType().name())
.convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS)
.filter(MetricFilter.ALL)
.build(graphite);
} else {
reporter = ConsoleReporter.forRegistry(metricsRegistry).convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS).build();
}
concurrencyControlSemaphore = new Semaphore(cfg.getConcurrency());
}
void deleteCollection() {
try {
for (CosmosAsyncClient cosmosAsyncClient : clientDocsMap.keySet()) {
cosmosAsyncClient.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).delete().block();
}
} catch (CosmosClientException e) {
if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) {
} else {
throw e;
}
} finally {
logger.info("Container on all client have been deleted successfully");
}
}
void run() throws Exception {
successMeter = metricsRegistry.meter("
failureMeter = metricsRegistry.meter("
latency = metricsRegistry.timer("Latency");
reporter.start(configuration.getPrintingInterval(), TimeUnit.SECONDS);
AtomicLong count = new AtomicLong(0);
long i;
long startTime = System.currentTimeMillis();
for (i = 0; shouldContinue(startTime, i); i++) {
BaseSubscriber<PojoizedJson> baseSubscriber = new BaseSubscriber<PojoizedJson>() {
@Override
protected void hookOnSubscribe(Subscription subscription) {
super.hookOnSubscribe(subscription);
}
@Override
protected void hookOnNext(PojoizedJson value) {
logger.debug("hookOnNext: {}, count:{}", value, count.get());
}
@Override
protected void hookOnCancel() {
this.hookOnError(new CancellationException());
}
@Override
protected void hookOnComplete() {
successMeter.mark();
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onSuccess();
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
@Override
protected void hookOnError(Throwable throwable) {
failureMeter.mark();
logger.error("Encountered failure {} on thread {}",
throwable.getMessage(), Thread.currentThread().getName(), throwable);
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onError(throwable);
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
};
performWorkload(baseSubscriber, i);
}
synchronized (count) {
while (count.get() < i) {
count.wait();
}
}
long endTime = System.currentTimeMillis();
logger.info("[{}] operations performed in [{}] seconds.",
configuration.getNumberOfOperations(), (int) ((endTime - startTime) / 1000));
reporter.report();
reporter.close();
}
void shutdown() {
for (CosmosAsyncClient asyncClient : clientDocsMap.keySet()) {
asyncClient.close();
}
}
protected void onSuccess() {
}
protected void onError(Throwable throwable) {
}
private PojoizedJson generateDocument(String idString, String dataFieldValue, String partitionKey) {
com.azure.cosmos.benchmark.PojoizedJson instance = new com.azure.cosmos.benchmark.PojoizedJson();
Map<String, String> properties = instance.getInstance();
properties.put("id", idString);
properties.put(partitionKey, idString);
for (int i = 0; i < configuration.getDocumentDataFieldCount(); i++) {
properties.put("dataField" + i, dataFieldValue);
}
return instance;
}
private boolean shouldContinue(long startTimeMillis, long iterationCount) {
Duration maxDurationTime = configuration.getMaxRunningTimeDuration();
int maxNumberOfOperations = configuration.getNumberOfOperations();
if (maxDurationTime == null) {
return iterationCount < maxNumberOfOperations;
}
if (startTimeMillis + maxDurationTime.toMillis() < System.currentTimeMillis()) {
return false;
}
if (maxNumberOfOperations < 0) {
return true;
}
return iterationCount < maxNumberOfOperations;
}
private void performWorkload(BaseSubscriber<PojoizedJson> baseSubscriber, long i) throws InterruptedException {
Mono<PojoizedJson> result;
int clientIndex = (int) (i % clientDocsMap.size());
CosmosAsyncClient client = (CosmosAsyncClient) clientDocsMap.keySet().toArray()[clientIndex];
int docIndex = (int) i % clientDocsMap.get(client).size();
PojoizedJson doc = clientDocsMap.get(client).get(docIndex);
String partitionKeyValue = doc.getId();
result = client.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).readItem(doc.getId(),
new PartitionKey(partitionKeyValue),
PojoizedJson.class).map(CosmosAsyncItemResponse::getItem);
concurrencyControlSemaphore.acquire();
AsyncReadBenchmark.LatencySubscriber<PojoizedJson> latencySubscriber = new AsyncReadBenchmark.LatencySubscriber<>(baseSubscriber);
latencySubscriber.context = latency.time();
result.subscribeOn(Schedulers.parallel()).subscribe(latencySubscriber);
}
} |
we should be using logger everywhere. Please replace System.out with logger here and elsewhere. | private void createClients() {
// Reads "host,key" pairs (one per line) from the client configuration file,
// builds one CosmosAsyncClient per line and, unless collections are being
// deleted, pre-creates the documents that the read workload will target.
String csvFile = "clientHostAndKey.txt";
String splitBy = ",";
// try-with-resources guarantees the reader is closed on every exit path,
// replacing the manual finally/close block.
try (BufferedReader br = new BufferedReader(new FileReader(csvFile))) {
    String line;
    while ((line = br.readLine()) != null) {
        String[] hostAndKey = line.split(splitBy);
        if (hostAndKey.length == 2) {
            CosmosAsyncClient asyncClient = new CosmosClientBuilder()
                .endpoint(hostAndKey[0])
                .key(hostAndKey[1])
                .connectionPolicy(configuration.getConnectionPolicy())
                .consistencyLevel(configuration.getConsistencyLevel())
                .connectionReuseAcrossClientsEnabled(true)
                .buildAsyncClient();
            List<PojoizedJson> docsToRead = new ArrayList<>();
            if (!configuration.isDeleteCollections()) {
                CosmosAsyncDatabase cosmosAsyncDatabase = asyncClient.createDatabaseIfNotExists(this.configuration.getDatabaseId()).block().getDatabase();
                // NOTE(review): throughput is hard-coded to 100000 here while other
                // copies use configuration.getThroughput() — confirm intended value.
                CosmosAsyncContainer cosmosAsyncContainer = cosmosAsyncDatabase.createContainerIfNotExists(configuration.getCollectionId(), PARTITION_KEY, 100000).block().getContainer();
                String partitionKey = cosmosAsyncContainer.read().block().getProperties().getPartitionKeyDefinition()
                    .getPaths().iterator().next().split("/")[1];
                String dataFieldValue = RandomStringUtils.randomAlphabetic(this.configuration.getDocumentDataFieldSize());
                ArrayList<Flux<PojoizedJson>> createDocumentObservables = new ArrayList<>();
                for (int i = 0; i < this.configuration.getNumberOfPreCreatedDocuments(); i++) {
                    String uuid = UUID.randomUUID().toString();
                    PojoizedJson newDoc = generateDocument(uuid, dataFieldValue, partitionKey);
                    createDocumentObservables.add(
                        cosmosAsyncContainer.createItem(newDoc).map(CosmosAsyncItemResponse::getItem).flux());
                }
                // Bounded merge (100) keeps document creation from overwhelming the account.
                docsToRead = Flux.merge(Flux.fromIterable(createDocumentObservables), 100).collectList().block();
                MeterRegistry registry = configuration.getAzureMonitorMeterRegistry();
                if (registry != null) {
                    BridgeInternal.monitorTelemetry(registry);
                }
                registry = configuration.getGraphiteMeterRegistry();
                if (registry != null) {
                    BridgeInternal.monitorTelemetry(registry);
                }
                logger.info("Client have been initialized with data created for host {}", hostAndKey[0]);
            } else {
                logger.info("Client have been initialized with host {}", hostAndKey[0]);
            }
            clientDocsMap.put(asyncClient, docsToRead);
        }
    }
    logger.info("Total number of client created for ReadThroughputWithMultipleClient {}", clientDocsMap.size());
} catch (IOException e) {
    // FileNotFoundException is an IOException; one catch covers both original
    // branches, and passing the exception preserves the full stack trace
    // instead of printStackTrace().
    logger.error("Failed to read client configuration from {}", csvFile, e);
}
} | System.out.println("Client have been initialized with data created for host " + hostAndKey[0]); | private void createClients() {
String csvFile = "clientHostAndKey.txt";
String line = "";
String splitBy = ";";
try (BufferedReader br = new BufferedReader(new FileReader(csvFile))) {
while ((line = br.readLine()) != null) {
String[] hostAndKey = line.split(splitBy);
if (hostAndKey.length >= 2) {
String endpoint = hostAndKey[0].substring(hostAndKey[0].indexOf(ACCOUNT_ENDPOINT_TAG) + ACCOUNT_ENDPOINT_TAG.length());
String key = hostAndKey[1].substring(hostAndKey[1].indexOf(ACCOUNT_KEY_TAG) + ACCOUNT_KEY_TAG.length());
CosmosAsyncClient asyncClient = new CosmosClientBuilder()
.endpoint(endpoint)
.key(key)
.connectionPolicy(configuration.getConnectionPolicy())
.consistencyLevel(configuration.getConsistencyLevel())
.connectionReuseAcrossClientsEnabled(true)
.buildAsyncClient();
List<PojoizedJson> docsToRead = new ArrayList<>();
if (!configuration.isDeleteCollections()) {
CosmosAsyncDatabase cosmosAsyncDatabase = asyncClient.createDatabaseIfNotExists(this.configuration.getDatabaseId()).block().getDatabase();
CosmosAsyncContainer cosmosAsyncContainer =
cosmosAsyncDatabase.createContainerIfNotExists(configuration.getCollectionId(), PARTITION_KEY, configuration.getThroughput()).block().getContainer();
String partitionKey = cosmosAsyncContainer.read().block().getProperties().getPartitionKeyDefinition()
.getPaths().iterator().next().split("/")[1];
String dataFieldValue = RandomStringUtils.randomAlphabetic(this.configuration.getDocumentDataFieldSize());
ArrayList<Flux<PojoizedJson>> createDocumentObservables = new ArrayList<>();
for (int i = 0; i < this.configuration.getNumberOfPreCreatedDocuments(); i++) {
String uuid = UUID.randomUUID().toString();
com.azure.cosmos.benchmark.PojoizedJson newDoc = generateDocument(uuid, dataFieldValue, partitionKey);
Flux<PojoizedJson> obs = cosmosAsyncContainer.createItem(newDoc).map(resp -> {
com.azure.cosmos.benchmark.PojoizedJson x =
resp.getItem();
return x;
}
).flux();
createDocumentObservables.add(obs);
}
docsToRead = Flux.merge(Flux.fromIterable(createDocumentObservables), 100).collectList().block();
logger.info("Client have been initialized with data created for host {}", hostAndKey[0]);
} else {
logger.info("Client have been initialized with host {}", hostAndKey[0]);
}
clientDocsMap.put(asyncClient, docsToRead);
}
}
logger.info("Total number of client created for ReadThroughputWithMultipleClient {}", clientDocsMap.size());
} catch (FileNotFoundException e) {
logger.error(e.getMessage());
} catch (IOException e) {
logger.error(e.getMessage());
}
MeterRegistry registry = configuration.getAzureMonitorMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
registry = configuration.getGraphiteMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
} | class AsynReadWithMultipleClients<T> {
private final static String PARTITION_KEY = "/pk";
private final Semaphore concurrencyControlSemaphore;
private final Logger logger;
private final Configuration configuration;
private MetricRegistry metricsRegistry = new MetricRegistry();
private ScheduledReporter reporter;
private Meter successMeter;
private Meter failureMeter;
private Timer latency;
private Map<CosmosAsyncClient, List<PojoizedJson>> clientDocsMap = new HashMap<>();
AsynReadWithMultipleClients(Configuration cfg) {
logger = LoggerFactory.getLogger(this.getClass());
this.configuration = cfg;
createClients();
if (configuration.getGraphiteEndpoint() != null) {
final Graphite graphite = new Graphite(new InetSocketAddress(configuration.getGraphiteEndpoint(), configuration.getGraphiteEndpointPort()));
reporter = GraphiteReporter.forRegistry(metricsRegistry)
.prefixedWith(configuration.getOperationType().name())
.convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS)
.filter(MetricFilter.ALL)
.build(graphite);
} else {
reporter = ConsoleReporter.forRegistry(metricsRegistry).convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS).build();
}
concurrencyControlSemaphore = new Semaphore(cfg.getConcurrency());
}
/**
 * Deletes the benchmark container on every created client.
 * A NOTFOUND response (container already gone) is ignored; any other
 * service error is propagated to the caller.
 */
void deleteCollection() {
    try {
        for (CosmosAsyncClient cosmosAsyncClient : clientDocsMap.keySet()) {
            cosmosAsyncClient.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).delete().block();
        }
    } catch (CosmosClientException e) {
        // Inverted the original empty-if/else: only non-NOTFOUND errors are rethrown.
        if (e.getStatusCode() != HttpConstants.StatusCodes.NOTFOUND) {
            throw e;
        }
    } finally {
        // NOTE(review): this logs "successfully" even when an exception was
        // rethrown above — confirm whether that is intended.
        logger.info("Container on all client have been deleted successfully");
    }
}
void run() throws Exception {
successMeter = metricsRegistry.meter("
failureMeter = metricsRegistry.meter("
latency = metricsRegistry.timer("Latency");
reporter.start(configuration.getPrintingInterval(), TimeUnit.SECONDS);
AtomicLong count = new AtomicLong(0);
long i;
long startTime = System.currentTimeMillis();
for (i = 0; shouldContinue(startTime, i); i++) {
BaseSubscriber<PojoizedJson> baseSubscriber = new BaseSubscriber<PojoizedJson>() {
@Override
protected void hookOnSubscribe(Subscription subscription) {
super.hookOnSubscribe(subscription);
}
@Override
protected void hookOnNext(PojoizedJson value) {
logger.debug("hookOnNext: {}, count:{}", value, count.get());
}
@Override
protected void hookOnCancel() {
this.hookOnError(new CancellationException());
}
@Override
protected void hookOnComplete() {
successMeter.mark();
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onSuccess();
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
@Override
protected void hookOnError(Throwable throwable) {
failureMeter.mark();
logger.error("Encountered failure {} on thread {}",
throwable.getMessage(), Thread.currentThread().getName(), throwable);
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onError(throwable);
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
};
performWorkload(baseSubscriber, i);
}
synchronized (count) {
while (count.get() < i) {
count.wait();
}
}
long endTime = System.currentTimeMillis();
logger.info("[{}] operations performed in [{}] seconds.",
configuration.getNumberOfOperations(), (int) ((endTime - startTime) / 1000));
reporter.report();
reporter.close();
}
void shutdown() {
for (CosmosAsyncClient asyncClient : clientDocsMap.keySet()) {
asyncClient.close();
}
}
protected void onSuccess() {
}
protected void onError(Throwable throwable) {
}
private PojoizedJson generateDocument(String idString, String dataFieldValue, String partitionKey) {
com.azure.cosmos.benchmark.PojoizedJson instance = new com.azure.cosmos.benchmark.PojoizedJson();
Map<String, String> properties = instance.getInstance();
properties.put("id", idString);
properties.put(partitionKey, idString);
for (int i = 0; i < configuration.getDocumentDataFieldCount(); i++) {
properties.put("dataField" + i, dataFieldValue);
}
return instance;
}
private boolean shouldContinue(long startTimeMillis, long iterationCount) {
Duration maxDurationTime = configuration.getMaxRunningTimeDuration();
int maxNumberOfOperations = configuration.getNumberOfOperations();
if (maxDurationTime == null) {
return iterationCount < maxNumberOfOperations;
}
if (startTimeMillis + maxDurationTime.toMillis() < System.currentTimeMillis()) {
return false;
}
if (maxNumberOfOperations < 0) {
return true;
}
return iterationCount < maxNumberOfOperations;
}
private void performWorkload(BaseSubscriber<PojoizedJson> baseSubscriber, long i) throws InterruptedException {
Mono<PojoizedJson> result;
int clientIndex = (int) (i % clientDocsMap.size());
CosmosAsyncClient client = (CosmosAsyncClient) clientDocsMap.keySet().toArray()[clientIndex];
int docIndex = (int) i % clientDocsMap.get(client).size();
PojoizedJson doc = clientDocsMap.get(client).get(docIndex);
String partitionKeyValue = doc.getId();
result = client.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).readItem(doc.getId(),
new PartitionKey(partitionKeyValue),
PojoizedJson.class).map(CosmosAsyncItemResponse::getItem);
concurrencyControlSemaphore.acquire();
AsyncReadBenchmark.LatencySubscriber<PojoizedJson> latencySubscriber = new AsyncReadBenchmark.LatencySubscriber<>(baseSubscriber);
latencySubscriber.context = latency.time();
result.subscribeOn(Schedulers.parallel()).subscribe(latencySubscriber);
}
} | class AsynReadWithMultipleClients<T> {
private final static String PARTITION_KEY = "/pk";
private final static String ACCOUNT_ENDPOINT_TAG = "AccountEndpoint=";
private final static String ACCOUNT_KEY_TAG = "AccountKey=";
private final Semaphore concurrencyControlSemaphore;
private final Logger logger;
private final Configuration configuration;
private MetricRegistry metricsRegistry = new MetricRegistry();
private ScheduledReporter reporter;
private Meter successMeter;
private Meter failureMeter;
private Timer latency;
private Map<CosmosAsyncClient, List<PojoizedJson>> clientDocsMap = new HashMap<>();
AsynReadWithMultipleClients(Configuration cfg) {
logger = LoggerFactory.getLogger(this.getClass());
this.configuration = cfg;
createClients();
if (configuration.getGraphiteEndpoint() != null) {
final Graphite graphite = new Graphite(new InetSocketAddress(configuration.getGraphiteEndpoint(), configuration.getGraphiteEndpointPort()));
reporter = GraphiteReporter.forRegistry(metricsRegistry)
.prefixedWith(configuration.getOperationType().name())
.convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS)
.filter(MetricFilter.ALL)
.build(graphite);
} else {
reporter = ConsoleReporter.forRegistry(metricsRegistry).convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS).build();
}
concurrencyControlSemaphore = new Semaphore(cfg.getConcurrency());
}
void deleteCollection() {
try {
for (CosmosAsyncClient cosmosAsyncClient : clientDocsMap.keySet()) {
cosmosAsyncClient.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).delete().block();
}
} catch (CosmosClientException e) {
if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) {
} else {
throw e;
}
} finally {
logger.info("Container on all client have been deleted successfully");
}
}
void run() throws Exception {
successMeter = metricsRegistry.meter("
failureMeter = metricsRegistry.meter("
latency = metricsRegistry.timer("Latency");
reporter.start(configuration.getPrintingInterval(), TimeUnit.SECONDS);
AtomicLong count = new AtomicLong(0);
long i;
long startTime = System.currentTimeMillis();
for (i = 0; shouldContinue(startTime, i); i++) {
BaseSubscriber<PojoizedJson> baseSubscriber = new BaseSubscriber<PojoizedJson>() {
@Override
protected void hookOnSubscribe(Subscription subscription) {
super.hookOnSubscribe(subscription);
}
@Override
protected void hookOnNext(PojoizedJson value) {
logger.debug("hookOnNext: {}, count:{}", value, count.get());
}
@Override
protected void hookOnCancel() {
this.hookOnError(new CancellationException());
}
@Override
protected void hookOnComplete() {
successMeter.mark();
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onSuccess();
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
@Override
protected void hookOnError(Throwable throwable) {
failureMeter.mark();
logger.error("Encountered failure {} on thread {}",
throwable.getMessage(), Thread.currentThread().getName(), throwable);
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onError(throwable);
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
};
performWorkload(baseSubscriber, i);
}
synchronized (count) {
while (count.get() < i) {
count.wait();
}
}
long endTime = System.currentTimeMillis();
logger.info("[{}] operations performed in [{}] seconds.",
configuration.getNumberOfOperations(), (int) ((endTime - startTime) / 1000));
reporter.report();
reporter.close();
}
void shutdown() {
for (CosmosAsyncClient asyncClient : clientDocsMap.keySet()) {
asyncClient.close();
}
}
protected void onSuccess() {
}
protected void onError(Throwable throwable) {
}
private PojoizedJson generateDocument(String idString, String dataFieldValue, String partitionKey) {
com.azure.cosmos.benchmark.PojoizedJson instance = new com.azure.cosmos.benchmark.PojoizedJson();
Map<String, String> properties = instance.getInstance();
properties.put("id", idString);
properties.put(partitionKey, idString);
for (int i = 0; i < configuration.getDocumentDataFieldCount(); i++) {
properties.put("dataField" + i, dataFieldValue);
}
return instance;
}
private boolean shouldContinue(long startTimeMillis, long iterationCount) {
Duration maxDurationTime = configuration.getMaxRunningTimeDuration();
int maxNumberOfOperations = configuration.getNumberOfOperations();
if (maxDurationTime == null) {
return iterationCount < maxNumberOfOperations;
}
if (startTimeMillis + maxDurationTime.toMillis() < System.currentTimeMillis()) {
return false;
}
if (maxNumberOfOperations < 0) {
return true;
}
return iterationCount < maxNumberOfOperations;
}
private void performWorkload(BaseSubscriber<PojoizedJson> baseSubscriber, long i) throws InterruptedException {
Mono<PojoizedJson> result;
int clientIndex = (int) (i % clientDocsMap.size());
CosmosAsyncClient client = (CosmosAsyncClient) clientDocsMap.keySet().toArray()[clientIndex];
int docIndex = (int) i % clientDocsMap.get(client).size();
PojoizedJson doc = clientDocsMap.get(client).get(docIndex);
String partitionKeyValue = doc.getId();
result = client.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).readItem(doc.getId(),
new PartitionKey(partitionKeyValue),
PojoizedJson.class).map(CosmosAsyncItemResponse::getItem);
concurrencyControlSemaphore.acquire();
AsyncReadBenchmark.LatencySubscriber<PojoizedJson> latencySubscriber = new AsyncReadBenchmark.LatencySubscriber<>(baseSubscriber);
latencySubscriber.context = latency.time();
result.subscribeOn(Schedulers.parallel()).subscribe(latencySubscriber);
}
} |
please use try-with-resources to simplify the try/catch pattern: https://docs.oracle.com/javase/tutorial/essential/exceptions/tryResourceClose.html | private void createClients() {
String csvFile = "clientHostAndKey.txt";
BufferedReader br = null;
String line = "";
String splitBy = ",";
try {
br = new BufferedReader(new FileReader(csvFile));
while ((line = br.readLine()) != null) {
String[] hostAndKey = line.split(splitBy);
if (hostAndKey.length == 2) {
CosmosAsyncClient asyncClient = new CosmosClientBuilder()
.endpoint(hostAndKey[0])
.key(hostAndKey[1])
.connectionPolicy(configuration.getConnectionPolicy())
.consistencyLevel(configuration.getConsistencyLevel())
.connectionReuseAcrossClientsEnabled(true)
.buildAsyncClient();
List<PojoizedJson> docsToRead = new ArrayList<>();
if (!configuration.isDeleteCollections()) {
CosmosAsyncDatabase cosmosAsyncDatabase = asyncClient.createDatabaseIfNotExists(this.configuration.getDatabaseId()).block().getDatabase();
CosmosAsyncContainer cosmosAsyncContainer = cosmosAsyncDatabase.createContainerIfNotExists(configuration.getCollectionId(), PARTITION_KEY, 100000).block().getContainer();
String partitionKey = cosmosAsyncContainer.read().block().getProperties().getPartitionKeyDefinition()
.getPaths().iterator().next().split("/")[1];
String dataFieldValue = RandomStringUtils.randomAlphabetic(this.configuration.getDocumentDataFieldSize());
ArrayList<Flux<PojoizedJson>> createDocumentObservables = new ArrayList<>();
for (int i = 0; i < this.configuration.getNumberOfPreCreatedDocuments(); i++) {
String uuid = UUID.randomUUID().toString();
com.azure.cosmos.benchmark.PojoizedJson newDoc = generateDocument(uuid, dataFieldValue, partitionKey);
Flux<PojoizedJson> obs = cosmosAsyncContainer.createItem(newDoc).map(resp -> {
com.azure.cosmos.benchmark.PojoizedJson x =
resp.getItem();
return x;
}
).flux();
createDocumentObservables.add(obs);
}
docsToRead = Flux.merge(Flux.fromIterable(createDocumentObservables), 100).collectList().block();
MeterRegistry registry = configuration.getAzureMonitorMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
registry = configuration.getGraphiteMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
System.out.println("Client have been initialized with data created for host " + hostAndKey[0]);
} else {
System.out.println("Client have been initialized with host " + hostAndKey[0]);
}
clientDocsMap.put(asyncClient, docsToRead);
}
}
System.out.println("Total number of client created for ReadThroughputWithMultipleClient " + clientDocsMap.size());
} catch (FileNotFoundException e) {
e.printStackTrace();
} catch (IOException e) {
e.printStackTrace();
} finally {
if (br != null) {
try {
br.close();
} catch (IOException e) {
e.printStackTrace();
}
}
}
} | try { | private void createClients() {
String csvFile = "clientHostAndKey.txt";
String line = "";
String splitBy = ";";
try (BufferedReader br = new BufferedReader(new FileReader(csvFile))) {
while ((line = br.readLine()) != null) {
String[] hostAndKey = line.split(splitBy);
if (hostAndKey.length >= 2) {
String endpoint = hostAndKey[0].substring(hostAndKey[0].indexOf(ACCOUNT_ENDPOINT_TAG) + ACCOUNT_ENDPOINT_TAG.length());
String key = hostAndKey[1].substring(hostAndKey[1].indexOf(ACCOUNT_KEY_TAG) + ACCOUNT_KEY_TAG.length());
CosmosAsyncClient asyncClient = new CosmosClientBuilder()
.endpoint(endpoint)
.key(key)
.connectionPolicy(configuration.getConnectionPolicy())
.consistencyLevel(configuration.getConsistencyLevel())
.connectionReuseAcrossClientsEnabled(true)
.buildAsyncClient();
List<PojoizedJson> docsToRead = new ArrayList<>();
if (!configuration.isDeleteCollections()) {
CosmosAsyncDatabase cosmosAsyncDatabase = asyncClient.createDatabaseIfNotExists(this.configuration.getDatabaseId()).block().getDatabase();
CosmosAsyncContainer cosmosAsyncContainer =
cosmosAsyncDatabase.createContainerIfNotExists(configuration.getCollectionId(), PARTITION_KEY, configuration.getThroughput()).block().getContainer();
String partitionKey = cosmosAsyncContainer.read().block().getProperties().getPartitionKeyDefinition()
.getPaths().iterator().next().split("/")[1];
String dataFieldValue = RandomStringUtils.randomAlphabetic(this.configuration.getDocumentDataFieldSize());
ArrayList<Flux<PojoizedJson>> createDocumentObservables = new ArrayList<>();
for (int i = 0; i < this.configuration.getNumberOfPreCreatedDocuments(); i++) {
String uuid = UUID.randomUUID().toString();
com.azure.cosmos.benchmark.PojoizedJson newDoc = generateDocument(uuid, dataFieldValue, partitionKey);
Flux<PojoizedJson> obs = cosmosAsyncContainer.createItem(newDoc).map(resp -> {
com.azure.cosmos.benchmark.PojoizedJson x =
resp.getItem();
return x;
}
).flux();
createDocumentObservables.add(obs);
}
docsToRead = Flux.merge(Flux.fromIterable(createDocumentObservables), 100).collectList().block();
logger.info("Client have been initialized with data created for host {}", hostAndKey[0]);
} else {
logger.info("Client have been initialized with host {}", hostAndKey[0]);
}
clientDocsMap.put(asyncClient, docsToRead);
}
}
logger.info("Total number of client created for ReadThroughputWithMultipleClient {}", clientDocsMap.size());
} catch (FileNotFoundException e) {
logger.error(e.getMessage());
} catch (IOException e) {
logger.error(e.getMessage());
}
MeterRegistry registry = configuration.getAzureMonitorMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
registry = configuration.getGraphiteMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
} | class AsynReadWithMultipleClients<T> {
private final static String PARTITION_KEY = "/pk";
private final Semaphore concurrencyControlSemaphore;
private final Logger logger;
private final Configuration configuration;
private MetricRegistry metricsRegistry = new MetricRegistry();
private ScheduledReporter reporter;
private Meter successMeter;
private Meter failureMeter;
private Timer latency;
private Map<CosmosAsyncClient, List<PojoizedJson>> clientDocsMap = new HashMap<>();
AsynReadWithMultipleClients(Configuration cfg) {
logger = LoggerFactory.getLogger(this.getClass());
this.configuration = cfg;
createClients();
if (configuration.getGraphiteEndpoint() != null) {
final Graphite graphite = new Graphite(new InetSocketAddress(configuration.getGraphiteEndpoint(), configuration.getGraphiteEndpointPort()));
reporter = GraphiteReporter.forRegistry(metricsRegistry)
.prefixedWith(configuration.getOperationType().name())
.convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS)
.filter(MetricFilter.ALL)
.build(graphite);
} else {
reporter = ConsoleReporter.forRegistry(metricsRegistry).convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS).build();
}
concurrencyControlSemaphore = new Semaphore(cfg.getConcurrency());
}
void deleteCollection() {
try {
for (CosmosAsyncClient cosmosAsyncClient : clientDocsMap.keySet()) {
cosmosAsyncClient.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).delete().block();
}
} catch (CosmosClientException e) {
if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) {
} else {
throw e;
}
} finally {
System.out.println("Container on all client have been deleted successfully");
}
}
void run() throws Exception {
successMeter = metricsRegistry.meter("
failureMeter = metricsRegistry.meter("
latency = metricsRegistry.timer("Latency");
reporter.start(configuration.getPrintingInterval(), TimeUnit.SECONDS);
AtomicLong count = new AtomicLong(0);
long i;
long startTime = System.currentTimeMillis();
for (i = 0; shouldContinue(startTime, i); i++) {
BaseSubscriber<PojoizedJson> baseSubscriber = new BaseSubscriber<PojoizedJson>() {
@Override
protected void hookOnSubscribe(Subscription subscription) {
super.hookOnSubscribe(subscription);
}
@Override
protected void hookOnNext(PojoizedJson value) {
logger.debug("hookOnNext: {}, count:{}", value, count.get());
}
@Override
protected void hookOnCancel() {
this.hookOnError(new CancellationException());
}
@Override
protected void hookOnComplete() {
successMeter.mark();
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onSuccess();
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
@Override
protected void hookOnError(Throwable throwable) {
failureMeter.mark();
logger.error("Encountered failure {} on thread {}",
throwable.getMessage(), Thread.currentThread().getName(), throwable);
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onError(throwable);
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
};
performWorkload(baseSubscriber, i);
}
synchronized (count) {
while (count.get() < i) {
count.wait();
}
}
long endTime = System.currentTimeMillis();
logger.info("[{}] operations performed in [{}] seconds.",
configuration.getNumberOfOperations(), (int) ((endTime - startTime) / 1000));
reporter.report();
reporter.close();
}
void shutdown() {
for (CosmosAsyncClient asyncClient : clientDocsMap.keySet()) {
asyncClient.close();
}
}
protected void onSuccess() {
}
protected void onError(Throwable throwable) {
}
private PojoizedJson generateDocument(String idString, String dataFieldValue, String partitionKey) {
com.azure.cosmos.benchmark.PojoizedJson instance = new com.azure.cosmos.benchmark.PojoizedJson();
Map<String, String> properties = instance.getInstance();
properties.put("id", idString);
properties.put(partitionKey, idString);
for (int i = 0; i < configuration.getDocumentDataFieldCount(); i++) {
properties.put("dataField" + i, dataFieldValue);
}
return instance;
}
private boolean shouldContinue(long startTimeMillis, long iterationCount) {
Duration maxDurationTime = configuration.getMaxRunningTimeDuration();
int maxNumberOfOperations = configuration.getNumberOfOperations();
if (maxDurationTime == null) {
return iterationCount < maxNumberOfOperations;
}
if (startTimeMillis + maxDurationTime.toMillis() < System.currentTimeMillis()) {
return false;
}
if (maxNumberOfOperations < 0) {
return true;
}
return iterationCount < maxNumberOfOperations;
}
private void performWorkload(BaseSubscriber<PojoizedJson> baseSubscriber, long i) throws InterruptedException {
Mono<PojoizedJson> result;
int clientIndex = (int) (i % clientDocsMap.size());
CosmosAsyncClient client = (CosmosAsyncClient) clientDocsMap.keySet().toArray()[clientIndex];
int docIndex = (int) i % clientDocsMap.get(client).size();
PojoizedJson doc = clientDocsMap.get(client).get(docIndex);
String partitionKeyValue = doc.getId();
result = client.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).readItem(doc.getId(),
new PartitionKey(partitionKeyValue),
PojoizedJson.class).map(CosmosAsyncItemResponse::getItem);
concurrencyControlSemaphore.acquire();
AsyncReadBenchmark.LatencySubscriber<PojoizedJson> latencySubscriber = new AsyncReadBenchmark.LatencySubscriber<>(baseSubscriber);
latencySubscriber.context = latency.time();
result.subscribeOn(Schedulers.parallel()).subscribe(latencySubscriber);
}
} | class AsynReadWithMultipleClients<T> {
private final static String PARTITION_KEY = "/pk";
private final static String ACCOUNT_ENDPOINT_TAG = "AccountEndpoint=";
private final static String ACCOUNT_KEY_TAG = "AccountKey=";
private final Semaphore concurrencyControlSemaphore;
private final Logger logger;
private final Configuration configuration;
private MetricRegistry metricsRegistry = new MetricRegistry();
private ScheduledReporter reporter;
private Meter successMeter;
private Meter failureMeter;
private Timer latency;
private Map<CosmosAsyncClient, List<PojoizedJson>> clientDocsMap = new HashMap<>();
AsynReadWithMultipleClients(Configuration cfg) {
logger = LoggerFactory.getLogger(this.getClass());
this.configuration = cfg;
createClients();
if (configuration.getGraphiteEndpoint() != null) {
final Graphite graphite = new Graphite(new InetSocketAddress(configuration.getGraphiteEndpoint(), configuration.getGraphiteEndpointPort()));
reporter = GraphiteReporter.forRegistry(metricsRegistry)
.prefixedWith(configuration.getOperationType().name())
.convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS)
.filter(MetricFilter.ALL)
.build(graphite);
} else {
reporter = ConsoleReporter.forRegistry(metricsRegistry).convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS).build();
}
concurrencyControlSemaphore = new Semaphore(cfg.getConcurrency());
}
void deleteCollection() {
try {
for (CosmosAsyncClient cosmosAsyncClient : clientDocsMap.keySet()) {
cosmosAsyncClient.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).delete().block();
}
} catch (CosmosClientException e) {
if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) {
} else {
throw e;
}
} finally {
logger.info("Container on all client have been deleted successfully");
}
}
void run() throws Exception {
successMeter = metricsRegistry.meter("
failureMeter = metricsRegistry.meter("
latency = metricsRegistry.timer("Latency");
reporter.start(configuration.getPrintingInterval(), TimeUnit.SECONDS);
AtomicLong count = new AtomicLong(0);
long i;
long startTime = System.currentTimeMillis();
for (i = 0; shouldContinue(startTime, i); i++) {
BaseSubscriber<PojoizedJson> baseSubscriber = new BaseSubscriber<PojoizedJson>() {
@Override
protected void hookOnSubscribe(Subscription subscription) {
super.hookOnSubscribe(subscription);
}
@Override
protected void hookOnNext(PojoizedJson value) {
logger.debug("hookOnNext: {}, count:{}", value, count.get());
}
@Override
protected void hookOnCancel() {
this.hookOnError(new CancellationException());
}
@Override
protected void hookOnComplete() {
successMeter.mark();
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onSuccess();
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
@Override
protected void hookOnError(Throwable throwable) {
failureMeter.mark();
logger.error("Encountered failure {} on thread {}",
throwable.getMessage(), Thread.currentThread().getName(), throwable);
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onError(throwable);
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
};
performWorkload(baseSubscriber, i);
}
synchronized (count) {
while (count.get() < i) {
count.wait();
}
}
long endTime = System.currentTimeMillis();
logger.info("[{}] operations performed in [{}] seconds.",
configuration.getNumberOfOperations(), (int) ((endTime - startTime) / 1000));
reporter.report();
reporter.close();
}
void shutdown() {
for (CosmosAsyncClient asyncClient : clientDocsMap.keySet()) {
asyncClient.close();
}
}
protected void onSuccess() {
}
protected void onError(Throwable throwable) {
}
private PojoizedJson generateDocument(String idString, String dataFieldValue, String partitionKey) {
com.azure.cosmos.benchmark.PojoizedJson instance = new com.azure.cosmos.benchmark.PojoizedJson();
Map<String, String> properties = instance.getInstance();
properties.put("id", idString);
properties.put(partitionKey, idString);
for (int i = 0; i < configuration.getDocumentDataFieldCount(); i++) {
properties.put("dataField" + i, dataFieldValue);
}
return instance;
}
private boolean shouldContinue(long startTimeMillis, long iterationCount) {
Duration maxDurationTime = configuration.getMaxRunningTimeDuration();
int maxNumberOfOperations = configuration.getNumberOfOperations();
if (maxDurationTime == null) {
return iterationCount < maxNumberOfOperations;
}
if (startTimeMillis + maxDurationTime.toMillis() < System.currentTimeMillis()) {
return false;
}
if (maxNumberOfOperations < 0) {
return true;
}
return iterationCount < maxNumberOfOperations;
}
private void performWorkload(BaseSubscriber<PojoizedJson> baseSubscriber, long i) throws InterruptedException {
Mono<PojoizedJson> result;
int clientIndex = (int) (i % clientDocsMap.size());
CosmosAsyncClient client = (CosmosAsyncClient) clientDocsMap.keySet().toArray()[clientIndex];
int docIndex = (int) i % clientDocsMap.get(client).size();
PojoizedJson doc = clientDocsMap.get(client).get(docIndex);
String partitionKeyValue = doc.getId();
result = client.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).readItem(doc.getId(),
new PartitionKey(partitionKeyValue),
PojoizedJson.class).map(CosmosAsyncItemResponse::getItem);
concurrencyControlSemaphore.acquire();
AsyncReadBenchmark.LatencySubscriber<PojoizedJson> latencySubscriber = new AsyncReadBenchmark.LatencySubscriber<>(baseSubscriber);
latencySubscriber.context = latency.time();
result.subscribeOn(Schedulers.parallel()).subscribe(latencySubscriber);
}
} |
fix code style. | void tryGetValuesFromSystem() {
serviceEndpoint = StringUtils.defaultString(Strings.emptyToNull(System.getenv().get("SERVICE_END_POINT")),
serviceEndpoint);
masterKey = StringUtils.defaultString(Strings.emptyToNull(System.getenv().get("MASTER_KEY")), masterKey);
databaseId = StringUtils.defaultString(Strings.emptyToNull(System.getenv().get("DATABASE_ID")), databaseId);
collectionId = StringUtils.defaultString(Strings.emptyToNull(System.getenv().get("COLLECTION_ID")),
collectionId);
documentDataFieldSize = Integer.parseInt(
StringUtils.defaultString(Strings.emptyToNull(System.getenv().get("DOCUMENT_DATA_FIELD_SIZE")),
Integer.toString(documentDataFieldSize)));
maxConnectionPoolSize = Integer.parseInt(
StringUtils.defaultString(Strings.emptyToNull(System.getenv().get("MAX_CONNECTION_POOL_SIZE")),
Integer.toString(maxConnectionPoolSize)));
ConsistencyLevelConverter consistencyLevelConverter = new ConsistencyLevelConverter();
consistencyLevel = consistencyLevelConverter.convert(StringUtils
.defaultString(Strings.emptyToNull(System.getenv().get("CONSISTENCY_LEVEL")), consistencyLevel.name()));
OperationTypeConverter operationTypeConverter = new OperationTypeConverter();
operation = operationTypeConverter.convert(
StringUtils.defaultString(Strings.emptyToNull(System.getenv().get("OPERATION")), operation.name()));
String concurrencyValue = StringUtils.defaultString(Strings.emptyToNull(System.getenv().get("CONCURRENCY")),
concurrency == null ? null : Integer.toString(concurrency));
concurrency = concurrencyValue == null ? null : Integer.parseInt(concurrencyValue);
String numberOfOperationsValue = StringUtils.defaultString(
Strings.emptyToNull(System.getenv().get("NUMBER_OF_OPERATIONS")), Integer.toString(numberOfOperations));
numberOfOperations = Integer.parseInt(numberOfOperationsValue);
String throughPutForMultiClientValue = StringUtils.defaultString(
Strings.emptyToNull(System.getenv().get("THROUGHPUT_MULTICLIENT")), Integer.toString(throughPutForMultiClient));
throughPutForMultiClient = Integer.parseInt(throughPutForMultiClientValue);
} | String throughPutForMultiClientValue = StringUtils.defaultString( | void tryGetValuesFromSystem() {
serviceEndpoint = StringUtils.defaultString(Strings.emptyToNull(System.getenv().get("SERVICE_END_POINT")),
serviceEndpoint);
masterKey = StringUtils.defaultString(Strings.emptyToNull(System.getenv().get("MASTER_KEY")), masterKey);
databaseId = StringUtils.defaultString(Strings.emptyToNull(System.getenv().get("DATABASE_ID")), databaseId);
collectionId = StringUtils.defaultString(Strings.emptyToNull(System.getenv().get("COLLECTION_ID")),
collectionId);
documentDataFieldSize = Integer.parseInt(
StringUtils.defaultString(Strings.emptyToNull(System.getenv().get("DOCUMENT_DATA_FIELD_SIZE")),
Integer.toString(documentDataFieldSize)));
maxConnectionPoolSize = Integer.parseInt(
StringUtils.defaultString(Strings.emptyToNull(System.getenv().get("MAX_CONNECTION_POOL_SIZE")),
Integer.toString(maxConnectionPoolSize)));
ConsistencyLevelConverter consistencyLevelConverter = new ConsistencyLevelConverter();
consistencyLevel = consistencyLevelConverter.convert(StringUtils
.defaultString(Strings.emptyToNull(System.getenv().get("CONSISTENCY_LEVEL")), consistencyLevel.name()));
OperationTypeConverter operationTypeConverter = new OperationTypeConverter();
operation = operationTypeConverter.convert(
StringUtils.defaultString(Strings.emptyToNull(System.getenv().get("OPERATION")), operation.name()));
String concurrencyValue = StringUtils.defaultString(Strings.emptyToNull(System.getenv().get("CONCURRENCY")),
concurrency == null ? null : Integer.toString(concurrency));
concurrency = concurrencyValue == null ? null : Integer.parseInt(concurrencyValue);
String numberOfOperationsValue = StringUtils.defaultString(
Strings.emptyToNull(System.getenv().get("NUMBER_OF_OPERATIONS")), Integer.toString(numberOfOperations));
numberOfOperations = Integer.parseInt(numberOfOperationsValue);
String throughputValue = StringUtils.defaultString(
Strings.emptyToNull(System.getenv().get("THROUGHPUT")), Integer.toString(throughput));
throughput = Integer.parseInt(throughputValue);
} | class ConsistencyLevelConverter implements IStringConverter<ConsistencyLevel> {
/*
* (non-Javadoc)
*
* @see com.beust.jcommander.IStringConverter
*/
@Override
public ConsistencyLevel convert(String value) {
ConsistencyLevel ret = fromString(value);
if (ret == null) {
throw new ParameterException("Value " + value + " can not be converted to ClientType. "
+ "Available values are: " + Arrays.toString(Operation.values()));
}
return ret;
}
} | class ConsistencyLevelConverter implements IStringConverter<ConsistencyLevel> {
/*
* (non-Javadoc)
*
* @see com.beust.jcommander.IStringConverter
*/
@Override
public ConsistencyLevel convert(String value) {
ConsistencyLevel ret = fromString(value);
if (ret == null) {
throw new ParameterException("Value " + value + " can not be converted to ClientType. "
+ "Available values are: " + Arrays.toString(Operation.values()));
}
return ret;
}
} |
it seems you are using `isDeleteCollections` for not only deleting collections but also for creating collection? is that right? the config seems to be overloaded. | private void createClients() {
String csvFile = "clientHostAndKey.txt";
BufferedReader br = null;
String line = "";
String splitBy = ",";
try {
br = new BufferedReader(new FileReader(csvFile));
while ((line = br.readLine()) != null) {
String[] hostAndKey = line.split(splitBy);
if (hostAndKey.length == 2) {
CosmosAsyncClient asyncClient = new CosmosClientBuilder()
.endpoint(hostAndKey[0])
.key(hostAndKey[1])
.connectionPolicy(configuration.getConnectionPolicy())
.consistencyLevel(configuration.getConsistencyLevel())
.connectionReuseAcrossClientsEnabled(true)
.buildAsyncClient();
List<PojoizedJson> docsToRead = new ArrayList<>();
if (!configuration.isDeleteCollections()) {
CosmosAsyncDatabase cosmosAsyncDatabase = asyncClient.createDatabaseIfNotExists(this.configuration.getDatabaseId()).block().getDatabase();
CosmosAsyncContainer cosmosAsyncContainer = cosmosAsyncDatabase.createContainerIfNotExists(configuration.getCollectionId(), PARTITION_KEY, 100000).block().getContainer();
String partitionKey = cosmosAsyncContainer.read().block().getProperties().getPartitionKeyDefinition()
.getPaths().iterator().next().split("/")[1];
String dataFieldValue = RandomStringUtils.randomAlphabetic(this.configuration.getDocumentDataFieldSize());
ArrayList<Flux<PojoizedJson>> createDocumentObservables = new ArrayList<>();
for (int i = 0; i < this.configuration.getNumberOfPreCreatedDocuments(); i++) {
String uuid = UUID.randomUUID().toString();
com.azure.cosmos.benchmark.PojoizedJson newDoc = generateDocument(uuid, dataFieldValue, partitionKey);
Flux<PojoizedJson> obs = cosmosAsyncContainer.createItem(newDoc).map(resp -> {
com.azure.cosmos.benchmark.PojoizedJson x =
resp.getItem();
return x;
}
).flux();
createDocumentObservables.add(obs);
}
docsToRead = Flux.merge(Flux.fromIterable(createDocumentObservables), 100).collectList().block();
MeterRegistry registry = configuration.getAzureMonitorMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
registry = configuration.getGraphiteMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
System.out.println("Client have been initialized with data created for host " + hostAndKey[0]);
} else {
System.out.println("Client have been initialized with host " + hostAndKey[0]);
}
clientDocsMap.put(asyncClient, docsToRead);
}
}
System.out.println("Total number of client created for ReadThroughputWithMultipleClient " + clientDocsMap.size());
} catch (FileNotFoundException e) {
e.printStackTrace();
} catch (IOException e) {
e.printStackTrace();
} finally {
if (br != null) {
try {
br.close();
} catch (IOException e) {
e.printStackTrace();
}
}
}
} | if (!configuration.isDeleteCollections()) { | private void createClients() {
String csvFile = "clientHostAndKey.txt";
String line = "";
String splitBy = ";";
try (BufferedReader br = new BufferedReader(new FileReader(csvFile))) {
while ((line = br.readLine()) != null) {
String[] hostAndKey = line.split(splitBy);
if (hostAndKey.length >= 2) {
String endpoint = hostAndKey[0].substring(hostAndKey[0].indexOf(ACCOUNT_ENDPOINT_TAG) + ACCOUNT_ENDPOINT_TAG.length());
String key = hostAndKey[1].substring(hostAndKey[1].indexOf(ACCOUNT_KEY_TAG) + ACCOUNT_KEY_TAG.length());
CosmosAsyncClient asyncClient = new CosmosClientBuilder()
.endpoint(endpoint)
.key(key)
.connectionPolicy(configuration.getConnectionPolicy())
.consistencyLevel(configuration.getConsistencyLevel())
.connectionReuseAcrossClientsEnabled(true)
.buildAsyncClient();
List<PojoizedJson> docsToRead = new ArrayList<>();
if (!configuration.isDeleteCollections()) {
CosmosAsyncDatabase cosmosAsyncDatabase = asyncClient.createDatabaseIfNotExists(this.configuration.getDatabaseId()).block().getDatabase();
CosmosAsyncContainer cosmosAsyncContainer =
cosmosAsyncDatabase.createContainerIfNotExists(configuration.getCollectionId(), PARTITION_KEY, configuration.getThroughput()).block().getContainer();
String partitionKey = cosmosAsyncContainer.read().block().getProperties().getPartitionKeyDefinition()
.getPaths().iterator().next().split("/")[1];
String dataFieldValue = RandomStringUtils.randomAlphabetic(this.configuration.getDocumentDataFieldSize());
ArrayList<Flux<PojoizedJson>> createDocumentObservables = new ArrayList<>();
for (int i = 0; i < this.configuration.getNumberOfPreCreatedDocuments(); i++) {
String uuid = UUID.randomUUID().toString();
com.azure.cosmos.benchmark.PojoizedJson newDoc = generateDocument(uuid, dataFieldValue, partitionKey);
Flux<PojoizedJson> obs = cosmosAsyncContainer.createItem(newDoc).map(resp -> {
com.azure.cosmos.benchmark.PojoizedJson x =
resp.getItem();
return x;
}
).flux();
createDocumentObservables.add(obs);
}
docsToRead = Flux.merge(Flux.fromIterable(createDocumentObservables), 100).collectList().block();
logger.info("Client have been initialized with data created for host {}", hostAndKey[0]);
} else {
logger.info("Client have been initialized with host {}", hostAndKey[0]);
}
clientDocsMap.put(asyncClient, docsToRead);
}
}
logger.info("Total number of client created for ReadThroughputWithMultipleClient {}", clientDocsMap.size());
} catch (FileNotFoundException e) {
logger.error(e.getMessage());
} catch (IOException e) {
logger.error(e.getMessage());
}
MeterRegistry registry = configuration.getAzureMonitorMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
registry = configuration.getGraphiteMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
} | class AsynReadWithMultipleClients<T> {
private final static String PARTITION_KEY = "/pk";
private final Semaphore concurrencyControlSemaphore;
private final Logger logger;
private final Configuration configuration;
private MetricRegistry metricsRegistry = new MetricRegistry();
private ScheduledReporter reporter;
private Meter successMeter;
private Meter failureMeter;
private Timer latency;
private Map<CosmosAsyncClient, List<PojoizedJson>> clientDocsMap = new HashMap<>();
AsynReadWithMultipleClients(Configuration cfg) {
logger = LoggerFactory.getLogger(this.getClass());
this.configuration = cfg;
createClients();
if (configuration.getGraphiteEndpoint() != null) {
final Graphite graphite = new Graphite(new InetSocketAddress(configuration.getGraphiteEndpoint(), configuration.getGraphiteEndpointPort()));
reporter = GraphiteReporter.forRegistry(metricsRegistry)
.prefixedWith(configuration.getOperationType().name())
.convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS)
.filter(MetricFilter.ALL)
.build(graphite);
} else {
reporter = ConsoleReporter.forRegistry(metricsRegistry).convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS).build();
}
concurrencyControlSemaphore = new Semaphore(cfg.getConcurrency());
}
void deleteCollection() {
try {
for (CosmosAsyncClient cosmosAsyncClient : clientDocsMap.keySet()) {
cosmosAsyncClient.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).delete().block();
}
} catch (CosmosClientException e) {
if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) {
} else {
throw e;
}
} finally {
System.out.println("Container on all client have been deleted successfully");
}
}
void run() throws Exception {
successMeter = metricsRegistry.meter("
failureMeter = metricsRegistry.meter("
latency = metricsRegistry.timer("Latency");
reporter.start(configuration.getPrintingInterval(), TimeUnit.SECONDS);
AtomicLong count = new AtomicLong(0);
long i;
long startTime = System.currentTimeMillis();
for (i = 0; shouldContinue(startTime, i); i++) {
BaseSubscriber<PojoizedJson> baseSubscriber = new BaseSubscriber<PojoizedJson>() {
@Override
protected void hookOnSubscribe(Subscription subscription) {
super.hookOnSubscribe(subscription);
}
@Override
protected void hookOnNext(PojoizedJson value) {
logger.debug("hookOnNext: {}, count:{}", value, count.get());
}
@Override
protected void hookOnCancel() {
this.hookOnError(new CancellationException());
}
@Override
protected void hookOnComplete() {
successMeter.mark();
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onSuccess();
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
@Override
protected void hookOnError(Throwable throwable) {
failureMeter.mark();
logger.error("Encountered failure {} on thread {}",
throwable.getMessage(), Thread.currentThread().getName(), throwable);
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onError(throwable);
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
};
performWorkload(baseSubscriber, i);
}
synchronized (count) {
while (count.get() < i) {
count.wait();
}
}
long endTime = System.currentTimeMillis();
logger.info("[{}] operations performed in [{}] seconds.",
configuration.getNumberOfOperations(), (int) ((endTime - startTime) / 1000));
reporter.report();
reporter.close();
}
void shutdown() {
for (CosmosAsyncClient asyncClient : clientDocsMap.keySet()) {
asyncClient.close();
}
}
protected void onSuccess() {
}
protected void onError(Throwable throwable) {
}
private PojoizedJson generateDocument(String idString, String dataFieldValue, String partitionKey) {
com.azure.cosmos.benchmark.PojoizedJson instance = new com.azure.cosmos.benchmark.PojoizedJson();
Map<String, String> properties = instance.getInstance();
properties.put("id", idString);
properties.put(partitionKey, idString);
for (int i = 0; i < configuration.getDocumentDataFieldCount(); i++) {
properties.put("dataField" + i, dataFieldValue);
}
return instance;
}
private boolean shouldContinue(long startTimeMillis, long iterationCount) {
Duration maxDurationTime = configuration.getMaxRunningTimeDuration();
int maxNumberOfOperations = configuration.getNumberOfOperations();
if (maxDurationTime == null) {
return iterationCount < maxNumberOfOperations;
}
if (startTimeMillis + maxDurationTime.toMillis() < System.currentTimeMillis()) {
return false;
}
if (maxNumberOfOperations < 0) {
return true;
}
return iterationCount < maxNumberOfOperations;
}
private void performWorkload(BaseSubscriber<PojoizedJson> baseSubscriber, long i) throws InterruptedException {
Mono<PojoizedJson> result;
int clientIndex = (int) (i % clientDocsMap.size());
CosmosAsyncClient client = (CosmosAsyncClient) clientDocsMap.keySet().toArray()[clientIndex];
int docIndex = (int) i % clientDocsMap.get(client).size();
PojoizedJson doc = clientDocsMap.get(client).get(docIndex);
String partitionKeyValue = doc.getId();
result = client.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).readItem(doc.getId(),
new PartitionKey(partitionKeyValue),
PojoizedJson.class).map(CosmosAsyncItemResponse::getItem);
concurrencyControlSemaphore.acquire();
AsyncReadBenchmark.LatencySubscriber<PojoizedJson> latencySubscriber = new AsyncReadBenchmark.LatencySubscriber<>(baseSubscriber);
latencySubscriber.context = latency.time();
result.subscribeOn(Schedulers.parallel()).subscribe(latencySubscriber);
}
} | class AsynReadWithMultipleClients<T> {
private final static String PARTITION_KEY = "/pk";
private final static String ACCOUNT_ENDPOINT_TAG = "AccountEndpoint=";
private final static String ACCOUNT_KEY_TAG = "AccountKey=";
private final Semaphore concurrencyControlSemaphore;
private final Logger logger;
private final Configuration configuration;
private MetricRegistry metricsRegistry = new MetricRegistry();
private ScheduledReporter reporter;
private Meter successMeter;
private Meter failureMeter;
private Timer latency;
private Map<CosmosAsyncClient, List<PojoizedJson>> clientDocsMap = new HashMap<>();
AsynReadWithMultipleClients(Configuration cfg) {
logger = LoggerFactory.getLogger(this.getClass());
this.configuration = cfg;
createClients();
if (configuration.getGraphiteEndpoint() != null) {
final Graphite graphite = new Graphite(new InetSocketAddress(configuration.getGraphiteEndpoint(), configuration.getGraphiteEndpointPort()));
reporter = GraphiteReporter.forRegistry(metricsRegistry)
.prefixedWith(configuration.getOperationType().name())
.convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS)
.filter(MetricFilter.ALL)
.build(graphite);
} else {
reporter = ConsoleReporter.forRegistry(metricsRegistry).convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS).build();
}
concurrencyControlSemaphore = new Semaphore(cfg.getConcurrency());
}
void deleteCollection() {
try {
for (CosmosAsyncClient cosmosAsyncClient : clientDocsMap.keySet()) {
cosmosAsyncClient.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).delete().block();
}
} catch (CosmosClientException e) {
if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) {
} else {
throw e;
}
} finally {
logger.info("Container on all client have been deleted successfully");
}
}
void run() throws Exception {
successMeter = metricsRegistry.meter("
failureMeter = metricsRegistry.meter("
latency = metricsRegistry.timer("Latency");
reporter.start(configuration.getPrintingInterval(), TimeUnit.SECONDS);
AtomicLong count = new AtomicLong(0);
long i;
long startTime = System.currentTimeMillis();
for (i = 0; shouldContinue(startTime, i); i++) {
BaseSubscriber<PojoizedJson> baseSubscriber = new BaseSubscriber<PojoizedJson>() {
@Override
protected void hookOnSubscribe(Subscription subscription) {
super.hookOnSubscribe(subscription);
}
@Override
protected void hookOnNext(PojoizedJson value) {
logger.debug("hookOnNext: {}, count:{}", value, count.get());
}
@Override
protected void hookOnCancel() {
this.hookOnError(new CancellationException());
}
@Override
protected void hookOnComplete() {
successMeter.mark();
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onSuccess();
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
@Override
protected void hookOnError(Throwable throwable) {
failureMeter.mark();
logger.error("Encountered failure {} on thread {}",
throwable.getMessage(), Thread.currentThread().getName(), throwable);
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onError(throwable);
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
};
performWorkload(baseSubscriber, i);
}
synchronized (count) {
while (count.get() < i) {
count.wait();
}
}
long endTime = System.currentTimeMillis();
logger.info("[{}] operations performed in [{}] seconds.",
configuration.getNumberOfOperations(), (int) ((endTime - startTime) / 1000));
reporter.report();
reporter.close();
}
void shutdown() {
for (CosmosAsyncClient asyncClient : clientDocsMap.keySet()) {
asyncClient.close();
}
}
protected void onSuccess() {
}
protected void onError(Throwable throwable) {
}
private PojoizedJson generateDocument(String idString, String dataFieldValue, String partitionKey) {
com.azure.cosmos.benchmark.PojoizedJson instance = new com.azure.cosmos.benchmark.PojoizedJson();
Map<String, String> properties = instance.getInstance();
properties.put("id", idString);
properties.put(partitionKey, idString);
for (int i = 0; i < configuration.getDocumentDataFieldCount(); i++) {
properties.put("dataField" + i, dataFieldValue);
}
return instance;
}
private boolean shouldContinue(long startTimeMillis, long iterationCount) {
Duration maxDurationTime = configuration.getMaxRunningTimeDuration();
int maxNumberOfOperations = configuration.getNumberOfOperations();
if (maxDurationTime == null) {
return iterationCount < maxNumberOfOperations;
}
if (startTimeMillis + maxDurationTime.toMillis() < System.currentTimeMillis()) {
return false;
}
if (maxNumberOfOperations < 0) {
return true;
}
return iterationCount < maxNumberOfOperations;
}
private void performWorkload(BaseSubscriber<PojoizedJson> baseSubscriber, long i) throws InterruptedException {
Mono<PojoizedJson> result;
int clientIndex = (int) (i % clientDocsMap.size());
CosmosAsyncClient client = (CosmosAsyncClient) clientDocsMap.keySet().toArray()[clientIndex];
int docIndex = (int) i % clientDocsMap.get(client).size();
PojoizedJson doc = clientDocsMap.get(client).get(docIndex);
String partitionKeyValue = doc.getId();
result = client.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).readItem(doc.getId(),
new PartitionKey(partitionKeyValue),
PojoizedJson.class).map(CosmosAsyncItemResponse::getItem);
concurrencyControlSemaphore.acquire();
AsyncReadBenchmark.LatencySubscriber<PojoizedJson> latencySubscriber = new AsyncReadBenchmark.LatencySubscriber<>(baseSubscriber);
latencySubscriber.context = latency.time();
result.subscribeOn(Schedulers.parallel()).subscribe(latencySubscriber);
}
} |
meterRegistry is a static setting, why do we need to set it "inside" the for loop? shouldn't this be outside? | private void createClients() {
String csvFile = "clientHostAndKey.txt";
BufferedReader br = null;
String line = "";
String splitBy = ",";
try {
br = new BufferedReader(new FileReader(csvFile));
while ((line = br.readLine()) != null) {
String[] hostAndKey = line.split(splitBy);
if (hostAndKey.length == 2) {
CosmosAsyncClient asyncClient = new CosmosClientBuilder()
.endpoint(hostAndKey[0])
.key(hostAndKey[1])
.connectionPolicy(configuration.getConnectionPolicy())
.consistencyLevel(configuration.getConsistencyLevel())
.connectionReuseAcrossClientsEnabled(true)
.buildAsyncClient();
List<PojoizedJson> docsToRead = new ArrayList<>();
if (!configuration.isDeleteCollections()) {
CosmosAsyncDatabase cosmosAsyncDatabase = asyncClient.createDatabaseIfNotExists(this.configuration.getDatabaseId()).block().getDatabase();
CosmosAsyncContainer cosmosAsyncContainer = cosmosAsyncDatabase.createContainerIfNotExists(configuration.getCollectionId(), PARTITION_KEY, 100000).block().getContainer();
String partitionKey = cosmosAsyncContainer.read().block().getProperties().getPartitionKeyDefinition()
.getPaths().iterator().next().split("/")[1];
String dataFieldValue = RandomStringUtils.randomAlphabetic(this.configuration.getDocumentDataFieldSize());
ArrayList<Flux<PojoizedJson>> createDocumentObservables = new ArrayList<>();
for (int i = 0; i < this.configuration.getNumberOfPreCreatedDocuments(); i++) {
String uuid = UUID.randomUUID().toString();
com.azure.cosmos.benchmark.PojoizedJson newDoc = generateDocument(uuid, dataFieldValue, partitionKey);
Flux<PojoizedJson> obs = cosmosAsyncContainer.createItem(newDoc).map(resp -> {
com.azure.cosmos.benchmark.PojoizedJson x =
resp.getItem();
return x;
}
).flux();
createDocumentObservables.add(obs);
}
docsToRead = Flux.merge(Flux.fromIterable(createDocumentObservables), 100).collectList().block();
MeterRegistry registry = configuration.getAzureMonitorMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
registry = configuration.getGraphiteMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
System.out.println("Client have been initialized with data created for host " + hostAndKey[0]);
} else {
System.out.println("Client have been initialized with host " + hostAndKey[0]);
}
clientDocsMap.put(asyncClient, docsToRead);
}
}
System.out.println("Total number of client created for ReadThroughputWithMultipleClient " + clientDocsMap.size());
} catch (FileNotFoundException e) {
e.printStackTrace();
} catch (IOException e) {
e.printStackTrace();
} finally {
if (br != null) {
try {
br.close();
} catch (IOException e) {
e.printStackTrace();
}
}
}
} | BridgeInternal.monitorTelemetry(registry); | private void createClients() {
String csvFile = "clientHostAndKey.txt";
String line = "";
String splitBy = ";";
try (BufferedReader br = new BufferedReader(new FileReader(csvFile))) {
while ((line = br.readLine()) != null) {
String[] hostAndKey = line.split(splitBy);
if (hostAndKey.length >= 2) {
String endpoint = hostAndKey[0].substring(hostAndKey[0].indexOf(ACCOUNT_ENDPOINT_TAG) + ACCOUNT_ENDPOINT_TAG.length());
String key = hostAndKey[1].substring(hostAndKey[1].indexOf(ACCOUNT_KEY_TAG) + ACCOUNT_KEY_TAG.length());
CosmosAsyncClient asyncClient = new CosmosClientBuilder()
.endpoint(endpoint)
.key(key)
.connectionPolicy(configuration.getConnectionPolicy())
.consistencyLevel(configuration.getConsistencyLevel())
.connectionReuseAcrossClientsEnabled(true)
.buildAsyncClient();
List<PojoizedJson> docsToRead = new ArrayList<>();
if (!configuration.isDeleteCollections()) {
CosmosAsyncDatabase cosmosAsyncDatabase = asyncClient.createDatabaseIfNotExists(this.configuration.getDatabaseId()).block().getDatabase();
CosmosAsyncContainer cosmosAsyncContainer =
cosmosAsyncDatabase.createContainerIfNotExists(configuration.getCollectionId(), PARTITION_KEY, configuration.getThroughput()).block().getContainer();
String partitionKey = cosmosAsyncContainer.read().block().getProperties().getPartitionKeyDefinition()
.getPaths().iterator().next().split("/")[1];
String dataFieldValue = RandomStringUtils.randomAlphabetic(this.configuration.getDocumentDataFieldSize());
ArrayList<Flux<PojoizedJson>> createDocumentObservables = new ArrayList<>();
for (int i = 0; i < this.configuration.getNumberOfPreCreatedDocuments(); i++) {
String uuid = UUID.randomUUID().toString();
com.azure.cosmos.benchmark.PojoizedJson newDoc = generateDocument(uuid, dataFieldValue, partitionKey);
Flux<PojoizedJson> obs = cosmosAsyncContainer.createItem(newDoc).map(resp -> {
com.azure.cosmos.benchmark.PojoizedJson x =
resp.getItem();
return x;
}
).flux();
createDocumentObservables.add(obs);
}
docsToRead = Flux.merge(Flux.fromIterable(createDocumentObservables), 100).collectList().block();
logger.info("Client have been initialized with data created for host {}", hostAndKey[0]);
} else {
logger.info("Client have been initialized with host {}", hostAndKey[0]);
}
clientDocsMap.put(asyncClient, docsToRead);
}
}
logger.info("Total number of client created for ReadThroughputWithMultipleClient {}", clientDocsMap.size());
} catch (FileNotFoundException e) {
logger.error(e.getMessage());
} catch (IOException e) {
logger.error(e.getMessage());
}
MeterRegistry registry = configuration.getAzureMonitorMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
registry = configuration.getGraphiteMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
} | class AsynReadWithMultipleClients<T> {
private final static String PARTITION_KEY = "/pk";
private final Semaphore concurrencyControlSemaphore;
private final Logger logger;
private final Configuration configuration;
private MetricRegistry metricsRegistry = new MetricRegistry();
private ScheduledReporter reporter;
private Meter successMeter;
private Meter failureMeter;
private Timer latency;
private Map<CosmosAsyncClient, List<PojoizedJson>> clientDocsMap = new HashMap<>();
AsynReadWithMultipleClients(Configuration cfg) {
logger = LoggerFactory.getLogger(this.getClass());
this.configuration = cfg;
createClients();
if (configuration.getGraphiteEndpoint() != null) {
final Graphite graphite = new Graphite(new InetSocketAddress(configuration.getGraphiteEndpoint(), configuration.getGraphiteEndpointPort()));
reporter = GraphiteReporter.forRegistry(metricsRegistry)
.prefixedWith(configuration.getOperationType().name())
.convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS)
.filter(MetricFilter.ALL)
.build(graphite);
} else {
reporter = ConsoleReporter.forRegistry(metricsRegistry).convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS).build();
}
concurrencyControlSemaphore = new Semaphore(cfg.getConcurrency());
}
void deleteCollection() {
try {
for (CosmosAsyncClient cosmosAsyncClient : clientDocsMap.keySet()) {
cosmosAsyncClient.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).delete().block();
}
} catch (CosmosClientException e) {
if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) {
} else {
throw e;
}
} finally {
System.out.println("Container on all client have been deleted successfully");
}
}
void run() throws Exception {
successMeter = metricsRegistry.meter("
failureMeter = metricsRegistry.meter("
latency = metricsRegistry.timer("Latency");
reporter.start(configuration.getPrintingInterval(), TimeUnit.SECONDS);
AtomicLong count = new AtomicLong(0);
long i;
long startTime = System.currentTimeMillis();
for (i = 0; shouldContinue(startTime, i); i++) {
BaseSubscriber<PojoizedJson> baseSubscriber = new BaseSubscriber<PojoizedJson>() {
@Override
protected void hookOnSubscribe(Subscription subscription) {
super.hookOnSubscribe(subscription);
}
@Override
protected void hookOnNext(PojoizedJson value) {
logger.debug("hookOnNext: {}, count:{}", value, count.get());
}
@Override
protected void hookOnCancel() {
this.hookOnError(new CancellationException());
}
@Override
protected void hookOnComplete() {
successMeter.mark();
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onSuccess();
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
@Override
protected void hookOnError(Throwable throwable) {
failureMeter.mark();
logger.error("Encountered failure {} on thread {}",
throwable.getMessage(), Thread.currentThread().getName(), throwable);
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onError(throwable);
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
};
performWorkload(baseSubscriber, i);
}
synchronized (count) {
while (count.get() < i) {
count.wait();
}
}
long endTime = System.currentTimeMillis();
logger.info("[{}] operations performed in [{}] seconds.",
configuration.getNumberOfOperations(), (int) ((endTime - startTime) / 1000));
reporter.report();
reporter.close();
}
void shutdown() {
for (CosmosAsyncClient asyncClient : clientDocsMap.keySet()) {
asyncClient.close();
}
}
protected void onSuccess() {
}
protected void onError(Throwable throwable) {
}
private PojoizedJson generateDocument(String idString, String dataFieldValue, String partitionKey) {
com.azure.cosmos.benchmark.PojoizedJson instance = new com.azure.cosmos.benchmark.PojoizedJson();
Map<String, String> properties = instance.getInstance();
properties.put("id", idString);
properties.put(partitionKey, idString);
for (int i = 0; i < configuration.getDocumentDataFieldCount(); i++) {
properties.put("dataField" + i, dataFieldValue);
}
return instance;
}
private boolean shouldContinue(long startTimeMillis, long iterationCount) {
Duration maxDurationTime = configuration.getMaxRunningTimeDuration();
int maxNumberOfOperations = configuration.getNumberOfOperations();
if (maxDurationTime == null) {
return iterationCount < maxNumberOfOperations;
}
if (startTimeMillis + maxDurationTime.toMillis() < System.currentTimeMillis()) {
return false;
}
if (maxNumberOfOperations < 0) {
return true;
}
return iterationCount < maxNumberOfOperations;
}
private void performWorkload(BaseSubscriber<PojoizedJson> baseSubscriber, long i) throws InterruptedException {
Mono<PojoizedJson> result;
int clientIndex = (int) (i % clientDocsMap.size());
CosmosAsyncClient client = (CosmosAsyncClient) clientDocsMap.keySet().toArray()[clientIndex];
int docIndex = (int) i % clientDocsMap.get(client).size();
PojoizedJson doc = clientDocsMap.get(client).get(docIndex);
String partitionKeyValue = doc.getId();
result = client.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).readItem(doc.getId(),
new PartitionKey(partitionKeyValue),
PojoizedJson.class).map(CosmosAsyncItemResponse::getItem);
concurrencyControlSemaphore.acquire();
AsyncReadBenchmark.LatencySubscriber<PojoizedJson> latencySubscriber = new AsyncReadBenchmark.LatencySubscriber<>(baseSubscriber);
latencySubscriber.context = latency.time();
result.subscribeOn(Schedulers.parallel()).subscribe(latencySubscriber);
}
} | class AsynReadWithMultipleClients<T> {
private final static String PARTITION_KEY = "/pk";
private final static String ACCOUNT_ENDPOINT_TAG = "AccountEndpoint=";
private final static String ACCOUNT_KEY_TAG = "AccountKey=";
private final Semaphore concurrencyControlSemaphore;
private final Logger logger;
private final Configuration configuration;
private MetricRegistry metricsRegistry = new MetricRegistry();
private ScheduledReporter reporter;
private Meter successMeter;
private Meter failureMeter;
private Timer latency;
private Map<CosmosAsyncClient, List<PojoizedJson>> clientDocsMap = new HashMap<>();
AsynReadWithMultipleClients(Configuration cfg) {
logger = LoggerFactory.getLogger(this.getClass());
this.configuration = cfg;
createClients();
if (configuration.getGraphiteEndpoint() != null) {
final Graphite graphite = new Graphite(new InetSocketAddress(configuration.getGraphiteEndpoint(), configuration.getGraphiteEndpointPort()));
reporter = GraphiteReporter.forRegistry(metricsRegistry)
.prefixedWith(configuration.getOperationType().name())
.convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS)
.filter(MetricFilter.ALL)
.build(graphite);
} else {
reporter = ConsoleReporter.forRegistry(metricsRegistry).convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS).build();
}
concurrencyControlSemaphore = new Semaphore(cfg.getConcurrency());
}
void deleteCollection() {
try {
for (CosmosAsyncClient cosmosAsyncClient : clientDocsMap.keySet()) {
cosmosAsyncClient.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).delete().block();
}
} catch (CosmosClientException e) {
if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) {
} else {
throw e;
}
} finally {
logger.info("Container on all client have been deleted successfully");
}
}
void run() throws Exception {
successMeter = metricsRegistry.meter("
failureMeter = metricsRegistry.meter("
latency = metricsRegistry.timer("Latency");
reporter.start(configuration.getPrintingInterval(), TimeUnit.SECONDS);
AtomicLong count = new AtomicLong(0);
long i;
long startTime = System.currentTimeMillis();
for (i = 0; shouldContinue(startTime, i); i++) {
BaseSubscriber<PojoizedJson> baseSubscriber = new BaseSubscriber<PojoizedJson>() {
@Override
protected void hookOnSubscribe(Subscription subscription) {
super.hookOnSubscribe(subscription);
}
@Override
protected void hookOnNext(PojoizedJson value) {
logger.debug("hookOnNext: {}, count:{}", value, count.get());
}
@Override
protected void hookOnCancel() {
this.hookOnError(new CancellationException());
}
@Override
protected void hookOnComplete() {
successMeter.mark();
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onSuccess();
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
@Override
protected void hookOnError(Throwable throwable) {
failureMeter.mark();
logger.error("Encountered failure {} on thread {}",
throwable.getMessage(), Thread.currentThread().getName(), throwable);
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onError(throwable);
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
};
performWorkload(baseSubscriber, i);
}
synchronized (count) {
while (count.get() < i) {
count.wait();
}
}
long endTime = System.currentTimeMillis();
logger.info("[{}] operations performed in [{}] seconds.",
configuration.getNumberOfOperations(), (int) ((endTime - startTime) / 1000));
reporter.report();
reporter.close();
}
void shutdown() {
for (CosmosAsyncClient asyncClient : clientDocsMap.keySet()) {
asyncClient.close();
}
}
protected void onSuccess() {
}
protected void onError(Throwable throwable) {
}
private PojoizedJson generateDocument(String idString, String dataFieldValue, String partitionKey) {
com.azure.cosmos.benchmark.PojoizedJson instance = new com.azure.cosmos.benchmark.PojoizedJson();
Map<String, String> properties = instance.getInstance();
properties.put("id", idString);
properties.put(partitionKey, idString);
for (int i = 0; i < configuration.getDocumentDataFieldCount(); i++) {
properties.put("dataField" + i, dataFieldValue);
}
return instance;
}
private boolean shouldContinue(long startTimeMillis, long iterationCount) {
Duration maxDurationTime = configuration.getMaxRunningTimeDuration();
int maxNumberOfOperations = configuration.getNumberOfOperations();
if (maxDurationTime == null) {
return iterationCount < maxNumberOfOperations;
}
if (startTimeMillis + maxDurationTime.toMillis() < System.currentTimeMillis()) {
return false;
}
if (maxNumberOfOperations < 0) {
return true;
}
return iterationCount < maxNumberOfOperations;
}
private void performWorkload(BaseSubscriber<PojoizedJson> baseSubscriber, long i) throws InterruptedException {
Mono<PojoizedJson> result;
int clientIndex = (int) (i % clientDocsMap.size());
CosmosAsyncClient client = (CosmosAsyncClient) clientDocsMap.keySet().toArray()[clientIndex];
int docIndex = (int) i % clientDocsMap.get(client).size();
PojoizedJson doc = clientDocsMap.get(client).get(docIndex);
String partitionKeyValue = doc.getId();
result = client.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).readItem(doc.getId(),
new PartitionKey(partitionKeyValue),
PojoizedJson.class).map(CosmosAsyncItemResponse::getItem);
concurrencyControlSemaphore.acquire();
AsyncReadBenchmark.LatencySubscriber<PojoizedJson> latencySubscriber = new AsyncReadBenchmark.LatencySubscriber<>(baseSubscriber);
latencySubscriber.context = latency.time();
result.subscribeOn(Schedulers.parallel()).subscribe(latencySubscriber);
}
} |
meterRegistry is a static setting, why do we need to set it "inside" the for loop? shouldn't this be outside? | private void createClients() {
String csvFile = "clientHostAndKey.txt";
BufferedReader br = null;
String line = "";
String splitBy = ",";
try {
br = new BufferedReader(new FileReader(csvFile));
while ((line = br.readLine()) != null) {
String[] hostAndKey = line.split(splitBy);
if (hostAndKey.length == 2) {
CosmosAsyncClient asyncClient = new CosmosClientBuilder()
.endpoint(hostAndKey[0])
.key(hostAndKey[1])
.connectionPolicy(configuration.getConnectionPolicy())
.consistencyLevel(configuration.getConsistencyLevel())
.connectionReuseAcrossClientsEnabled(true)
.buildAsyncClient();
List<PojoizedJson> docsToRead = new ArrayList<>();
if (!configuration.isDeleteCollections()) {
CosmosAsyncDatabase cosmosAsyncDatabase = asyncClient.createDatabaseIfNotExists(this.configuration.getDatabaseId()).block().getDatabase();
CosmosAsyncContainer cosmosAsyncContainer = cosmosAsyncDatabase.createContainerIfNotExists(configuration.getCollectionId(), PARTITION_KEY, 100000).block().getContainer();
String partitionKey = cosmosAsyncContainer.read().block().getProperties().getPartitionKeyDefinition()
.getPaths().iterator().next().split("/")[1];
String dataFieldValue = RandomStringUtils.randomAlphabetic(this.configuration.getDocumentDataFieldSize());
ArrayList<Flux<PojoizedJson>> createDocumentObservables = new ArrayList<>();
for (int i = 0; i < this.configuration.getNumberOfPreCreatedDocuments(); i++) {
String uuid = UUID.randomUUID().toString();
com.azure.cosmos.benchmark.PojoizedJson newDoc = generateDocument(uuid, dataFieldValue, partitionKey);
Flux<PojoizedJson> obs = cosmosAsyncContainer.createItem(newDoc).map(resp -> {
com.azure.cosmos.benchmark.PojoizedJson x =
resp.getItem();
return x;
}
).flux();
createDocumentObservables.add(obs);
}
docsToRead = Flux.merge(Flux.fromIterable(createDocumentObservables), 100).collectList().block();
MeterRegistry registry = configuration.getAzureMonitorMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
registry = configuration.getGraphiteMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
System.out.println("Client have been initialized with data created for host " + hostAndKey[0]);
} else {
System.out.println("Client have been initialized with host " + hostAndKey[0]);
}
clientDocsMap.put(asyncClient, docsToRead);
}
}
System.out.println("Total number of client created for ReadThroughputWithMultipleClient " + clientDocsMap.size());
} catch (FileNotFoundException e) {
e.printStackTrace();
} catch (IOException e) {
e.printStackTrace();
} finally {
if (br != null) {
try {
br.close();
} catch (IOException e) {
e.printStackTrace();
}
}
}
} | BridgeInternal.monitorTelemetry(registry); | private void createClients() {
String csvFile = "clientHostAndKey.txt";
String line = "";
String splitBy = ";";
try (BufferedReader br = new BufferedReader(new FileReader(csvFile))) {
while ((line = br.readLine()) != null) {
String[] hostAndKey = line.split(splitBy);
if (hostAndKey.length >= 2) {
String endpoint = hostAndKey[0].substring(hostAndKey[0].indexOf(ACCOUNT_ENDPOINT_TAG) + ACCOUNT_ENDPOINT_TAG.length());
String key = hostAndKey[1].substring(hostAndKey[1].indexOf(ACCOUNT_KEY_TAG) + ACCOUNT_KEY_TAG.length());
CosmosAsyncClient asyncClient = new CosmosClientBuilder()
.endpoint(endpoint)
.key(key)
.connectionPolicy(configuration.getConnectionPolicy())
.consistencyLevel(configuration.getConsistencyLevel())
.connectionReuseAcrossClientsEnabled(true)
.buildAsyncClient();
List<PojoizedJson> docsToRead = new ArrayList<>();
if (!configuration.isDeleteCollections()) {
CosmosAsyncDatabase cosmosAsyncDatabase = asyncClient.createDatabaseIfNotExists(this.configuration.getDatabaseId()).block().getDatabase();
CosmosAsyncContainer cosmosAsyncContainer =
cosmosAsyncDatabase.createContainerIfNotExists(configuration.getCollectionId(), PARTITION_KEY, configuration.getThroughput()).block().getContainer();
String partitionKey = cosmosAsyncContainer.read().block().getProperties().getPartitionKeyDefinition()
.getPaths().iterator().next().split("/")[1];
String dataFieldValue = RandomStringUtils.randomAlphabetic(this.configuration.getDocumentDataFieldSize());
ArrayList<Flux<PojoizedJson>> createDocumentObservables = new ArrayList<>();
for (int i = 0; i < this.configuration.getNumberOfPreCreatedDocuments(); i++) {
String uuid = UUID.randomUUID().toString();
com.azure.cosmos.benchmark.PojoizedJson newDoc = generateDocument(uuid, dataFieldValue, partitionKey);
Flux<PojoizedJson> obs = cosmosAsyncContainer.createItem(newDoc).map(resp -> {
com.azure.cosmos.benchmark.PojoizedJson x =
resp.getItem();
return x;
}
).flux();
createDocumentObservables.add(obs);
}
docsToRead = Flux.merge(Flux.fromIterable(createDocumentObservables), 100).collectList().block();
logger.info("Client have been initialized with data created for host {}", hostAndKey[0]);
} else {
logger.info("Client have been initialized with host {}", hostAndKey[0]);
}
clientDocsMap.put(asyncClient, docsToRead);
}
}
logger.info("Total number of client created for ReadThroughputWithMultipleClient {}", clientDocsMap.size());
} catch (FileNotFoundException e) {
logger.error(e.getMessage());
} catch (IOException e) {
logger.error(e.getMessage());
}
MeterRegistry registry = configuration.getAzureMonitorMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
registry = configuration.getGraphiteMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
} | class AsynReadWithMultipleClients<T> {
private final static String PARTITION_KEY = "/pk";
private final Semaphore concurrencyControlSemaphore;
private final Logger logger;
private final Configuration configuration;
private MetricRegistry metricsRegistry = new MetricRegistry();
private ScheduledReporter reporter;
private Meter successMeter;
private Meter failureMeter;
private Timer latency;
private Map<CosmosAsyncClient, List<PojoizedJson>> clientDocsMap = new HashMap<>();
AsynReadWithMultipleClients(Configuration cfg) {
logger = LoggerFactory.getLogger(this.getClass());
this.configuration = cfg;
createClients();
if (configuration.getGraphiteEndpoint() != null) {
final Graphite graphite = new Graphite(new InetSocketAddress(configuration.getGraphiteEndpoint(), configuration.getGraphiteEndpointPort()));
reporter = GraphiteReporter.forRegistry(metricsRegistry)
.prefixedWith(configuration.getOperationType().name())
.convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS)
.filter(MetricFilter.ALL)
.build(graphite);
} else {
reporter = ConsoleReporter.forRegistry(metricsRegistry).convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS).build();
}
concurrencyControlSemaphore = new Semaphore(cfg.getConcurrency());
}
void deleteCollection() {
try {
for (CosmosAsyncClient cosmosAsyncClient : clientDocsMap.keySet()) {
cosmosAsyncClient.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).delete().block();
}
} catch (CosmosClientException e) {
if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) {
} else {
throw e;
}
} finally {
System.out.println("Container on all client have been deleted successfully");
}
}
void run() throws Exception {
successMeter = metricsRegistry.meter("
failureMeter = metricsRegistry.meter("
latency = metricsRegistry.timer("Latency");
reporter.start(configuration.getPrintingInterval(), TimeUnit.SECONDS);
AtomicLong count = new AtomicLong(0);
long i;
long startTime = System.currentTimeMillis();
for (i = 0; shouldContinue(startTime, i); i++) {
BaseSubscriber<PojoizedJson> baseSubscriber = new BaseSubscriber<PojoizedJson>() {
@Override
protected void hookOnSubscribe(Subscription subscription) {
super.hookOnSubscribe(subscription);
}
@Override
protected void hookOnNext(PojoizedJson value) {
logger.debug("hookOnNext: {}, count:{}", value, count.get());
}
@Override
protected void hookOnCancel() {
this.hookOnError(new CancellationException());
}
@Override
protected void hookOnComplete() {
successMeter.mark();
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onSuccess();
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
@Override
protected void hookOnError(Throwable throwable) {
failureMeter.mark();
logger.error("Encountered failure {} on thread {}",
throwable.getMessage(), Thread.currentThread().getName(), throwable);
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onError(throwable);
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
};
performWorkload(baseSubscriber, i);
}
synchronized (count) {
while (count.get() < i) {
count.wait();
}
}
long endTime = System.currentTimeMillis();
logger.info("[{}] operations performed in [{}] seconds.",
configuration.getNumberOfOperations(), (int) ((endTime - startTime) / 1000));
reporter.report();
reporter.close();
}
void shutdown() {
for (CosmosAsyncClient asyncClient : clientDocsMap.keySet()) {
asyncClient.close();
}
}
protected void onSuccess() {
}
protected void onError(Throwable throwable) {
}
private PojoizedJson generateDocument(String idString, String dataFieldValue, String partitionKey) {
com.azure.cosmos.benchmark.PojoizedJson instance = new com.azure.cosmos.benchmark.PojoizedJson();
Map<String, String> properties = instance.getInstance();
properties.put("id", idString);
properties.put(partitionKey, idString);
for (int i = 0; i < configuration.getDocumentDataFieldCount(); i++) {
properties.put("dataField" + i, dataFieldValue);
}
return instance;
}
private boolean shouldContinue(long startTimeMillis, long iterationCount) {
Duration maxDurationTime = configuration.getMaxRunningTimeDuration();
int maxNumberOfOperations = configuration.getNumberOfOperations();
if (maxDurationTime == null) {
return iterationCount < maxNumberOfOperations;
}
if (startTimeMillis + maxDurationTime.toMillis() < System.currentTimeMillis()) {
return false;
}
if (maxNumberOfOperations < 0) {
return true;
}
return iterationCount < maxNumberOfOperations;
}
private void performWorkload(BaseSubscriber<PojoizedJson> baseSubscriber, long i) throws InterruptedException {
Mono<PojoizedJson> result;
int clientIndex = (int) (i % clientDocsMap.size());
CosmosAsyncClient client = (CosmosAsyncClient) clientDocsMap.keySet().toArray()[clientIndex];
int docIndex = (int) i % clientDocsMap.get(client).size();
PojoizedJson doc = clientDocsMap.get(client).get(docIndex);
String partitionKeyValue = doc.getId();
result = client.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).readItem(doc.getId(),
new PartitionKey(partitionKeyValue),
PojoizedJson.class).map(CosmosAsyncItemResponse::getItem);
concurrencyControlSemaphore.acquire();
AsyncReadBenchmark.LatencySubscriber<PojoizedJson> latencySubscriber = new AsyncReadBenchmark.LatencySubscriber<>(baseSubscriber);
latencySubscriber.context = latency.time();
result.subscribeOn(Schedulers.parallel()).subscribe(latencySubscriber);
}
} | class AsynReadWithMultipleClients<T> {
private final static String PARTITION_KEY = "/pk";
private final static String ACCOUNT_ENDPOINT_TAG = "AccountEndpoint=";
private final static String ACCOUNT_KEY_TAG = "AccountKey=";
private final Semaphore concurrencyControlSemaphore;
private final Logger logger;
private final Configuration configuration;
private MetricRegistry metricsRegistry = new MetricRegistry();
private ScheduledReporter reporter;
private Meter successMeter;
private Meter failureMeter;
private Timer latency;
private Map<CosmosAsyncClient, List<PojoizedJson>> clientDocsMap = new HashMap<>();
AsynReadWithMultipleClients(Configuration cfg) {
logger = LoggerFactory.getLogger(this.getClass());
this.configuration = cfg;
createClients();
if (configuration.getGraphiteEndpoint() != null) {
final Graphite graphite = new Graphite(new InetSocketAddress(configuration.getGraphiteEndpoint(), configuration.getGraphiteEndpointPort()));
reporter = GraphiteReporter.forRegistry(metricsRegistry)
.prefixedWith(configuration.getOperationType().name())
.convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS)
.filter(MetricFilter.ALL)
.build(graphite);
} else {
reporter = ConsoleReporter.forRegistry(metricsRegistry).convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS).build();
}
concurrencyControlSemaphore = new Semaphore(cfg.getConcurrency());
}
void deleteCollection() {
try {
for (CosmosAsyncClient cosmosAsyncClient : clientDocsMap.keySet()) {
cosmosAsyncClient.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).delete().block();
}
} catch (CosmosClientException e) {
if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) {
} else {
throw e;
}
} finally {
logger.info("Container on all client have been deleted successfully");
}
}
void run() throws Exception {
successMeter = metricsRegistry.meter("
failureMeter = metricsRegistry.meter("
latency = metricsRegistry.timer("Latency");
reporter.start(configuration.getPrintingInterval(), TimeUnit.SECONDS);
AtomicLong count = new AtomicLong(0);
long i;
long startTime = System.currentTimeMillis();
for (i = 0; shouldContinue(startTime, i); i++) {
BaseSubscriber<PojoizedJson> baseSubscriber = new BaseSubscriber<PojoizedJson>() {
@Override
protected void hookOnSubscribe(Subscription subscription) {
super.hookOnSubscribe(subscription);
}
@Override
protected void hookOnNext(PojoizedJson value) {
logger.debug("hookOnNext: {}, count:{}", value, count.get());
}
@Override
protected void hookOnCancel() {
this.hookOnError(new CancellationException());
}
@Override
protected void hookOnComplete() {
successMeter.mark();
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onSuccess();
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
@Override
protected void hookOnError(Throwable throwable) {
failureMeter.mark();
logger.error("Encountered failure {} on thread {}",
throwable.getMessage(), Thread.currentThread().getName(), throwable);
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onError(throwable);
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
};
performWorkload(baseSubscriber, i);
}
synchronized (count) {
while (count.get() < i) {
count.wait();
}
}
long endTime = System.currentTimeMillis();
logger.info("[{}] operations performed in [{}] seconds.",
configuration.getNumberOfOperations(), (int) ((endTime - startTime) / 1000));
reporter.report();
reporter.close();
}
void shutdown() {
for (CosmosAsyncClient asyncClient : clientDocsMap.keySet()) {
asyncClient.close();
}
}
protected void onSuccess() {
}
protected void onError(Throwable throwable) {
}
private PojoizedJson generateDocument(String idString, String dataFieldValue, String partitionKey) {
com.azure.cosmos.benchmark.PojoizedJson instance = new com.azure.cosmos.benchmark.PojoizedJson();
Map<String, String> properties = instance.getInstance();
properties.put("id", idString);
properties.put(partitionKey, idString);
for (int i = 0; i < configuration.getDocumentDataFieldCount(); i++) {
properties.put("dataField" + i, dataFieldValue);
}
return instance;
}
private boolean shouldContinue(long startTimeMillis, long iterationCount) {
Duration maxDurationTime = configuration.getMaxRunningTimeDuration();
int maxNumberOfOperations = configuration.getNumberOfOperations();
if (maxDurationTime == null) {
return iterationCount < maxNumberOfOperations;
}
if (startTimeMillis + maxDurationTime.toMillis() < System.currentTimeMillis()) {
return false;
}
if (maxNumberOfOperations < 0) {
return true;
}
return iterationCount < maxNumberOfOperations;
}
private void performWorkload(BaseSubscriber<PojoizedJson> baseSubscriber, long i) throws InterruptedException {
Mono<PojoizedJson> result;
int clientIndex = (int) (i % clientDocsMap.size());
CosmosAsyncClient client = (CosmosAsyncClient) clientDocsMap.keySet().toArray()[clientIndex];
int docIndex = (int) i % clientDocsMap.get(client).size();
PojoizedJson doc = clientDocsMap.get(client).get(docIndex);
String partitionKeyValue = doc.getId();
result = client.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).readItem(doc.getId(),
new PartitionKey(partitionKeyValue),
PojoizedJson.class).map(CosmosAsyncItemResponse::getItem);
concurrencyControlSemaphore.acquire();
AsyncReadBenchmark.LatencySubscriber<PojoizedJson> latencySubscriber = new AsyncReadBenchmark.LatencySubscriber<>(baseSubscriber);
latencySubscriber.context = latency.time();
result.subscribeOn(Schedulers.parallel()).subscribe(latencySubscriber);
}
} |
Done | void deleteCollection() {
try {
for (CosmosAsyncClient cosmosAsyncClient : clientDocsMap.keySet()) {
cosmosAsyncClient.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).delete().block();
}
} catch (CosmosClientException e) {
if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) {
} else {
throw e;
}
} finally {
System.out.println("Container on all client have been deleted successfully");
}
} | System.out.println("Container on all client have been deleted successfully"); | void deleteCollection() {
try {
for (CosmosAsyncClient cosmosAsyncClient : clientDocsMap.keySet()) {
cosmosAsyncClient.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).delete().block();
}
} catch (CosmosClientException e) {
if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) {
} else {
throw e;
}
} finally {
logger.info("Container on all client have been deleted successfully");
}
} | class AsynReadWithMultipleClients<T> {
private final static String PARTITION_KEY = "/pk";
private final Semaphore concurrencyControlSemaphore;
private final Logger logger;
private final Configuration configuration;
private MetricRegistry metricsRegistry = new MetricRegistry();
private ScheduledReporter reporter;
private Meter successMeter;
private Meter failureMeter;
private Timer latency;
private Map<CosmosAsyncClient, List<PojoizedJson>> clientDocsMap = new HashMap<>();
AsynReadWithMultipleClients(Configuration cfg) {
logger = LoggerFactory.getLogger(this.getClass());
this.configuration = cfg;
createClients();
if (configuration.getGraphiteEndpoint() != null) {
final Graphite graphite = new Graphite(new InetSocketAddress(configuration.getGraphiteEndpoint(), configuration.getGraphiteEndpointPort()));
reporter = GraphiteReporter.forRegistry(metricsRegistry)
.prefixedWith(configuration.getOperationType().name())
.convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS)
.filter(MetricFilter.ALL)
.build(graphite);
} else {
reporter = ConsoleReporter.forRegistry(metricsRegistry).convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS).build();
}
concurrencyControlSemaphore = new Semaphore(cfg.getConcurrency());
}
void run() throws Exception {
successMeter = metricsRegistry.meter("
failureMeter = metricsRegistry.meter("
latency = metricsRegistry.timer("Latency");
reporter.start(configuration.getPrintingInterval(), TimeUnit.SECONDS);
AtomicLong count = new AtomicLong(0);
long i;
long startTime = System.currentTimeMillis();
for (i = 0; shouldContinue(startTime, i); i++) {
BaseSubscriber<PojoizedJson> baseSubscriber = new BaseSubscriber<PojoizedJson>() {
@Override
protected void hookOnSubscribe(Subscription subscription) {
super.hookOnSubscribe(subscription);
}
@Override
protected void hookOnNext(PojoizedJson value) {
logger.debug("hookOnNext: {}, count:{}", value, count.get());
}
@Override
protected void hookOnCancel() {
this.hookOnError(new CancellationException());
}
@Override
protected void hookOnComplete() {
successMeter.mark();
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onSuccess();
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
@Override
protected void hookOnError(Throwable throwable) {
failureMeter.mark();
logger.error("Encountered failure {} on thread {}",
throwable.getMessage(), Thread.currentThread().getName(), throwable);
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onError(throwable);
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
};
performWorkload(baseSubscriber, i);
}
synchronized (count) {
while (count.get() < i) {
count.wait();
}
}
long endTime = System.currentTimeMillis();
logger.info("[{}] operations performed in [{}] seconds.",
configuration.getNumberOfOperations(), (int) ((endTime - startTime) / 1000));
reporter.report();
reporter.close();
}
void shutdown() {
for (CosmosAsyncClient asyncClient : clientDocsMap.keySet()) {
asyncClient.close();
}
}
protected void onSuccess() {
}
protected void onError(Throwable throwable) {
}
private PojoizedJson generateDocument(String idString, String dataFieldValue, String partitionKey) {
com.azure.cosmos.benchmark.PojoizedJson instance = new com.azure.cosmos.benchmark.PojoizedJson();
Map<String, String> properties = instance.getInstance();
properties.put("id", idString);
properties.put(partitionKey, idString);
for (int i = 0; i < configuration.getDocumentDataFieldCount(); i++) {
properties.put("dataField" + i, dataFieldValue);
}
return instance;
}
private boolean shouldContinue(long startTimeMillis, long iterationCount) {
Duration maxDurationTime = configuration.getMaxRunningTimeDuration();
int maxNumberOfOperations = configuration.getNumberOfOperations();
if (maxDurationTime == null) {
return iterationCount < maxNumberOfOperations;
}
if (startTimeMillis + maxDurationTime.toMillis() < System.currentTimeMillis()) {
return false;
}
if (maxNumberOfOperations < 0) {
return true;
}
return iterationCount < maxNumberOfOperations;
}
private void createClients() {
String csvFile = "clientHostAndKey.txt";
BufferedReader br = null;
String line = "";
String splitBy = ",";
try {
br = new BufferedReader(new FileReader(csvFile));
while ((line = br.readLine()) != null) {
String[] hostAndKey = line.split(splitBy);
if (hostAndKey.length == 2) {
CosmosAsyncClient asyncClient = new CosmosClientBuilder()
.endpoint(hostAndKey[0])
.key(hostAndKey[1])
.connectionPolicy(configuration.getConnectionPolicy())
.consistencyLevel(configuration.getConsistencyLevel())
.connectionReuseAcrossClientsEnabled(true)
.buildAsyncClient();
List<PojoizedJson> docsToRead = new ArrayList<>();
if (!configuration.isDeleteCollections()) {
CosmosAsyncDatabase cosmosAsyncDatabase = asyncClient.createDatabaseIfNotExists(this.configuration.getDatabaseId()).block().getDatabase();
CosmosAsyncContainer cosmosAsyncContainer = cosmosAsyncDatabase.createContainerIfNotExists(configuration.getCollectionId(), PARTITION_KEY, 100000).block().getContainer();
String partitionKey = cosmosAsyncContainer.read().block().getProperties().getPartitionKeyDefinition()
.getPaths().iterator().next().split("/")[1];
String dataFieldValue = RandomStringUtils.randomAlphabetic(this.configuration.getDocumentDataFieldSize());
ArrayList<Flux<PojoizedJson>> createDocumentObservables = new ArrayList<>();
for (int i = 0; i < this.configuration.getNumberOfPreCreatedDocuments(); i++) {
String uuid = UUID.randomUUID().toString();
com.azure.cosmos.benchmark.PojoizedJson newDoc = generateDocument(uuid, dataFieldValue, partitionKey);
Flux<PojoizedJson> obs = cosmosAsyncContainer.createItem(newDoc).map(resp -> {
com.azure.cosmos.benchmark.PojoizedJson x =
resp.getItem();
return x;
}
).flux();
createDocumentObservables.add(obs);
}
docsToRead = Flux.merge(Flux.fromIterable(createDocumentObservables), 100).collectList().block();
MeterRegistry registry = configuration.getAzureMonitorMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
registry = configuration.getGraphiteMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
System.out.println("Client have been initialized with data created for host " + hostAndKey[0]);
} else {
System.out.println("Client have been initialized with host " + hostAndKey[0]);
}
clientDocsMap.put(asyncClient, docsToRead);
}
}
System.out.println("Total number of client created for ReadThroughputWithMultipleClient " + clientDocsMap.size());
} catch (FileNotFoundException e) {
e.printStackTrace();
} catch (IOException e) {
e.printStackTrace();
} finally {
if (br != null) {
try {
br.close();
} catch (IOException e) {
e.printStackTrace();
}
}
}
}
private void performWorkload(BaseSubscriber<PojoizedJson> baseSubscriber, long i) throws InterruptedException {
Mono<PojoizedJson> result;
int clientIndex = (int) (i % clientDocsMap.size());
CosmosAsyncClient client = (CosmosAsyncClient) clientDocsMap.keySet().toArray()[clientIndex];
int docIndex = (int) i % clientDocsMap.get(client).size();
PojoizedJson doc = clientDocsMap.get(client).get(docIndex);
String partitionKeyValue = doc.getId();
result = client.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).readItem(doc.getId(),
new PartitionKey(partitionKeyValue),
PojoizedJson.class).map(CosmosAsyncItemResponse::getItem);
concurrencyControlSemaphore.acquire();
AsyncReadBenchmark.LatencySubscriber<PojoizedJson> latencySubscriber = new AsyncReadBenchmark.LatencySubscriber<>(baseSubscriber);
latencySubscriber.context = latency.time();
result.subscribeOn(Schedulers.parallel()).subscribe(latencySubscriber);
}
} | class AsynReadWithMultipleClients<T> {
private final static String PARTITION_KEY = "/pk";
private final static String ACCOUNT_ENDPOINT_TAG = "AccountEndpoint=";
private final static String ACCOUNT_KEY_TAG = "AccountKey=";
private final Semaphore concurrencyControlSemaphore;
private final Logger logger;
private final Configuration configuration;
private MetricRegistry metricsRegistry = new MetricRegistry();
private ScheduledReporter reporter;
private Meter successMeter;
private Meter failureMeter;
private Timer latency;
private Map<CosmosAsyncClient, List<PojoizedJson>> clientDocsMap = new HashMap<>();
AsynReadWithMultipleClients(Configuration cfg) {
logger = LoggerFactory.getLogger(this.getClass());
this.configuration = cfg;
createClients();
if (configuration.getGraphiteEndpoint() != null) {
final Graphite graphite = new Graphite(new InetSocketAddress(configuration.getGraphiteEndpoint(), configuration.getGraphiteEndpointPort()));
reporter = GraphiteReporter.forRegistry(metricsRegistry)
.prefixedWith(configuration.getOperationType().name())
.convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS)
.filter(MetricFilter.ALL)
.build(graphite);
} else {
reporter = ConsoleReporter.forRegistry(metricsRegistry).convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS).build();
}
concurrencyControlSemaphore = new Semaphore(cfg.getConcurrency());
}
void run() throws Exception {
successMeter = metricsRegistry.meter("
failureMeter = metricsRegistry.meter("
latency = metricsRegistry.timer("Latency");
reporter.start(configuration.getPrintingInterval(), TimeUnit.SECONDS);
AtomicLong count = new AtomicLong(0);
long i;
long startTime = System.currentTimeMillis();
for (i = 0; shouldContinue(startTime, i); i++) {
BaseSubscriber<PojoizedJson> baseSubscriber = new BaseSubscriber<PojoizedJson>() {
@Override
protected void hookOnSubscribe(Subscription subscription) {
super.hookOnSubscribe(subscription);
}
@Override
protected void hookOnNext(PojoizedJson value) {
logger.debug("hookOnNext: {}, count:{}", value, count.get());
}
@Override
protected void hookOnCancel() {
this.hookOnError(new CancellationException());
}
@Override
protected void hookOnComplete() {
successMeter.mark();
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onSuccess();
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
@Override
protected void hookOnError(Throwable throwable) {
failureMeter.mark();
logger.error("Encountered failure {} on thread {}",
throwable.getMessage(), Thread.currentThread().getName(), throwable);
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onError(throwable);
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
};
performWorkload(baseSubscriber, i);
}
synchronized (count) {
while (count.get() < i) {
count.wait();
}
}
long endTime = System.currentTimeMillis();
logger.info("[{}] operations performed in [{}] seconds.",
configuration.getNumberOfOperations(), (int) ((endTime - startTime) / 1000));
reporter.report();
reporter.close();
}
void shutdown() {
for (CosmosAsyncClient asyncClient : clientDocsMap.keySet()) {
asyncClient.close();
}
}
protected void onSuccess() {
}
protected void onError(Throwable throwable) {
}
private PojoizedJson generateDocument(String idString, String dataFieldValue, String partitionKey) {
com.azure.cosmos.benchmark.PojoizedJson instance = new com.azure.cosmos.benchmark.PojoizedJson();
Map<String, String> properties = instance.getInstance();
properties.put("id", idString);
properties.put(partitionKey, idString);
for (int i = 0; i < configuration.getDocumentDataFieldCount(); i++) {
properties.put("dataField" + i, dataFieldValue);
}
return instance;
}
private boolean shouldContinue(long startTimeMillis, long iterationCount) {
Duration maxDurationTime = configuration.getMaxRunningTimeDuration();
int maxNumberOfOperations = configuration.getNumberOfOperations();
if (maxDurationTime == null) {
return iterationCount < maxNumberOfOperations;
}
if (startTimeMillis + maxDurationTime.toMillis() < System.currentTimeMillis()) {
return false;
}
if (maxNumberOfOperations < 0) {
return true;
}
return iterationCount < maxNumberOfOperations;
}
private void createClients() {
String csvFile = "clientHostAndKey.txt";
String line = "";
String splitBy = ";";
try (BufferedReader br = new BufferedReader(new FileReader(csvFile))) {
while ((line = br.readLine()) != null) {
String[] hostAndKey = line.split(splitBy);
if (hostAndKey.length >= 2) {
String endpoint = hostAndKey[0].substring(hostAndKey[0].indexOf(ACCOUNT_ENDPOINT_TAG) + ACCOUNT_ENDPOINT_TAG.length());
String key = hostAndKey[1].substring(hostAndKey[1].indexOf(ACCOUNT_KEY_TAG) + ACCOUNT_KEY_TAG.length());
CosmosAsyncClient asyncClient = new CosmosClientBuilder()
.endpoint(endpoint)
.key(key)
.connectionPolicy(configuration.getConnectionPolicy())
.consistencyLevel(configuration.getConsistencyLevel())
.connectionReuseAcrossClientsEnabled(true)
.buildAsyncClient();
List<PojoizedJson> docsToRead = new ArrayList<>();
if (!configuration.isDeleteCollections()) {
CosmosAsyncDatabase cosmosAsyncDatabase = asyncClient.createDatabaseIfNotExists(this.configuration.getDatabaseId()).block().getDatabase();
CosmosAsyncContainer cosmosAsyncContainer =
cosmosAsyncDatabase.createContainerIfNotExists(configuration.getCollectionId(), PARTITION_KEY, configuration.getThroughput()).block().getContainer();
String partitionKey = cosmosAsyncContainer.read().block().getProperties().getPartitionKeyDefinition()
.getPaths().iterator().next().split("/")[1];
String dataFieldValue = RandomStringUtils.randomAlphabetic(this.configuration.getDocumentDataFieldSize());
ArrayList<Flux<PojoizedJson>> createDocumentObservables = new ArrayList<>();
for (int i = 0; i < this.configuration.getNumberOfPreCreatedDocuments(); i++) {
String uuid = UUID.randomUUID().toString();
com.azure.cosmos.benchmark.PojoizedJson newDoc = generateDocument(uuid, dataFieldValue, partitionKey);
Flux<PojoizedJson> obs = cosmosAsyncContainer.createItem(newDoc).map(resp -> {
com.azure.cosmos.benchmark.PojoizedJson x =
resp.getItem();
return x;
}
).flux();
createDocumentObservables.add(obs);
}
docsToRead = Flux.merge(Flux.fromIterable(createDocumentObservables), 100).collectList().block();
logger.info("Client have been initialized with data created for host {}", hostAndKey[0]);
} else {
logger.info("Client have been initialized with host {}", hostAndKey[0]);
}
clientDocsMap.put(asyncClient, docsToRead);
}
}
logger.info("Total number of client created for ReadThroughputWithMultipleClient {}", clientDocsMap.size());
} catch (FileNotFoundException e) {
logger.error(e.getMessage());
} catch (IOException e) {
logger.error(e.getMessage());
}
MeterRegistry registry = configuration.getAzureMonitorMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
registry = configuration.getGraphiteMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
}
private void performWorkload(BaseSubscriber<PojoizedJson> baseSubscriber, long i) throws InterruptedException {
Mono<PojoizedJson> result;
int clientIndex = (int) (i % clientDocsMap.size());
CosmosAsyncClient client = (CosmosAsyncClient) clientDocsMap.keySet().toArray()[clientIndex];
int docIndex = (int) i % clientDocsMap.get(client).size();
PojoizedJson doc = clientDocsMap.get(client).get(docIndex);
String partitionKeyValue = doc.getId();
result = client.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).readItem(doc.getId(),
new PartitionKey(partitionKeyValue),
PojoizedJson.class).map(CosmosAsyncItemResponse::getItem);
concurrencyControlSemaphore.acquire();
AsyncReadBenchmark.LatencySubscriber<PojoizedJson> latencySubscriber = new AsyncReadBenchmark.LatencySubscriber<>(baseSubscriber);
latencySubscriber.context = latency.time();
result.subscribeOn(Schedulers.parallel()).subscribe(latencySubscriber);
}
} |
done | private void createClients() {
String csvFile = "clientHostAndKey.txt";
BufferedReader br = null;
String line = "";
String splitBy = ",";
try {
br = new BufferedReader(new FileReader(csvFile));
while ((line = br.readLine()) != null) {
String[] hostAndKey = line.split(splitBy);
if (hostAndKey.length == 2) {
CosmosAsyncClient asyncClient = new CosmosClientBuilder()
.endpoint(hostAndKey[0])
.key(hostAndKey[1])
.connectionPolicy(configuration.getConnectionPolicy())
.consistencyLevel(configuration.getConsistencyLevel())
.connectionReuseAcrossClientsEnabled(true)
.buildAsyncClient();
List<PojoizedJson> docsToRead = new ArrayList<>();
if (!configuration.isDeleteCollections()) {
CosmosAsyncDatabase cosmosAsyncDatabase = asyncClient.createDatabaseIfNotExists(this.configuration.getDatabaseId()).block().getDatabase();
CosmosAsyncContainer cosmosAsyncContainer = cosmosAsyncDatabase.createContainerIfNotExists(configuration.getCollectionId(), PARTITION_KEY, 100000).block().getContainer();
String partitionKey = cosmosAsyncContainer.read().block().getProperties().getPartitionKeyDefinition()
.getPaths().iterator().next().split("/")[1];
String dataFieldValue = RandomStringUtils.randomAlphabetic(this.configuration.getDocumentDataFieldSize());
ArrayList<Flux<PojoizedJson>> createDocumentObservables = new ArrayList<>();
for (int i = 0; i < this.configuration.getNumberOfPreCreatedDocuments(); i++) {
String uuid = UUID.randomUUID().toString();
com.azure.cosmos.benchmark.PojoizedJson newDoc = generateDocument(uuid, dataFieldValue, partitionKey);
Flux<PojoizedJson> obs = cosmosAsyncContainer.createItem(newDoc).map(resp -> {
com.azure.cosmos.benchmark.PojoizedJson x =
resp.getItem();
return x;
}
).flux();
createDocumentObservables.add(obs);
}
docsToRead = Flux.merge(Flux.fromIterable(createDocumentObservables), 100).collectList().block();
MeterRegistry registry = configuration.getAzureMonitorMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
registry = configuration.getGraphiteMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
System.out.println("Client have been initialized with data created for host " + hostAndKey[0]);
} else {
System.out.println("Client have been initialized with host " + hostAndKey[0]);
}
clientDocsMap.put(asyncClient, docsToRead);
}
}
System.out.println("Total number of client created for ReadThroughputWithMultipleClient " + clientDocsMap.size());
} catch (FileNotFoundException e) {
e.printStackTrace();
} catch (IOException e) {
e.printStackTrace();
} finally {
if (br != null) {
try {
br.close();
} catch (IOException e) {
e.printStackTrace();
}
}
}
} | System.out.println("Client have been initialized with data created for host " + hostAndKey[0]); | private void createClients() {
String csvFile = "clientHostAndKey.txt";
String line = "";
String splitBy = ";";
try (BufferedReader br = new BufferedReader(new FileReader(csvFile))) {
while ((line = br.readLine()) != null) {
String[] hostAndKey = line.split(splitBy);
if (hostAndKey.length >= 2) {
String endpoint = hostAndKey[0].substring(hostAndKey[0].indexOf(ACCOUNT_ENDPOINT_TAG) + ACCOUNT_ENDPOINT_TAG.length());
String key = hostAndKey[1].substring(hostAndKey[1].indexOf(ACCOUNT_KEY_TAG) + ACCOUNT_KEY_TAG.length());
CosmosAsyncClient asyncClient = new CosmosClientBuilder()
.endpoint(endpoint)
.key(key)
.connectionPolicy(configuration.getConnectionPolicy())
.consistencyLevel(configuration.getConsistencyLevel())
.connectionReuseAcrossClientsEnabled(true)
.buildAsyncClient();
List<PojoizedJson> docsToRead = new ArrayList<>();
if (!configuration.isDeleteCollections()) {
CosmosAsyncDatabase cosmosAsyncDatabase = asyncClient.createDatabaseIfNotExists(this.configuration.getDatabaseId()).block().getDatabase();
CosmosAsyncContainer cosmosAsyncContainer =
cosmosAsyncDatabase.createContainerIfNotExists(configuration.getCollectionId(), PARTITION_KEY, configuration.getThroughput()).block().getContainer();
String partitionKey = cosmosAsyncContainer.read().block().getProperties().getPartitionKeyDefinition()
.getPaths().iterator().next().split("/")[1];
String dataFieldValue = RandomStringUtils.randomAlphabetic(this.configuration.getDocumentDataFieldSize());
ArrayList<Flux<PojoizedJson>> createDocumentObservables = new ArrayList<>();
for (int i = 0; i < this.configuration.getNumberOfPreCreatedDocuments(); i++) {
String uuid = UUID.randomUUID().toString();
com.azure.cosmos.benchmark.PojoizedJson newDoc = generateDocument(uuid, dataFieldValue, partitionKey);
Flux<PojoizedJson> obs = cosmosAsyncContainer.createItem(newDoc).map(resp -> {
com.azure.cosmos.benchmark.PojoizedJson x =
resp.getItem();
return x;
}
).flux();
createDocumentObservables.add(obs);
}
docsToRead = Flux.merge(Flux.fromIterable(createDocumentObservables), 100).collectList().block();
logger.info("Client have been initialized with data created for host {}", hostAndKey[0]);
} else {
logger.info("Client have been initialized with host {}", hostAndKey[0]);
}
clientDocsMap.put(asyncClient, docsToRead);
}
}
logger.info("Total number of client created for ReadThroughputWithMultipleClient {}", clientDocsMap.size());
} catch (FileNotFoundException e) {
logger.error(e.getMessage());
} catch (IOException e) {
logger.error(e.getMessage());
}
MeterRegistry registry = configuration.getAzureMonitorMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
registry = configuration.getGraphiteMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
} | class AsynReadWithMultipleClients<T> {
private final static String PARTITION_KEY = "/pk";
private final Semaphore concurrencyControlSemaphore;
private final Logger logger;
private final Configuration configuration;
private MetricRegistry metricsRegistry = new MetricRegistry();
private ScheduledReporter reporter;
private Meter successMeter;
private Meter failureMeter;
private Timer latency;
private Map<CosmosAsyncClient, List<PojoizedJson>> clientDocsMap = new HashMap<>();
AsynReadWithMultipleClients(Configuration cfg) {
logger = LoggerFactory.getLogger(this.getClass());
this.configuration = cfg;
createClients();
if (configuration.getGraphiteEndpoint() != null) {
final Graphite graphite = new Graphite(new InetSocketAddress(configuration.getGraphiteEndpoint(), configuration.getGraphiteEndpointPort()));
reporter = GraphiteReporter.forRegistry(metricsRegistry)
.prefixedWith(configuration.getOperationType().name())
.convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS)
.filter(MetricFilter.ALL)
.build(graphite);
} else {
reporter = ConsoleReporter.forRegistry(metricsRegistry).convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS).build();
}
concurrencyControlSemaphore = new Semaphore(cfg.getConcurrency());
}
void deleteCollection() {
try {
for (CosmosAsyncClient cosmosAsyncClient : clientDocsMap.keySet()) {
cosmosAsyncClient.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).delete().block();
}
} catch (CosmosClientException e) {
if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) {
} else {
throw e;
}
} finally {
System.out.println("Container on all client have been deleted successfully");
}
}
void run() throws Exception {
successMeter = metricsRegistry.meter("
failureMeter = metricsRegistry.meter("
latency = metricsRegistry.timer("Latency");
reporter.start(configuration.getPrintingInterval(), TimeUnit.SECONDS);
AtomicLong count = new AtomicLong(0);
long i;
long startTime = System.currentTimeMillis();
for (i = 0; shouldContinue(startTime, i); i++) {
BaseSubscriber<PojoizedJson> baseSubscriber = new BaseSubscriber<PojoizedJson>() {
@Override
protected void hookOnSubscribe(Subscription subscription) {
super.hookOnSubscribe(subscription);
}
@Override
protected void hookOnNext(PojoizedJson value) {
logger.debug("hookOnNext: {}, count:{}", value, count.get());
}
@Override
protected void hookOnCancel() {
this.hookOnError(new CancellationException());
}
@Override
protected void hookOnComplete() {
successMeter.mark();
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onSuccess();
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
@Override
protected void hookOnError(Throwable throwable) {
failureMeter.mark();
logger.error("Encountered failure {} on thread {}",
throwable.getMessage(), Thread.currentThread().getName(), throwable);
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onError(throwable);
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
};
performWorkload(baseSubscriber, i);
}
synchronized (count) {
while (count.get() < i) {
count.wait();
}
}
long endTime = System.currentTimeMillis();
logger.info("[{}] operations performed in [{}] seconds.",
configuration.getNumberOfOperations(), (int) ((endTime - startTime) / 1000));
reporter.report();
reporter.close();
}
void shutdown() {
for (CosmosAsyncClient asyncClient : clientDocsMap.keySet()) {
asyncClient.close();
}
}
protected void onSuccess() {
}
protected void onError(Throwable throwable) {
}
private PojoizedJson generateDocument(String idString, String dataFieldValue, String partitionKey) {
com.azure.cosmos.benchmark.PojoizedJson instance = new com.azure.cosmos.benchmark.PojoizedJson();
Map<String, String> properties = instance.getInstance();
properties.put("id", idString);
properties.put(partitionKey, idString);
for (int i = 0; i < configuration.getDocumentDataFieldCount(); i++) {
properties.put("dataField" + i, dataFieldValue);
}
return instance;
}
private boolean shouldContinue(long startTimeMillis, long iterationCount) {
Duration maxDurationTime = configuration.getMaxRunningTimeDuration();
int maxNumberOfOperations = configuration.getNumberOfOperations();
if (maxDurationTime == null) {
return iterationCount < maxNumberOfOperations;
}
if (startTimeMillis + maxDurationTime.toMillis() < System.currentTimeMillis()) {
return false;
}
if (maxNumberOfOperations < 0) {
return true;
}
return iterationCount < maxNumberOfOperations;
}
private void performWorkload(BaseSubscriber<PojoizedJson> baseSubscriber, long i) throws InterruptedException {
Mono<PojoizedJson> result;
int clientIndex = (int) (i % clientDocsMap.size());
CosmosAsyncClient client = (CosmosAsyncClient) clientDocsMap.keySet().toArray()[clientIndex];
int docIndex = (int) i % clientDocsMap.get(client).size();
PojoizedJson doc = clientDocsMap.get(client).get(docIndex);
String partitionKeyValue = doc.getId();
result = client.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).readItem(doc.getId(),
new PartitionKey(partitionKeyValue),
PojoizedJson.class).map(CosmosAsyncItemResponse::getItem);
concurrencyControlSemaphore.acquire();
AsyncReadBenchmark.LatencySubscriber<PojoizedJson> latencySubscriber = new AsyncReadBenchmark.LatencySubscriber<>(baseSubscriber);
latencySubscriber.context = latency.time();
result.subscribeOn(Schedulers.parallel()).subscribe(latencySubscriber);
}
} | class AsynReadWithMultipleClients<T> {
private final static String PARTITION_KEY = "/pk";
private final static String ACCOUNT_ENDPOINT_TAG = "AccountEndpoint=";
private final static String ACCOUNT_KEY_TAG = "AccountKey=";
private final Semaphore concurrencyControlSemaphore;
private final Logger logger;
private final Configuration configuration;
private MetricRegistry metricsRegistry = new MetricRegistry();
private ScheduledReporter reporter;
private Meter successMeter;
private Meter failureMeter;
private Timer latency;
private Map<CosmosAsyncClient, List<PojoizedJson>> clientDocsMap = new HashMap<>();
AsynReadWithMultipleClients(Configuration cfg) {
logger = LoggerFactory.getLogger(this.getClass());
this.configuration = cfg;
createClients();
if (configuration.getGraphiteEndpoint() != null) {
final Graphite graphite = new Graphite(new InetSocketAddress(configuration.getGraphiteEndpoint(), configuration.getGraphiteEndpointPort()));
reporter = GraphiteReporter.forRegistry(metricsRegistry)
.prefixedWith(configuration.getOperationType().name())
.convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS)
.filter(MetricFilter.ALL)
.build(graphite);
} else {
reporter = ConsoleReporter.forRegistry(metricsRegistry).convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS).build();
}
concurrencyControlSemaphore = new Semaphore(cfg.getConcurrency());
}
void deleteCollection() {
try {
for (CosmosAsyncClient cosmosAsyncClient : clientDocsMap.keySet()) {
cosmosAsyncClient.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).delete().block();
}
} catch (CosmosClientException e) {
if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) {
} else {
throw e;
}
} finally {
logger.info("Container on all client have been deleted successfully");
}
}
void run() throws Exception {
successMeter = metricsRegistry.meter("
failureMeter = metricsRegistry.meter("
latency = metricsRegistry.timer("Latency");
reporter.start(configuration.getPrintingInterval(), TimeUnit.SECONDS);
AtomicLong count = new AtomicLong(0);
long i;
long startTime = System.currentTimeMillis();
for (i = 0; shouldContinue(startTime, i); i++) {
BaseSubscriber<PojoizedJson> baseSubscriber = new BaseSubscriber<PojoizedJson>() {
@Override
protected void hookOnSubscribe(Subscription subscription) {
super.hookOnSubscribe(subscription);
}
@Override
protected void hookOnNext(PojoizedJson value) {
logger.debug("hookOnNext: {}, count:{}", value, count.get());
}
@Override
protected void hookOnCancel() {
this.hookOnError(new CancellationException());
}
@Override
protected void hookOnComplete() {
successMeter.mark();
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onSuccess();
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
@Override
protected void hookOnError(Throwable throwable) {
failureMeter.mark();
logger.error("Encountered failure {} on thread {}",
throwable.getMessage(), Thread.currentThread().getName(), throwable);
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onError(throwable);
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
};
performWorkload(baseSubscriber, i);
}
synchronized (count) {
while (count.get() < i) {
count.wait();
}
}
long endTime = System.currentTimeMillis();
logger.info("[{}] operations performed in [{}] seconds.",
configuration.getNumberOfOperations(), (int) ((endTime - startTime) / 1000));
reporter.report();
reporter.close();
}
void shutdown() {
for (CosmosAsyncClient asyncClient : clientDocsMap.keySet()) {
asyncClient.close();
}
}
protected void onSuccess() {
}
protected void onError(Throwable throwable) {
}
private PojoizedJson generateDocument(String idString, String dataFieldValue, String partitionKey) {
com.azure.cosmos.benchmark.PojoizedJson instance = new com.azure.cosmos.benchmark.PojoizedJson();
Map<String, String> properties = instance.getInstance();
properties.put("id", idString);
properties.put(partitionKey, idString);
for (int i = 0; i < configuration.getDocumentDataFieldCount(); i++) {
properties.put("dataField" + i, dataFieldValue);
}
return instance;
}
private boolean shouldContinue(long startTimeMillis, long iterationCount) {
Duration maxDurationTime = configuration.getMaxRunningTimeDuration();
int maxNumberOfOperations = configuration.getNumberOfOperations();
if (maxDurationTime == null) {
return iterationCount < maxNumberOfOperations;
}
if (startTimeMillis + maxDurationTime.toMillis() < System.currentTimeMillis()) {
return false;
}
if (maxNumberOfOperations < 0) {
return true;
}
return iterationCount < maxNumberOfOperations;
}
private void performWorkload(BaseSubscriber<PojoizedJson> baseSubscriber, long i) throws InterruptedException {
Mono<PojoizedJson> result;
int clientIndex = (int) (i % clientDocsMap.size());
CosmosAsyncClient client = (CosmosAsyncClient) clientDocsMap.keySet().toArray()[clientIndex];
int docIndex = (int) i % clientDocsMap.get(client).size();
PojoizedJson doc = clientDocsMap.get(client).get(docIndex);
String partitionKeyValue = doc.getId();
result = client.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).readItem(doc.getId(),
new PartitionKey(partitionKeyValue),
PojoizedJson.class).map(CosmosAsyncItemResponse::getItem);
concurrencyControlSemaphore.acquire();
AsyncReadBenchmark.LatencySubscriber<PojoizedJson> latencySubscriber = new AsyncReadBenchmark.LatencySubscriber<>(baseSubscriber);
latencySubscriber.context = latency.time();
result.subscribeOn(Schedulers.parallel()).subscribe(latencySubscriber);
}
} |
done | void tryGetValuesFromSystem() {
serviceEndpoint = StringUtils.defaultString(Strings.emptyToNull(System.getenv().get("SERVICE_END_POINT")),
serviceEndpoint);
masterKey = StringUtils.defaultString(Strings.emptyToNull(System.getenv().get("MASTER_KEY")), masterKey);
databaseId = StringUtils.defaultString(Strings.emptyToNull(System.getenv().get("DATABASE_ID")), databaseId);
collectionId = StringUtils.defaultString(Strings.emptyToNull(System.getenv().get("COLLECTION_ID")),
collectionId);
documentDataFieldSize = Integer.parseInt(
StringUtils.defaultString(Strings.emptyToNull(System.getenv().get("DOCUMENT_DATA_FIELD_SIZE")),
Integer.toString(documentDataFieldSize)));
maxConnectionPoolSize = Integer.parseInt(
StringUtils.defaultString(Strings.emptyToNull(System.getenv().get("MAX_CONNECTION_POOL_SIZE")),
Integer.toString(maxConnectionPoolSize)));
ConsistencyLevelConverter consistencyLevelConverter = new ConsistencyLevelConverter();
consistencyLevel = consistencyLevelConverter.convert(StringUtils
.defaultString(Strings.emptyToNull(System.getenv().get("CONSISTENCY_LEVEL")), consistencyLevel.name()));
OperationTypeConverter operationTypeConverter = new OperationTypeConverter();
operation = operationTypeConverter.convert(
StringUtils.defaultString(Strings.emptyToNull(System.getenv().get("OPERATION")), operation.name()));
String concurrencyValue = StringUtils.defaultString(Strings.emptyToNull(System.getenv().get("CONCURRENCY")),
concurrency == null ? null : Integer.toString(concurrency));
concurrency = concurrencyValue == null ? null : Integer.parseInt(concurrencyValue);
String numberOfOperationsValue = StringUtils.defaultString(
Strings.emptyToNull(System.getenv().get("NUMBER_OF_OPERATIONS")), Integer.toString(numberOfOperations));
numberOfOperations = Integer.parseInt(numberOfOperationsValue);
String throughPutForMultiClientValue = StringUtils.defaultString(
Strings.emptyToNull(System.getenv().get("THROUGHPUT_MULTICLIENT")), Integer.toString(throughPutForMultiClient));
throughPutForMultiClient = Integer.parseInt(throughPutForMultiClientValue);
} | String throughPutForMultiClientValue = StringUtils.defaultString( | void tryGetValuesFromSystem() {
serviceEndpoint = StringUtils.defaultString(Strings.emptyToNull(System.getenv().get("SERVICE_END_POINT")),
serviceEndpoint);
masterKey = StringUtils.defaultString(Strings.emptyToNull(System.getenv().get("MASTER_KEY")), masterKey);
databaseId = StringUtils.defaultString(Strings.emptyToNull(System.getenv().get("DATABASE_ID")), databaseId);
collectionId = StringUtils.defaultString(Strings.emptyToNull(System.getenv().get("COLLECTION_ID")),
collectionId);
documentDataFieldSize = Integer.parseInt(
StringUtils.defaultString(Strings.emptyToNull(System.getenv().get("DOCUMENT_DATA_FIELD_SIZE")),
Integer.toString(documentDataFieldSize)));
maxConnectionPoolSize = Integer.parseInt(
StringUtils.defaultString(Strings.emptyToNull(System.getenv().get("MAX_CONNECTION_POOL_SIZE")),
Integer.toString(maxConnectionPoolSize)));
ConsistencyLevelConverter consistencyLevelConverter = new ConsistencyLevelConverter();
consistencyLevel = consistencyLevelConverter.convert(StringUtils
.defaultString(Strings.emptyToNull(System.getenv().get("CONSISTENCY_LEVEL")), consistencyLevel.name()));
OperationTypeConverter operationTypeConverter = new OperationTypeConverter();
operation = operationTypeConverter.convert(
StringUtils.defaultString(Strings.emptyToNull(System.getenv().get("OPERATION")), operation.name()));
String concurrencyValue = StringUtils.defaultString(Strings.emptyToNull(System.getenv().get("CONCURRENCY")),
concurrency == null ? null : Integer.toString(concurrency));
concurrency = concurrencyValue == null ? null : Integer.parseInt(concurrencyValue);
String numberOfOperationsValue = StringUtils.defaultString(
Strings.emptyToNull(System.getenv().get("NUMBER_OF_OPERATIONS")), Integer.toString(numberOfOperations));
numberOfOperations = Integer.parseInt(numberOfOperationsValue);
String throughputValue = StringUtils.defaultString(
Strings.emptyToNull(System.getenv().get("THROUGHPUT")), Integer.toString(throughput));
throughput = Integer.parseInt(throughputValue);
} | class ConsistencyLevelConverter implements IStringConverter<ConsistencyLevel> {
/*
* (non-Javadoc)
*
* @see com.beust.jcommander.IStringConverter
*/
@Override
public ConsistencyLevel convert(String value) {
ConsistencyLevel ret = fromString(value);
if (ret == null) {
throw new ParameterException("Value " + value + " can not be converted to ClientType. "
+ "Available values are: " + Arrays.toString(Operation.values()));
}
return ret;
}
} | class ConsistencyLevelConverter implements IStringConverter<ConsistencyLevel> {
/*
* (non-Javadoc)
*
* @see com.beust.jcommander.IStringConverter
*/
@Override
public ConsistencyLevel convert(String value) {
ConsistencyLevel ret = fromString(value);
if (ret == null) {
throw new ParameterException("Value " + value + " can not be converted to ClientType. "
+ "Available values are: " + Arrays.toString(Operation.values()));
}
return ret;
}
} |
done | private void createClients() {
String csvFile = "clientHostAndKey.txt";
BufferedReader br = null;
String line = "";
String splitBy = ",";
try {
br = new BufferedReader(new FileReader(csvFile));
while ((line = br.readLine()) != null) {
String[] hostAndKey = line.split(splitBy);
if (hostAndKey.length == 2) {
CosmosAsyncClient asyncClient = new CosmosClientBuilder()
.endpoint(hostAndKey[0])
.key(hostAndKey[1])
.connectionPolicy(configuration.getConnectionPolicy())
.consistencyLevel(configuration.getConsistencyLevel())
.connectionReuseAcrossClientsEnabled(true)
.buildAsyncClient();
List<PojoizedJson> docsToRead = new ArrayList<>();
if (!configuration.isDeleteCollections()) {
CosmosAsyncDatabase cosmosAsyncDatabase = asyncClient.createDatabaseIfNotExists(this.configuration.getDatabaseId()).block().getDatabase();
CosmosAsyncContainer cosmosAsyncContainer = cosmosAsyncDatabase.createContainerIfNotExists(configuration.getCollectionId(), PARTITION_KEY, 100000).block().getContainer();
String partitionKey = cosmosAsyncContainer.read().block().getProperties().getPartitionKeyDefinition()
.getPaths().iterator().next().split("/")[1];
String dataFieldValue = RandomStringUtils.randomAlphabetic(this.configuration.getDocumentDataFieldSize());
ArrayList<Flux<PojoizedJson>> createDocumentObservables = new ArrayList<>();
for (int i = 0; i < this.configuration.getNumberOfPreCreatedDocuments(); i++) {
String uuid = UUID.randomUUID().toString();
com.azure.cosmos.benchmark.PojoizedJson newDoc = generateDocument(uuid, dataFieldValue, partitionKey);
Flux<PojoizedJson> obs = cosmosAsyncContainer.createItem(newDoc).map(resp -> {
com.azure.cosmos.benchmark.PojoizedJson x =
resp.getItem();
return x;
}
).flux();
createDocumentObservables.add(obs);
}
docsToRead = Flux.merge(Flux.fromIterable(createDocumentObservables), 100).collectList().block();
MeterRegistry registry = configuration.getAzureMonitorMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
registry = configuration.getGraphiteMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
System.out.println("Client have been initialized with data created for host " + hostAndKey[0]);
} else {
System.out.println("Client have been initialized with host " + hostAndKey[0]);
}
clientDocsMap.put(asyncClient, docsToRead);
}
}
System.out.println("Total number of client created for ReadThroughputWithMultipleClient " + clientDocsMap.size());
} catch (FileNotFoundException e) {
e.printStackTrace();
} catch (IOException e) {
e.printStackTrace();
} finally {
if (br != null) {
try {
br.close();
} catch (IOException e) {
e.printStackTrace();
}
}
}
} | BridgeInternal.monitorTelemetry(registry); | private void createClients() {
String csvFile = "clientHostAndKey.txt";
String line = "";
String splitBy = ";";
try (BufferedReader br = new BufferedReader(new FileReader(csvFile))) {
while ((line = br.readLine()) != null) {
String[] hostAndKey = line.split(splitBy);
if (hostAndKey.length >= 2) {
String endpoint = hostAndKey[0].substring(hostAndKey[0].indexOf(ACCOUNT_ENDPOINT_TAG) + ACCOUNT_ENDPOINT_TAG.length());
String key = hostAndKey[1].substring(hostAndKey[1].indexOf(ACCOUNT_KEY_TAG) + ACCOUNT_KEY_TAG.length());
CosmosAsyncClient asyncClient = new CosmosClientBuilder()
.endpoint(endpoint)
.key(key)
.connectionPolicy(configuration.getConnectionPolicy())
.consistencyLevel(configuration.getConsistencyLevel())
.connectionReuseAcrossClientsEnabled(true)
.buildAsyncClient();
List<PojoizedJson> docsToRead = new ArrayList<>();
if (!configuration.isDeleteCollections()) {
CosmosAsyncDatabase cosmosAsyncDatabase = asyncClient.createDatabaseIfNotExists(this.configuration.getDatabaseId()).block().getDatabase();
CosmosAsyncContainer cosmosAsyncContainer =
cosmosAsyncDatabase.createContainerIfNotExists(configuration.getCollectionId(), PARTITION_KEY, configuration.getThroughput()).block().getContainer();
String partitionKey = cosmosAsyncContainer.read().block().getProperties().getPartitionKeyDefinition()
.getPaths().iterator().next().split("/")[1];
String dataFieldValue = RandomStringUtils.randomAlphabetic(this.configuration.getDocumentDataFieldSize());
ArrayList<Flux<PojoizedJson>> createDocumentObservables = new ArrayList<>();
for (int i = 0; i < this.configuration.getNumberOfPreCreatedDocuments(); i++) {
String uuid = UUID.randomUUID().toString();
com.azure.cosmos.benchmark.PojoizedJson newDoc = generateDocument(uuid, dataFieldValue, partitionKey);
Flux<PojoizedJson> obs = cosmosAsyncContainer.createItem(newDoc).map(resp -> {
com.azure.cosmos.benchmark.PojoizedJson x =
resp.getItem();
return x;
}
).flux();
createDocumentObservables.add(obs);
}
docsToRead = Flux.merge(Flux.fromIterable(createDocumentObservables), 100).collectList().block();
logger.info("Client have been initialized with data created for host {}", hostAndKey[0]);
} else {
logger.info("Client have been initialized with host {}", hostAndKey[0]);
}
clientDocsMap.put(asyncClient, docsToRead);
}
}
logger.info("Total number of client created for ReadThroughputWithMultipleClient {}", clientDocsMap.size());
} catch (FileNotFoundException e) {
logger.error(e.getMessage());
} catch (IOException e) {
logger.error(e.getMessage());
}
MeterRegistry registry = configuration.getAzureMonitorMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
registry = configuration.getGraphiteMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
} | class AsynReadWithMultipleClients<T> {
private final static String PARTITION_KEY = "/pk";
private final Semaphore concurrencyControlSemaphore;
private final Logger logger;
private final Configuration configuration;
private MetricRegistry metricsRegistry = new MetricRegistry();
private ScheduledReporter reporter;
private Meter successMeter;
private Meter failureMeter;
private Timer latency;
private Map<CosmosAsyncClient, List<PojoizedJson>> clientDocsMap = new HashMap<>();
AsynReadWithMultipleClients(Configuration cfg) {
logger = LoggerFactory.getLogger(this.getClass());
this.configuration = cfg;
createClients();
if (configuration.getGraphiteEndpoint() != null) {
final Graphite graphite = new Graphite(new InetSocketAddress(configuration.getGraphiteEndpoint(), configuration.getGraphiteEndpointPort()));
reporter = GraphiteReporter.forRegistry(metricsRegistry)
.prefixedWith(configuration.getOperationType().name())
.convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS)
.filter(MetricFilter.ALL)
.build(graphite);
} else {
reporter = ConsoleReporter.forRegistry(metricsRegistry).convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS).build();
}
concurrencyControlSemaphore = new Semaphore(cfg.getConcurrency());
}
void deleteCollection() {
try {
for (CosmosAsyncClient cosmosAsyncClient : clientDocsMap.keySet()) {
cosmosAsyncClient.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).delete().block();
}
} catch (CosmosClientException e) {
if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) {
} else {
throw e;
}
} finally {
System.out.println("Container on all client have been deleted successfully");
}
}
void run() throws Exception {
successMeter = metricsRegistry.meter("
failureMeter = metricsRegistry.meter("
latency = metricsRegistry.timer("Latency");
reporter.start(configuration.getPrintingInterval(), TimeUnit.SECONDS);
AtomicLong count = new AtomicLong(0);
long i;
long startTime = System.currentTimeMillis();
for (i = 0; shouldContinue(startTime, i); i++) {
BaseSubscriber<PojoizedJson> baseSubscriber = new BaseSubscriber<PojoizedJson>() {
@Override
protected void hookOnSubscribe(Subscription subscription) {
super.hookOnSubscribe(subscription);
}
@Override
protected void hookOnNext(PojoizedJson value) {
logger.debug("hookOnNext: {}, count:{}", value, count.get());
}
@Override
protected void hookOnCancel() {
this.hookOnError(new CancellationException());
}
@Override
protected void hookOnComplete() {
successMeter.mark();
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onSuccess();
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
@Override
protected void hookOnError(Throwable throwable) {
failureMeter.mark();
logger.error("Encountered failure {} on thread {}",
throwable.getMessage(), Thread.currentThread().getName(), throwable);
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onError(throwable);
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
};
performWorkload(baseSubscriber, i);
}
synchronized (count) {
while (count.get() < i) {
count.wait();
}
}
long endTime = System.currentTimeMillis();
logger.info("[{}] operations performed in [{}] seconds.",
configuration.getNumberOfOperations(), (int) ((endTime - startTime) / 1000));
reporter.report();
reporter.close();
}
void shutdown() {
for (CosmosAsyncClient asyncClient : clientDocsMap.keySet()) {
asyncClient.close();
}
}
protected void onSuccess() {
}
protected void onError(Throwable throwable) {
}
private PojoizedJson generateDocument(String idString, String dataFieldValue, String partitionKey) {
com.azure.cosmos.benchmark.PojoizedJson instance = new com.azure.cosmos.benchmark.PojoizedJson();
Map<String, String> properties = instance.getInstance();
properties.put("id", idString);
properties.put(partitionKey, idString);
for (int i = 0; i < configuration.getDocumentDataFieldCount(); i++) {
properties.put("dataField" + i, dataFieldValue);
}
return instance;
}
private boolean shouldContinue(long startTimeMillis, long iterationCount) {
Duration maxDurationTime = configuration.getMaxRunningTimeDuration();
int maxNumberOfOperations = configuration.getNumberOfOperations();
if (maxDurationTime == null) {
return iterationCount < maxNumberOfOperations;
}
if (startTimeMillis + maxDurationTime.toMillis() < System.currentTimeMillis()) {
return false;
}
if (maxNumberOfOperations < 0) {
return true;
}
return iterationCount < maxNumberOfOperations;
}
private void performWorkload(BaseSubscriber<PojoizedJson> baseSubscriber, long i) throws InterruptedException {
Mono<PojoizedJson> result;
int clientIndex = (int) (i % clientDocsMap.size());
CosmosAsyncClient client = (CosmosAsyncClient) clientDocsMap.keySet().toArray()[clientIndex];
int docIndex = (int) i % clientDocsMap.get(client).size();
PojoizedJson doc = clientDocsMap.get(client).get(docIndex);
String partitionKeyValue = doc.getId();
result = client.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).readItem(doc.getId(),
new PartitionKey(partitionKeyValue),
PojoizedJson.class).map(CosmosAsyncItemResponse::getItem);
concurrencyControlSemaphore.acquire();
AsyncReadBenchmark.LatencySubscriber<PojoizedJson> latencySubscriber = new AsyncReadBenchmark.LatencySubscriber<>(baseSubscriber);
latencySubscriber.context = latency.time();
result.subscribeOn(Schedulers.parallel()).subscribe(latencySubscriber);
}
} | class AsynReadWithMultipleClients<T> {
private final static String PARTITION_KEY = "/pk";
private final static String ACCOUNT_ENDPOINT_TAG = "AccountEndpoint=";
private final static String ACCOUNT_KEY_TAG = "AccountKey=";
private final Semaphore concurrencyControlSemaphore;
private final Logger logger;
private final Configuration configuration;
private MetricRegistry metricsRegistry = new MetricRegistry();
private ScheduledReporter reporter;
private Meter successMeter;
private Meter failureMeter;
private Timer latency;
private Map<CosmosAsyncClient, List<PojoizedJson>> clientDocsMap = new HashMap<>();
AsynReadWithMultipleClients(Configuration cfg) {
logger = LoggerFactory.getLogger(this.getClass());
this.configuration = cfg;
createClients();
if (configuration.getGraphiteEndpoint() != null) {
final Graphite graphite = new Graphite(new InetSocketAddress(configuration.getGraphiteEndpoint(), configuration.getGraphiteEndpointPort()));
reporter = GraphiteReporter.forRegistry(metricsRegistry)
.prefixedWith(configuration.getOperationType().name())
.convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS)
.filter(MetricFilter.ALL)
.build(graphite);
} else {
reporter = ConsoleReporter.forRegistry(metricsRegistry).convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS).build();
}
concurrencyControlSemaphore = new Semaphore(cfg.getConcurrency());
}
void deleteCollection() {
try {
for (CosmosAsyncClient cosmosAsyncClient : clientDocsMap.keySet()) {
cosmosAsyncClient.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).delete().block();
}
} catch (CosmosClientException e) {
if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) {
} else {
throw e;
}
} finally {
logger.info("Container on all client have been deleted successfully");
}
}
void run() throws Exception {
successMeter = metricsRegistry.meter("
failureMeter = metricsRegistry.meter("
latency = metricsRegistry.timer("Latency");
reporter.start(configuration.getPrintingInterval(), TimeUnit.SECONDS);
AtomicLong count = new AtomicLong(0);
long i;
long startTime = System.currentTimeMillis();
for (i = 0; shouldContinue(startTime, i); i++) {
BaseSubscriber<PojoizedJson> baseSubscriber = new BaseSubscriber<PojoizedJson>() {
@Override
protected void hookOnSubscribe(Subscription subscription) {
super.hookOnSubscribe(subscription);
}
@Override
protected void hookOnNext(PojoizedJson value) {
logger.debug("hookOnNext: {}, count:{}", value, count.get());
}
@Override
protected void hookOnCancel() {
this.hookOnError(new CancellationException());
}
@Override
protected void hookOnComplete() {
successMeter.mark();
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onSuccess();
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
@Override
protected void hookOnError(Throwable throwable) {
failureMeter.mark();
logger.error("Encountered failure {} on thread {}",
throwable.getMessage(), Thread.currentThread().getName(), throwable);
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onError(throwable);
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
};
performWorkload(baseSubscriber, i);
}
synchronized (count) {
while (count.get() < i) {
count.wait();
}
}
long endTime = System.currentTimeMillis();
logger.info("[{}] operations performed in [{}] seconds.",
configuration.getNumberOfOperations(), (int) ((endTime - startTime) / 1000));
reporter.report();
reporter.close();
}
void shutdown() {
for (CosmosAsyncClient asyncClient : clientDocsMap.keySet()) {
asyncClient.close();
}
}
protected void onSuccess() {
}
protected void onError(Throwable throwable) {
}
private PojoizedJson generateDocument(String idString, String dataFieldValue, String partitionKey) {
com.azure.cosmos.benchmark.PojoizedJson instance = new com.azure.cosmos.benchmark.PojoizedJson();
Map<String, String> properties = instance.getInstance();
properties.put("id", idString);
properties.put(partitionKey, idString);
for (int i = 0; i < configuration.getDocumentDataFieldCount(); i++) {
properties.put("dataField" + i, dataFieldValue);
}
return instance;
}
private boolean shouldContinue(long startTimeMillis, long iterationCount) {
Duration maxDurationTime = configuration.getMaxRunningTimeDuration();
int maxNumberOfOperations = configuration.getNumberOfOperations();
if (maxDurationTime == null) {
return iterationCount < maxNumberOfOperations;
}
if (startTimeMillis + maxDurationTime.toMillis() < System.currentTimeMillis()) {
return false;
}
if (maxNumberOfOperations < 0) {
return true;
}
return iterationCount < maxNumberOfOperations;
}
private void performWorkload(BaseSubscriber<PojoizedJson> baseSubscriber, long i) throws InterruptedException {
Mono<PojoizedJson> result;
int clientIndex = (int) (i % clientDocsMap.size());
CosmosAsyncClient client = (CosmosAsyncClient) clientDocsMap.keySet().toArray()[clientIndex];
int docIndex = (int) i % clientDocsMap.get(client).size();
PojoizedJson doc = clientDocsMap.get(client).get(docIndex);
String partitionKeyValue = doc.getId();
result = client.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).readItem(doc.getId(),
new PartitionKey(partitionKeyValue),
PojoizedJson.class).map(CosmosAsyncItemResponse::getItem);
concurrencyControlSemaphore.acquire();
AsyncReadBenchmark.LatencySubscriber<PojoizedJson> latencySubscriber = new AsyncReadBenchmark.LatencySubscriber<>(baseSubscriber);
latencySubscriber.context = latency.time();
result.subscribeOn(Schedulers.parallel()).subscribe(latencySubscriber);
}
} |
done | private void createClients() {
String csvFile = "clientHostAndKey.txt";
BufferedReader br = null;
String line = "";
String splitBy = ",";
try {
br = new BufferedReader(new FileReader(csvFile));
while ((line = br.readLine()) != null) {
String[] hostAndKey = line.split(splitBy);
if (hostAndKey.length == 2) {
CosmosAsyncClient asyncClient = new CosmosClientBuilder()
.endpoint(hostAndKey[0])
.key(hostAndKey[1])
.connectionPolicy(configuration.getConnectionPolicy())
.consistencyLevel(configuration.getConsistencyLevel())
.connectionReuseAcrossClientsEnabled(true)
.buildAsyncClient();
List<PojoizedJson> docsToRead = new ArrayList<>();
if (!configuration.isDeleteCollections()) {
CosmosAsyncDatabase cosmosAsyncDatabase = asyncClient.createDatabaseIfNotExists(this.configuration.getDatabaseId()).block().getDatabase();
CosmosAsyncContainer cosmosAsyncContainer = cosmosAsyncDatabase.createContainerIfNotExists(configuration.getCollectionId(), PARTITION_KEY, 100000).block().getContainer();
String partitionKey = cosmosAsyncContainer.read().block().getProperties().getPartitionKeyDefinition()
.getPaths().iterator().next().split("/")[1];
String dataFieldValue = RandomStringUtils.randomAlphabetic(this.configuration.getDocumentDataFieldSize());
ArrayList<Flux<PojoizedJson>> createDocumentObservables = new ArrayList<>();
for (int i = 0; i < this.configuration.getNumberOfPreCreatedDocuments(); i++) {
String uuid = UUID.randomUUID().toString();
com.azure.cosmos.benchmark.PojoizedJson newDoc = generateDocument(uuid, dataFieldValue, partitionKey);
Flux<PojoizedJson> obs = cosmosAsyncContainer.createItem(newDoc).map(resp -> {
com.azure.cosmos.benchmark.PojoizedJson x =
resp.getItem();
return x;
}
).flux();
createDocumentObservables.add(obs);
}
docsToRead = Flux.merge(Flux.fromIterable(createDocumentObservables), 100).collectList().block();
MeterRegistry registry = configuration.getAzureMonitorMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
registry = configuration.getGraphiteMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
System.out.println("Client have been initialized with data created for host " + hostAndKey[0]);
} else {
System.out.println("Client have been initialized with host " + hostAndKey[0]);
}
clientDocsMap.put(asyncClient, docsToRead);
}
}
System.out.println("Total number of client created for ReadThroughputWithMultipleClient " + clientDocsMap.size());
} catch (FileNotFoundException e) {
e.printStackTrace();
} catch (IOException e) {
e.printStackTrace();
} finally {
if (br != null) {
try {
br.close();
} catch (IOException e) {
e.printStackTrace();
}
}
}
} | BridgeInternal.monitorTelemetry(registry); | private void createClients() {
String csvFile = "clientHostAndKey.txt";
String line = "";
String splitBy = ";";
try (BufferedReader br = new BufferedReader(new FileReader(csvFile))) {
while ((line = br.readLine()) != null) {
String[] hostAndKey = line.split(splitBy);
if (hostAndKey.length >= 2) {
String endpoint = hostAndKey[0].substring(hostAndKey[0].indexOf(ACCOUNT_ENDPOINT_TAG) + ACCOUNT_ENDPOINT_TAG.length());
String key = hostAndKey[1].substring(hostAndKey[1].indexOf(ACCOUNT_KEY_TAG) + ACCOUNT_KEY_TAG.length());
CosmosAsyncClient asyncClient = new CosmosClientBuilder()
.endpoint(endpoint)
.key(key)
.connectionPolicy(configuration.getConnectionPolicy())
.consistencyLevel(configuration.getConsistencyLevel())
.connectionReuseAcrossClientsEnabled(true)
.buildAsyncClient();
List<PojoizedJson> docsToRead = new ArrayList<>();
if (!configuration.isDeleteCollections()) {
CosmosAsyncDatabase cosmosAsyncDatabase = asyncClient.createDatabaseIfNotExists(this.configuration.getDatabaseId()).block().getDatabase();
CosmosAsyncContainer cosmosAsyncContainer =
cosmosAsyncDatabase.createContainerIfNotExists(configuration.getCollectionId(), PARTITION_KEY, configuration.getThroughput()).block().getContainer();
String partitionKey = cosmosAsyncContainer.read().block().getProperties().getPartitionKeyDefinition()
.getPaths().iterator().next().split("/")[1];
String dataFieldValue = RandomStringUtils.randomAlphabetic(this.configuration.getDocumentDataFieldSize());
ArrayList<Flux<PojoizedJson>> createDocumentObservables = new ArrayList<>();
for (int i = 0; i < this.configuration.getNumberOfPreCreatedDocuments(); i++) {
String uuid = UUID.randomUUID().toString();
com.azure.cosmos.benchmark.PojoizedJson newDoc = generateDocument(uuid, dataFieldValue, partitionKey);
Flux<PojoizedJson> obs = cosmosAsyncContainer.createItem(newDoc).map(resp -> {
com.azure.cosmos.benchmark.PojoizedJson x =
resp.getItem();
return x;
}
).flux();
createDocumentObservables.add(obs);
}
docsToRead = Flux.merge(Flux.fromIterable(createDocumentObservables), 100).collectList().block();
logger.info("Client have been initialized with data created for host {}", hostAndKey[0]);
} else {
logger.info("Client have been initialized with host {}", hostAndKey[0]);
}
clientDocsMap.put(asyncClient, docsToRead);
}
}
logger.info("Total number of client created for ReadThroughputWithMultipleClient {}", clientDocsMap.size());
} catch (FileNotFoundException e) {
logger.error(e.getMessage());
} catch (IOException e) {
logger.error(e.getMessage());
}
MeterRegistry registry = configuration.getAzureMonitorMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
registry = configuration.getGraphiteMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
} | class AsynReadWithMultipleClients<T> {
private final static String PARTITION_KEY = "/pk";
private final Semaphore concurrencyControlSemaphore;
private final Logger logger;
private final Configuration configuration;
private MetricRegistry metricsRegistry = new MetricRegistry();
private ScheduledReporter reporter;
private Meter successMeter;
private Meter failureMeter;
private Timer latency;
private Map<CosmosAsyncClient, List<PojoizedJson>> clientDocsMap = new HashMap<>();
AsynReadWithMultipleClients(Configuration cfg) {
logger = LoggerFactory.getLogger(this.getClass());
this.configuration = cfg;
createClients();
if (configuration.getGraphiteEndpoint() != null) {
final Graphite graphite = new Graphite(new InetSocketAddress(configuration.getGraphiteEndpoint(), configuration.getGraphiteEndpointPort()));
reporter = GraphiteReporter.forRegistry(metricsRegistry)
.prefixedWith(configuration.getOperationType().name())
.convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS)
.filter(MetricFilter.ALL)
.build(graphite);
} else {
reporter = ConsoleReporter.forRegistry(metricsRegistry).convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS).build();
}
concurrencyControlSemaphore = new Semaphore(cfg.getConcurrency());
}
void deleteCollection() {
try {
for (CosmosAsyncClient cosmosAsyncClient : clientDocsMap.keySet()) {
cosmosAsyncClient.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).delete().block();
}
} catch (CosmosClientException e) {
if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) {
} else {
throw e;
}
} finally {
System.out.println("Container on all client have been deleted successfully");
}
}
void run() throws Exception {
successMeter = metricsRegistry.meter("
failureMeter = metricsRegistry.meter("
latency = metricsRegistry.timer("Latency");
reporter.start(configuration.getPrintingInterval(), TimeUnit.SECONDS);
AtomicLong count = new AtomicLong(0);
long i;
long startTime = System.currentTimeMillis();
for (i = 0; shouldContinue(startTime, i); i++) {
BaseSubscriber<PojoizedJson> baseSubscriber = new BaseSubscriber<PojoizedJson>() {
@Override
protected void hookOnSubscribe(Subscription subscription) {
super.hookOnSubscribe(subscription);
}
@Override
protected void hookOnNext(PojoizedJson value) {
logger.debug("hookOnNext: {}, count:{}", value, count.get());
}
@Override
protected void hookOnCancel() {
this.hookOnError(new CancellationException());
}
@Override
protected void hookOnComplete() {
successMeter.mark();
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onSuccess();
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
@Override
protected void hookOnError(Throwable throwable) {
failureMeter.mark();
logger.error("Encountered failure {} on thread {}",
throwable.getMessage(), Thread.currentThread().getName(), throwable);
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onError(throwable);
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
};
performWorkload(baseSubscriber, i);
}
synchronized (count) {
while (count.get() < i) {
count.wait();
}
}
long endTime = System.currentTimeMillis();
logger.info("[{}] operations performed in [{}] seconds.",
configuration.getNumberOfOperations(), (int) ((endTime - startTime) / 1000));
reporter.report();
reporter.close();
}
void shutdown() {
for (CosmosAsyncClient asyncClient : clientDocsMap.keySet()) {
asyncClient.close();
}
}
protected void onSuccess() {
}
protected void onError(Throwable throwable) {
}
private PojoizedJson generateDocument(String idString, String dataFieldValue, String partitionKey) {
com.azure.cosmos.benchmark.PojoizedJson instance = new com.azure.cosmos.benchmark.PojoizedJson();
Map<String, String> properties = instance.getInstance();
properties.put("id", idString);
properties.put(partitionKey, idString);
for (int i = 0; i < configuration.getDocumentDataFieldCount(); i++) {
properties.put("dataField" + i, dataFieldValue);
}
return instance;
}
private boolean shouldContinue(long startTimeMillis, long iterationCount) {
Duration maxDurationTime = configuration.getMaxRunningTimeDuration();
int maxNumberOfOperations = configuration.getNumberOfOperations();
if (maxDurationTime == null) {
return iterationCount < maxNumberOfOperations;
}
if (startTimeMillis + maxDurationTime.toMillis() < System.currentTimeMillis()) {
return false;
}
if (maxNumberOfOperations < 0) {
return true;
}
return iterationCount < maxNumberOfOperations;
}
private void performWorkload(BaseSubscriber<PojoizedJson> baseSubscriber, long i) throws InterruptedException {
Mono<PojoizedJson> result;
int clientIndex = (int) (i % clientDocsMap.size());
CosmosAsyncClient client = (CosmosAsyncClient) clientDocsMap.keySet().toArray()[clientIndex];
int docIndex = (int) i % clientDocsMap.get(client).size();
PojoizedJson doc = clientDocsMap.get(client).get(docIndex);
String partitionKeyValue = doc.getId();
result = client.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).readItem(doc.getId(),
new PartitionKey(partitionKeyValue),
PojoizedJson.class).map(CosmosAsyncItemResponse::getItem);
concurrencyControlSemaphore.acquire();
AsyncReadBenchmark.LatencySubscriber<PojoizedJson> latencySubscriber = new AsyncReadBenchmark.LatencySubscriber<>(baseSubscriber);
latencySubscriber.context = latency.time();
result.subscribeOn(Schedulers.parallel()).subscribe(latencySubscriber);
}
} | class AsynReadWithMultipleClients<T> {
private final static String PARTITION_KEY = "/pk";
private final static String ACCOUNT_ENDPOINT_TAG = "AccountEndpoint=";
private final static String ACCOUNT_KEY_TAG = "AccountKey=";
private final Semaphore concurrencyControlSemaphore;
private final Logger logger;
private final Configuration configuration;
private MetricRegistry metricsRegistry = new MetricRegistry();
private ScheduledReporter reporter;
private Meter successMeter;
private Meter failureMeter;
private Timer latency;
private Map<CosmosAsyncClient, List<PojoizedJson>> clientDocsMap = new HashMap<>();
AsynReadWithMultipleClients(Configuration cfg) {
logger = LoggerFactory.getLogger(this.getClass());
this.configuration = cfg;
createClients();
if (configuration.getGraphiteEndpoint() != null) {
final Graphite graphite = new Graphite(new InetSocketAddress(configuration.getGraphiteEndpoint(), configuration.getGraphiteEndpointPort()));
reporter = GraphiteReporter.forRegistry(metricsRegistry)
.prefixedWith(configuration.getOperationType().name())
.convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS)
.filter(MetricFilter.ALL)
.build(graphite);
} else {
reporter = ConsoleReporter.forRegistry(metricsRegistry).convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS).build();
}
concurrencyControlSemaphore = new Semaphore(cfg.getConcurrency());
}
void deleteCollection() {
try {
for (CosmosAsyncClient cosmosAsyncClient : clientDocsMap.keySet()) {
cosmosAsyncClient.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).delete().block();
}
} catch (CosmosClientException e) {
if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) {
} else {
throw e;
}
} finally {
logger.info("Container on all client have been deleted successfully");
}
}
void run() throws Exception {
successMeter = metricsRegistry.meter("
failureMeter = metricsRegistry.meter("
latency = metricsRegistry.timer("Latency");
reporter.start(configuration.getPrintingInterval(), TimeUnit.SECONDS);
AtomicLong count = new AtomicLong(0);
long i;
long startTime = System.currentTimeMillis();
for (i = 0; shouldContinue(startTime, i); i++) {
BaseSubscriber<PojoizedJson> baseSubscriber = new BaseSubscriber<PojoizedJson>() {
@Override
protected void hookOnSubscribe(Subscription subscription) {
super.hookOnSubscribe(subscription);
}
@Override
protected void hookOnNext(PojoizedJson value) {
logger.debug("hookOnNext: {}, count:{}", value, count.get());
}
@Override
protected void hookOnCancel() {
this.hookOnError(new CancellationException());
}
@Override
protected void hookOnComplete() {
successMeter.mark();
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onSuccess();
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
@Override
protected void hookOnError(Throwable throwable) {
failureMeter.mark();
logger.error("Encountered failure {} on thread {}",
throwable.getMessage(), Thread.currentThread().getName(), throwable);
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onError(throwable);
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
};
performWorkload(baseSubscriber, i);
}
synchronized (count) {
while (count.get() < i) {
count.wait();
}
}
long endTime = System.currentTimeMillis();
logger.info("[{}] operations performed in [{}] seconds.",
configuration.getNumberOfOperations(), (int) ((endTime - startTime) / 1000));
reporter.report();
reporter.close();
}
void shutdown() {
for (CosmosAsyncClient asyncClient : clientDocsMap.keySet()) {
asyncClient.close();
}
}
protected void onSuccess() {
}
protected void onError(Throwable throwable) {
}
private PojoizedJson generateDocument(String idString, String dataFieldValue, String partitionKey) {
com.azure.cosmos.benchmark.PojoizedJson instance = new com.azure.cosmos.benchmark.PojoizedJson();
Map<String, String> properties = instance.getInstance();
properties.put("id", idString);
properties.put(partitionKey, idString);
for (int i = 0; i < configuration.getDocumentDataFieldCount(); i++) {
properties.put("dataField" + i, dataFieldValue);
}
return instance;
}
private boolean shouldContinue(long startTimeMillis, long iterationCount) {
Duration maxDurationTime = configuration.getMaxRunningTimeDuration();
int maxNumberOfOperations = configuration.getNumberOfOperations();
if (maxDurationTime == null) {
return iterationCount < maxNumberOfOperations;
}
if (startTimeMillis + maxDurationTime.toMillis() < System.currentTimeMillis()) {
return false;
}
if (maxNumberOfOperations < 0) {
return true;
}
return iterationCount < maxNumberOfOperations;
}
private void performWorkload(BaseSubscriber<PojoizedJson> baseSubscriber, long i) throws InterruptedException {
Mono<PojoizedJson> result;
int clientIndex = (int) (i % clientDocsMap.size());
CosmosAsyncClient client = (CosmosAsyncClient) clientDocsMap.keySet().toArray()[clientIndex];
int docIndex = (int) i % clientDocsMap.get(client).size();
PojoizedJson doc = clientDocsMap.get(client).get(docIndex);
String partitionKeyValue = doc.getId();
result = client.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).readItem(doc.getId(),
new PartitionKey(partitionKeyValue),
PojoizedJson.class).map(CosmosAsyncItemResponse::getItem);
concurrencyControlSemaphore.acquire();
AsyncReadBenchmark.LatencySubscriber<PojoizedJson> latencySubscriber = new AsyncReadBenchmark.LatencySubscriber<>(baseSubscriber);
latencySubscriber.context = latency.time();
result.subscribeOn(Schedulers.parallel()).subscribe(latencySubscriber);
}
} |
during deleteCollections workload we don't want to create db/collection/data . createClient method during initialization will create client/db/container/data. | private void createClients() {
String csvFile = "clientHostAndKey.txt";
BufferedReader br = null;
String line = "";
String splitBy = ",";
try {
br = new BufferedReader(new FileReader(csvFile));
while ((line = br.readLine()) != null) {
String[] hostAndKey = line.split(splitBy);
if (hostAndKey.length == 2) {
CosmosAsyncClient asyncClient = new CosmosClientBuilder()
.endpoint(hostAndKey[0])
.key(hostAndKey[1])
.connectionPolicy(configuration.getConnectionPolicy())
.consistencyLevel(configuration.getConsistencyLevel())
.connectionReuseAcrossClientsEnabled(true)
.buildAsyncClient();
List<PojoizedJson> docsToRead = new ArrayList<>();
if (!configuration.isDeleteCollections()) {
CosmosAsyncDatabase cosmosAsyncDatabase = asyncClient.createDatabaseIfNotExists(this.configuration.getDatabaseId()).block().getDatabase();
CosmosAsyncContainer cosmosAsyncContainer = cosmosAsyncDatabase.createContainerIfNotExists(configuration.getCollectionId(), PARTITION_KEY, 100000).block().getContainer();
String partitionKey = cosmosAsyncContainer.read().block().getProperties().getPartitionKeyDefinition()
.getPaths().iterator().next().split("/")[1];
String dataFieldValue = RandomStringUtils.randomAlphabetic(this.configuration.getDocumentDataFieldSize());
ArrayList<Flux<PojoizedJson>> createDocumentObservables = new ArrayList<>();
for (int i = 0; i < this.configuration.getNumberOfPreCreatedDocuments(); i++) {
String uuid = UUID.randomUUID().toString();
com.azure.cosmos.benchmark.PojoizedJson newDoc = generateDocument(uuid, dataFieldValue, partitionKey);
Flux<PojoizedJson> obs = cosmosAsyncContainer.createItem(newDoc).map(resp -> {
com.azure.cosmos.benchmark.PojoizedJson x =
resp.getItem();
return x;
}
).flux();
createDocumentObservables.add(obs);
}
docsToRead = Flux.merge(Flux.fromIterable(createDocumentObservables), 100).collectList().block();
MeterRegistry registry = configuration.getAzureMonitorMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
registry = configuration.getGraphiteMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
System.out.println("Client have been initialized with data created for host " + hostAndKey[0]);
} else {
System.out.println("Client have been initialized with host " + hostAndKey[0]);
}
clientDocsMap.put(asyncClient, docsToRead);
}
}
System.out.println("Total number of client created for ReadThroughputWithMultipleClient " + clientDocsMap.size());
} catch (FileNotFoundException e) {
e.printStackTrace();
} catch (IOException e) {
e.printStackTrace();
} finally {
if (br != null) {
try {
br.close();
} catch (IOException e) {
e.printStackTrace();
}
}
}
} | if (!configuration.isDeleteCollections()) { | private void createClients() {
String csvFile = "clientHostAndKey.txt";
String line = "";
String splitBy = ";";
try (BufferedReader br = new BufferedReader(new FileReader(csvFile))) {
while ((line = br.readLine()) != null) {
String[] hostAndKey = line.split(splitBy);
if (hostAndKey.length >= 2) {
String endpoint = hostAndKey[0].substring(hostAndKey[0].indexOf(ACCOUNT_ENDPOINT_TAG) + ACCOUNT_ENDPOINT_TAG.length());
String key = hostAndKey[1].substring(hostAndKey[1].indexOf(ACCOUNT_KEY_TAG) + ACCOUNT_KEY_TAG.length());
CosmosAsyncClient asyncClient = new CosmosClientBuilder()
.endpoint(endpoint)
.key(key)
.connectionPolicy(configuration.getConnectionPolicy())
.consistencyLevel(configuration.getConsistencyLevel())
.connectionReuseAcrossClientsEnabled(true)
.buildAsyncClient();
List<PojoizedJson> docsToRead = new ArrayList<>();
if (!configuration.isDeleteCollections()) {
CosmosAsyncDatabase cosmosAsyncDatabase = asyncClient.createDatabaseIfNotExists(this.configuration.getDatabaseId()).block().getDatabase();
CosmosAsyncContainer cosmosAsyncContainer =
cosmosAsyncDatabase.createContainerIfNotExists(configuration.getCollectionId(), PARTITION_KEY, configuration.getThroughput()).block().getContainer();
String partitionKey = cosmosAsyncContainer.read().block().getProperties().getPartitionKeyDefinition()
.getPaths().iterator().next().split("/")[1];
String dataFieldValue = RandomStringUtils.randomAlphabetic(this.configuration.getDocumentDataFieldSize());
ArrayList<Flux<PojoizedJson>> createDocumentObservables = new ArrayList<>();
for (int i = 0; i < this.configuration.getNumberOfPreCreatedDocuments(); i++) {
String uuid = UUID.randomUUID().toString();
com.azure.cosmos.benchmark.PojoizedJson newDoc = generateDocument(uuid, dataFieldValue, partitionKey);
Flux<PojoizedJson> obs = cosmosAsyncContainer.createItem(newDoc).map(resp -> {
com.azure.cosmos.benchmark.PojoizedJson x =
resp.getItem();
return x;
}
).flux();
createDocumentObservables.add(obs);
}
docsToRead = Flux.merge(Flux.fromIterable(createDocumentObservables), 100).collectList().block();
logger.info("Client have been initialized with data created for host {}", hostAndKey[0]);
} else {
logger.info("Client have been initialized with host {}", hostAndKey[0]);
}
clientDocsMap.put(asyncClient, docsToRead);
}
}
logger.info("Total number of client created for ReadThroughputWithMultipleClient {}", clientDocsMap.size());
} catch (FileNotFoundException e) {
logger.error(e.getMessage());
} catch (IOException e) {
logger.error(e.getMessage());
}
MeterRegistry registry = configuration.getAzureMonitorMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
registry = configuration.getGraphiteMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
} | class AsynReadWithMultipleClients<T> {
private final static String PARTITION_KEY = "/pk";
private final Semaphore concurrencyControlSemaphore;
private final Logger logger;
private final Configuration configuration;
private MetricRegistry metricsRegistry = new MetricRegistry();
private ScheduledReporter reporter;
private Meter successMeter;
private Meter failureMeter;
private Timer latency;
private Map<CosmosAsyncClient, List<PojoizedJson>> clientDocsMap = new HashMap<>();
AsynReadWithMultipleClients(Configuration cfg) {
logger = LoggerFactory.getLogger(this.getClass());
this.configuration = cfg;
createClients();
if (configuration.getGraphiteEndpoint() != null) {
final Graphite graphite = new Graphite(new InetSocketAddress(configuration.getGraphiteEndpoint(), configuration.getGraphiteEndpointPort()));
reporter = GraphiteReporter.forRegistry(metricsRegistry)
.prefixedWith(configuration.getOperationType().name())
.convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS)
.filter(MetricFilter.ALL)
.build(graphite);
} else {
reporter = ConsoleReporter.forRegistry(metricsRegistry).convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS).build();
}
concurrencyControlSemaphore = new Semaphore(cfg.getConcurrency());
}
void deleteCollection() {
try {
for (CosmosAsyncClient cosmosAsyncClient : clientDocsMap.keySet()) {
cosmosAsyncClient.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).delete().block();
}
} catch (CosmosClientException e) {
if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) {
} else {
throw e;
}
} finally {
System.out.println("Container on all client have been deleted successfully");
}
}
void run() throws Exception {
successMeter = metricsRegistry.meter("
failureMeter = metricsRegistry.meter("
latency = metricsRegistry.timer("Latency");
reporter.start(configuration.getPrintingInterval(), TimeUnit.SECONDS);
AtomicLong count = new AtomicLong(0);
long i;
long startTime = System.currentTimeMillis();
for (i = 0; shouldContinue(startTime, i); i++) {
BaseSubscriber<PojoizedJson> baseSubscriber = new BaseSubscriber<PojoizedJson>() {
@Override
protected void hookOnSubscribe(Subscription subscription) {
super.hookOnSubscribe(subscription);
}
@Override
protected void hookOnNext(PojoizedJson value) {
logger.debug("hookOnNext: {}, count:{}", value, count.get());
}
@Override
protected void hookOnCancel() {
this.hookOnError(new CancellationException());
}
@Override
protected void hookOnComplete() {
successMeter.mark();
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onSuccess();
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
@Override
protected void hookOnError(Throwable throwable) {
failureMeter.mark();
logger.error("Encountered failure {} on thread {}",
throwable.getMessage(), Thread.currentThread().getName(), throwable);
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onError(throwable);
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
};
performWorkload(baseSubscriber, i);
}
synchronized (count) {
while (count.get() < i) {
count.wait();
}
}
long endTime = System.currentTimeMillis();
logger.info("[{}] operations performed in [{}] seconds.",
configuration.getNumberOfOperations(), (int) ((endTime - startTime) / 1000));
reporter.report();
reporter.close();
}
void shutdown() {
for (CosmosAsyncClient asyncClient : clientDocsMap.keySet()) {
asyncClient.close();
}
}
protected void onSuccess() {
}
protected void onError(Throwable throwable) {
}
private PojoizedJson generateDocument(String idString, String dataFieldValue, String partitionKey) {
com.azure.cosmos.benchmark.PojoizedJson instance = new com.azure.cosmos.benchmark.PojoizedJson();
Map<String, String> properties = instance.getInstance();
properties.put("id", idString);
properties.put(partitionKey, idString);
for (int i = 0; i < configuration.getDocumentDataFieldCount(); i++) {
properties.put("dataField" + i, dataFieldValue);
}
return instance;
}
private boolean shouldContinue(long startTimeMillis, long iterationCount) {
Duration maxDurationTime = configuration.getMaxRunningTimeDuration();
int maxNumberOfOperations = configuration.getNumberOfOperations();
if (maxDurationTime == null) {
return iterationCount < maxNumberOfOperations;
}
if (startTimeMillis + maxDurationTime.toMillis() < System.currentTimeMillis()) {
return false;
}
if (maxNumberOfOperations < 0) {
return true;
}
return iterationCount < maxNumberOfOperations;
}
private void performWorkload(BaseSubscriber<PojoizedJson> baseSubscriber, long i) throws InterruptedException {
Mono<PojoizedJson> result;
int clientIndex = (int) (i % clientDocsMap.size());
CosmosAsyncClient client = (CosmosAsyncClient) clientDocsMap.keySet().toArray()[clientIndex];
int docIndex = (int) i % clientDocsMap.get(client).size();
PojoizedJson doc = clientDocsMap.get(client).get(docIndex);
String partitionKeyValue = doc.getId();
result = client.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).readItem(doc.getId(),
new PartitionKey(partitionKeyValue),
PojoizedJson.class).map(CosmosAsyncItemResponse::getItem);
concurrencyControlSemaphore.acquire();
AsyncReadBenchmark.LatencySubscriber<PojoizedJson> latencySubscriber = new AsyncReadBenchmark.LatencySubscriber<>(baseSubscriber);
latencySubscriber.context = latency.time();
result.subscribeOn(Schedulers.parallel()).subscribe(latencySubscriber);
}
} | class AsynReadWithMultipleClients<T> {
private final static String PARTITION_KEY = "/pk";
private final static String ACCOUNT_ENDPOINT_TAG = "AccountEndpoint=";
private final static String ACCOUNT_KEY_TAG = "AccountKey=";
private final Semaphore concurrencyControlSemaphore;
private final Logger logger;
private final Configuration configuration;
private MetricRegistry metricsRegistry = new MetricRegistry();
private ScheduledReporter reporter;
private Meter successMeter;
private Meter failureMeter;
private Timer latency;
private Map<CosmosAsyncClient, List<PojoizedJson>> clientDocsMap = new HashMap<>();
AsynReadWithMultipleClients(Configuration cfg) {
logger = LoggerFactory.getLogger(this.getClass());
this.configuration = cfg;
createClients();
if (configuration.getGraphiteEndpoint() != null) {
final Graphite graphite = new Graphite(new InetSocketAddress(configuration.getGraphiteEndpoint(), configuration.getGraphiteEndpointPort()));
reporter = GraphiteReporter.forRegistry(metricsRegistry)
.prefixedWith(configuration.getOperationType().name())
.convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS)
.filter(MetricFilter.ALL)
.build(graphite);
} else {
reporter = ConsoleReporter.forRegistry(metricsRegistry).convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS).build();
}
concurrencyControlSemaphore = new Semaphore(cfg.getConcurrency());
}
void deleteCollection() {
try {
for (CosmosAsyncClient cosmosAsyncClient : clientDocsMap.keySet()) {
cosmosAsyncClient.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).delete().block();
}
} catch (CosmosClientException e) {
if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) {
} else {
throw e;
}
} finally {
logger.info("Container on all client have been deleted successfully");
}
}
void run() throws Exception {
successMeter = metricsRegistry.meter("
failureMeter = metricsRegistry.meter("
latency = metricsRegistry.timer("Latency");
reporter.start(configuration.getPrintingInterval(), TimeUnit.SECONDS);
AtomicLong count = new AtomicLong(0);
long i;
long startTime = System.currentTimeMillis();
for (i = 0; shouldContinue(startTime, i); i++) {
BaseSubscriber<PojoizedJson> baseSubscriber = new BaseSubscriber<PojoizedJson>() {
@Override
protected void hookOnSubscribe(Subscription subscription) {
super.hookOnSubscribe(subscription);
}
@Override
protected void hookOnNext(PojoizedJson value) {
logger.debug("hookOnNext: {}, count:{}", value, count.get());
}
@Override
protected void hookOnCancel() {
this.hookOnError(new CancellationException());
}
@Override
protected void hookOnComplete() {
successMeter.mark();
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onSuccess();
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
@Override
protected void hookOnError(Throwable throwable) {
failureMeter.mark();
logger.error("Encountered failure {} on thread {}",
throwable.getMessage(), Thread.currentThread().getName(), throwable);
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onError(throwable);
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
};
performWorkload(baseSubscriber, i);
}
synchronized (count) {
while (count.get() < i) {
count.wait();
}
}
long endTime = System.currentTimeMillis();
logger.info("[{}] operations performed in [{}] seconds.",
configuration.getNumberOfOperations(), (int) ((endTime - startTime) / 1000));
reporter.report();
reporter.close();
}
void shutdown() {
for (CosmosAsyncClient asyncClient : clientDocsMap.keySet()) {
asyncClient.close();
}
}
// Extension hook invoked after each successful read; no-op by default.
protected void onSuccess() {
}
// Extension hook invoked after each failed read; no-op by default.
protected void onError(Throwable throwable) {
}
/**
 * Builds a synthetic benchmark document: the given id, a partition-key
 * property holding the same id, and the configured number of identical
 * payload fields.
 *
 * @param idString document id (also used as the partition key value)
 * @param dataFieldValue payload stored in every generated data field
 * @param partitionKey name of the partition key property
 * @return the populated document
 */
private PojoizedJson generateDocument(String idString, String dataFieldValue, String partitionKey) {
// PojoizedJson is already in scope (see the return type); the fully
// qualified name the original used was redundant noise.
PojoizedJson instance = new PojoizedJson();
Map<String, String> properties = instance.getInstance();
properties.put("id", idString);
properties.put(partitionKey, idString);
// Hoist the loop-invariant configuration lookup out of the loop.
final int fieldCount = configuration.getDocumentDataFieldCount();
for (int i = 0; i < fieldCount; i++) {
properties.put("dataField" + i, dataFieldValue);
}
return instance;
}
/**
 * Decides whether the workload loop should issue another operation.
 *
 * Semantics (unchanged): with no max duration configured, run until the
 * operation count is reached; with a max duration, stop once it elapses;
 * a negative operation count means "run on the time budget alone".
 *
 * @param startTimeMillis wall-clock time (ms) at which the run started
 * @param iterationCount number of operations issued so far
 * @return {@code true} if another operation should be issued
 */
private boolean shouldContinue(long startTimeMillis, long iterationCount) {
Duration maxDurationTime = configuration.getMaxRunningTimeDuration();
int maxNumberOfOperations = configuration.getNumberOfOperations();
if (maxDurationTime == null) {
return iterationCount < maxNumberOfOperations;
}
// Compare elapsed time instead of the original "start + duration < now":
// a very large configured duration would overflow long and terminate the
// run immediately.
if (System.currentTimeMillis() - startTimeMillis > maxDurationTime.toMillis()) {
return false;
}
if (maxNumberOfOperations < 0) {
return true;
}
return iterationCount < maxNumberOfOperations;
}
/**
 * Issues one asynchronous point read, rotating round-robin over the
 * configured clients and over each client's pre-created documents.
 *
 * @param baseSubscriber subscriber whose hooks record success/failure
 * @param i iteration number; selects the client and the document to read
 * @throws InterruptedException if interrupted while waiting for a permit
 */
private void performWorkload(BaseSubscriber<PojoizedJson> baseSubscriber, long i) throws InterruptedException {
int clientIndex = (int) (i % clientDocsMap.size());
CosmosAsyncClient client = (CosmosAsyncClient) clientDocsMap.keySet().toArray()[clientIndex];
// Apply the modulus BEFORE narrowing to int: the original "(int) i % size"
// cast first, which produces a negative (out-of-bounds) index once i
// exceeds Integer.MAX_VALUE.
int docIndex = (int) (i % clientDocsMap.get(client).size());
PojoizedJson doc = clientDocsMap.get(client).get(docIndex);
String partitionKeyValue = doc.getId();
Mono<PojoizedJson> result = client.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).readItem(doc.getId(),
new PartitionKey(partitionKeyValue),
PojoizedJson.class).map(CosmosAsyncItemResponse::getItem);
// Block until an in-flight slot frees up; permits are released by the
// subscriber's completion/error hooks.
concurrencyControlSemaphore.acquire();
AsyncReadBenchmark.LatencySubscriber<PojoizedJson> latencySubscriber = new AsyncReadBenchmark.LatencySubscriber<>(baseSubscriber);
latencySubscriber.context = latency.time();
result.subscribeOn(Schedulers.parallel()).subscribe(latencySubscriber);
}
} |
done | private void createClients() {
String csvFile = "clientHostAndKey.txt";
BufferedReader br = null;
String line = "";
String splitBy = ",";
try {
br = new BufferedReader(new FileReader(csvFile));
while ((line = br.readLine()) != null) {
String[] hostAndKey = line.split(splitBy);
if (hostAndKey.length == 2) {
CosmosAsyncClient asyncClient = new CosmosClientBuilder()
.endpoint(hostAndKey[0])
.key(hostAndKey[1])
.connectionPolicy(configuration.getConnectionPolicy())
.consistencyLevel(configuration.getConsistencyLevel())
.connectionReuseAcrossClientsEnabled(true)
.buildAsyncClient();
List<PojoizedJson> docsToRead = new ArrayList<>();
if (!configuration.isDeleteCollections()) {
CosmosAsyncDatabase cosmosAsyncDatabase = asyncClient.createDatabaseIfNotExists(this.configuration.getDatabaseId()).block().getDatabase();
CosmosAsyncContainer cosmosAsyncContainer = cosmosAsyncDatabase.createContainerIfNotExists(configuration.getCollectionId(), PARTITION_KEY, 100000).block().getContainer();
String partitionKey = cosmosAsyncContainer.read().block().getProperties().getPartitionKeyDefinition()
.getPaths().iterator().next().split("/")[1];
String dataFieldValue = RandomStringUtils.randomAlphabetic(this.configuration.getDocumentDataFieldSize());
ArrayList<Flux<PojoizedJson>> createDocumentObservables = new ArrayList<>();
for (int i = 0; i < this.configuration.getNumberOfPreCreatedDocuments(); i++) {
String uuid = UUID.randomUUID().toString();
com.azure.cosmos.benchmark.PojoizedJson newDoc = generateDocument(uuid, dataFieldValue, partitionKey);
Flux<PojoizedJson> obs = cosmosAsyncContainer.createItem(newDoc).map(resp -> {
com.azure.cosmos.benchmark.PojoizedJson x =
resp.getItem();
return x;
}
).flux();
createDocumentObservables.add(obs);
}
docsToRead = Flux.merge(Flux.fromIterable(createDocumentObservables), 100).collectList().block();
MeterRegistry registry = configuration.getAzureMonitorMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
registry = configuration.getGraphiteMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
System.out.println("Client have been initialized with data created for host " + hostAndKey[0]);
} else {
System.out.println("Client have been initialized with host " + hostAndKey[0]);
}
clientDocsMap.put(asyncClient, docsToRead);
}
}
System.out.println("Total number of client created for ReadThroughputWithMultipleClient " + clientDocsMap.size());
} catch (FileNotFoundException e) {
e.printStackTrace();
} catch (IOException e) {
e.printStackTrace();
} finally {
if (br != null) {
try {
br.close();
} catch (IOException e) {
e.printStackTrace();
}
}
}
} | e.printStackTrace(); | private void createClients() {
String csvFile = "clientHostAndKey.txt";
String line = "";
String splitBy = ";";
try (BufferedReader br = new BufferedReader(new FileReader(csvFile))) {
while ((line = br.readLine()) != null) {
String[] hostAndKey = line.split(splitBy);
if (hostAndKey.length >= 2) {
String endpoint = hostAndKey[0].substring(hostAndKey[0].indexOf(ACCOUNT_ENDPOINT_TAG) + ACCOUNT_ENDPOINT_TAG.length());
String key = hostAndKey[1].substring(hostAndKey[1].indexOf(ACCOUNT_KEY_TAG) + ACCOUNT_KEY_TAG.length());
CosmosAsyncClient asyncClient = new CosmosClientBuilder()
.endpoint(endpoint)
.key(key)
.connectionPolicy(configuration.getConnectionPolicy())
.consistencyLevel(configuration.getConsistencyLevel())
.connectionReuseAcrossClientsEnabled(true)
.buildAsyncClient();
List<PojoizedJson> docsToRead = new ArrayList<>();
if (!configuration.isDeleteCollections()) {
CosmosAsyncDatabase cosmosAsyncDatabase = asyncClient.createDatabaseIfNotExists(this.configuration.getDatabaseId()).block().getDatabase();
CosmosAsyncContainer cosmosAsyncContainer =
cosmosAsyncDatabase.createContainerIfNotExists(configuration.getCollectionId(), PARTITION_KEY, configuration.getThroughput()).block().getContainer();
String partitionKey = cosmosAsyncContainer.read().block().getProperties().getPartitionKeyDefinition()
.getPaths().iterator().next().split("/")[1];
String dataFieldValue = RandomStringUtils.randomAlphabetic(this.configuration.getDocumentDataFieldSize());
ArrayList<Flux<PojoizedJson>> createDocumentObservables = new ArrayList<>();
for (int i = 0; i < this.configuration.getNumberOfPreCreatedDocuments(); i++) {
String uuid = UUID.randomUUID().toString();
com.azure.cosmos.benchmark.PojoizedJson newDoc = generateDocument(uuid, dataFieldValue, partitionKey);
Flux<PojoizedJson> obs = cosmosAsyncContainer.createItem(newDoc).map(resp -> {
com.azure.cosmos.benchmark.PojoizedJson x =
resp.getItem();
return x;
}
).flux();
createDocumentObservables.add(obs);
}
docsToRead = Flux.merge(Flux.fromIterable(createDocumentObservables), 100).collectList().block();
logger.info("Client have been initialized with data created for host {}", hostAndKey[0]);
} else {
logger.info("Client have been initialized with host {}", hostAndKey[0]);
}
clientDocsMap.put(asyncClient, docsToRead);
}
}
logger.info("Total number of client created for ReadThroughputWithMultipleClient {}", clientDocsMap.size());
} catch (FileNotFoundException e) {
logger.error(e.getMessage());
} catch (IOException e) {
logger.error(e.getMessage());
}
MeterRegistry registry = configuration.getAzureMonitorMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
registry = configuration.getGraphiteMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
} | class AsynReadWithMultipleClients<T> {
// Partition key path used when the benchmark creates its container.
private final static String PARTITION_KEY = "/pk";
// Bounds the number of reads in flight at any one time (see performWorkload).
private final Semaphore concurrencyControlSemaphore;
private final Logger logger;
private final Configuration configuration;
// Dropwizard registry backing the success/failure meters and latency timer.
private MetricRegistry metricsRegistry = new MetricRegistry();
// Publishes metrics to Graphite when configured, otherwise to the console.
private ScheduledReporter reporter;
private Meter successMeter;
private Meter failureMeter;
private Timer latency;
// One entry per Cosmos account read from clientHostAndKey.txt, mapping each
// client to the documents pre-created for it to read.
private Map<CosmosAsyncClient, List<PojoizedJson>> clientDocsMap = new HashMap<>();
/**
 * Wires up the benchmark: creates one async client per configured account
 * (side effect of createClients(), which populates clientDocsMap), then a
 * metrics reporter and the concurrency-limiting semaphore.
 */
AsynReadWithMultipleClients(Configuration cfg) {
logger = LoggerFactory.getLogger(this.getClass());
this.configuration = cfg;
// Side-effectful: builds the clients and pre-creates documents.
createClients();
if (configuration.getGraphiteEndpoint() != null) {
final Graphite graphite = new Graphite(new InetSocketAddress(configuration.getGraphiteEndpoint(), configuration.getGraphiteEndpointPort()));
reporter = GraphiteReporter.forRegistry(metricsRegistry)
.prefixedWith(configuration.getOperationType().name())
.convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS)
.filter(MetricFilter.ALL)
.build(graphite);
} else {
// No Graphite endpoint configured: report to the console instead.
reporter = ConsoleReporter.forRegistry(metricsRegistry).convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS).build();
}
concurrencyControlSemaphore = new Semaphore(cfg.getConcurrency());
}
/**
 * Deletes the benchmark container on every configured client. A 404 from
 * the service is tolerated: it means the container is already gone, which
 * is the desired end state.
 */
void deleteCollection() {
try {
for (CosmosAsyncClient cosmosAsyncClient : clientDocsMap.keySet()) {
cosmosAsyncClient.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).delete().block();
}
// Log success only after every delete went through — the original logged
// from a finally block, claiming success even when the exception below
// was rethrown. Uses the class logger instead of System.out for
// consistency with the rest of the class.
logger.info("Container on all client have been deleted successfully");
} catch (CosmosClientException e) {
// Inverted from the original's empty "if (NOTFOUND) {} else throw"
// branch: tolerate 404, propagate everything else.
if (e.getStatusCode() != HttpConstants.StatusCodes.NOTFOUND) {
throw e;
}
}
}
void run() throws Exception {
successMeter = metricsRegistry.meter("
failureMeter = metricsRegistry.meter("
latency = metricsRegistry.timer("Latency");
reporter.start(configuration.getPrintingInterval(), TimeUnit.SECONDS);
AtomicLong count = new AtomicLong(0);
long i;
long startTime = System.currentTimeMillis();
for (i = 0; shouldContinue(startTime, i); i++) {
BaseSubscriber<PojoizedJson> baseSubscriber = new BaseSubscriber<PojoizedJson>() {
@Override
protected void hookOnSubscribe(Subscription subscription) {
super.hookOnSubscribe(subscription);
}
@Override
protected void hookOnNext(PojoizedJson value) {
logger.debug("hookOnNext: {}, count:{}", value, count.get());
}
@Override
protected void hookOnCancel() {
this.hookOnError(new CancellationException());
}
@Override
protected void hookOnComplete() {
successMeter.mark();
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onSuccess();
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
@Override
protected void hookOnError(Throwable throwable) {
failureMeter.mark();
logger.error("Encountered failure {} on thread {}",
throwable.getMessage(), Thread.currentThread().getName(), throwable);
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onError(throwable);
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
};
performWorkload(baseSubscriber, i);
}
synchronized (count) {
while (count.get() < i) {
count.wait();
}
}
long endTime = System.currentTimeMillis();
logger.info("[{}] operations performed in [{}] seconds.",
configuration.getNumberOfOperations(), (int) ((endTime - startTime) / 1000));
reporter.report();
reporter.close();
}
void shutdown() {
for (CosmosAsyncClient asyncClient : clientDocsMap.keySet()) {
asyncClient.close();
}
}
protected void onSuccess() {
}
protected void onError(Throwable throwable) {
}
private PojoizedJson generateDocument(String idString, String dataFieldValue, String partitionKey) {
com.azure.cosmos.benchmark.PojoizedJson instance = new com.azure.cosmos.benchmark.PojoizedJson();
Map<String, String> properties = instance.getInstance();
properties.put("id", idString);
properties.put(partitionKey, idString);
for (int i = 0; i < configuration.getDocumentDataFieldCount(); i++) {
properties.put("dataField" + i, dataFieldValue);
}
return instance;
}
private boolean shouldContinue(long startTimeMillis, long iterationCount) {
Duration maxDurationTime = configuration.getMaxRunningTimeDuration();
int maxNumberOfOperations = configuration.getNumberOfOperations();
if (maxDurationTime == null) {
return iterationCount < maxNumberOfOperations;
}
if (startTimeMillis + maxDurationTime.toMillis() < System.currentTimeMillis()) {
return false;
}
if (maxNumberOfOperations < 0) {
return true;
}
return iterationCount < maxNumberOfOperations;
}
private void performWorkload(BaseSubscriber<PojoizedJson> baseSubscriber, long i) throws InterruptedException {
Mono<PojoizedJson> result;
int clientIndex = (int) (i % clientDocsMap.size());
CosmosAsyncClient client = (CosmosAsyncClient) clientDocsMap.keySet().toArray()[clientIndex];
int docIndex = (int) i % clientDocsMap.get(client).size();
PojoizedJson doc = clientDocsMap.get(client).get(docIndex);
String partitionKeyValue = doc.getId();
result = client.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).readItem(doc.getId(),
new PartitionKey(partitionKeyValue),
PojoizedJson.class).map(CosmosAsyncItemResponse::getItem);
concurrencyControlSemaphore.acquire();
AsyncReadBenchmark.LatencySubscriber<PojoizedJson> latencySubscriber = new AsyncReadBenchmark.LatencySubscriber<>(baseSubscriber);
latencySubscriber.context = latency.time();
result.subscribeOn(Schedulers.parallel()).subscribe(latencySubscriber);
}
} | class AsynReadWithMultipleClients<T> {
private final static String PARTITION_KEY = "/pk";
private final static String ACCOUNT_ENDPOINT_TAG = "AccountEndpoint=";
private final static String ACCOUNT_KEY_TAG = "AccountKey=";
private final Semaphore concurrencyControlSemaphore;
private final Logger logger;
private final Configuration configuration;
private MetricRegistry metricsRegistry = new MetricRegistry();
private ScheduledReporter reporter;
private Meter successMeter;
private Meter failureMeter;
private Timer latency;
private Map<CosmosAsyncClient, List<PojoizedJson>> clientDocsMap = new HashMap<>();
AsynReadWithMultipleClients(Configuration cfg) {
logger = LoggerFactory.getLogger(this.getClass());
this.configuration = cfg;
createClients();
if (configuration.getGraphiteEndpoint() != null) {
final Graphite graphite = new Graphite(new InetSocketAddress(configuration.getGraphiteEndpoint(), configuration.getGraphiteEndpointPort()));
reporter = GraphiteReporter.forRegistry(metricsRegistry)
.prefixedWith(configuration.getOperationType().name())
.convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS)
.filter(MetricFilter.ALL)
.build(graphite);
} else {
reporter = ConsoleReporter.forRegistry(metricsRegistry).convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS).build();
}
concurrencyControlSemaphore = new Semaphore(cfg.getConcurrency());
}
/**
 * Deletes the benchmark container on every configured client. A 404 from
 * the service is tolerated: it means the container is already gone, which
 * is the desired end state.
 */
void deleteCollection() {
try {
for (CosmosAsyncClient cosmosAsyncClient : clientDocsMap.keySet()) {
cosmosAsyncClient.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).delete().block();
}
// Log success only after every delete went through — the original logged
// from a finally block, claiming success even when the exception below
// was rethrown.
logger.info("Container on all client have been deleted successfully");
} catch (CosmosClientException e) {
// Inverted from the original's empty "if (NOTFOUND) {} else throw"
// branch: tolerate 404, propagate everything else.
if (e.getStatusCode() != HttpConstants.StatusCodes.NOTFOUND) {
throw e;
}
}
}
void run() throws Exception {
successMeter = metricsRegistry.meter("
failureMeter = metricsRegistry.meter("
latency = metricsRegistry.timer("Latency");
reporter.start(configuration.getPrintingInterval(), TimeUnit.SECONDS);
AtomicLong count = new AtomicLong(0);
long i;
long startTime = System.currentTimeMillis();
for (i = 0; shouldContinue(startTime, i); i++) {
BaseSubscriber<PojoizedJson> baseSubscriber = new BaseSubscriber<PojoizedJson>() {
@Override
protected void hookOnSubscribe(Subscription subscription) {
super.hookOnSubscribe(subscription);
}
@Override
protected void hookOnNext(PojoizedJson value) {
logger.debug("hookOnNext: {}, count:{}", value, count.get());
}
@Override
protected void hookOnCancel() {
this.hookOnError(new CancellationException());
}
@Override
protected void hookOnComplete() {
successMeter.mark();
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onSuccess();
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
@Override
protected void hookOnError(Throwable throwable) {
failureMeter.mark();
logger.error("Encountered failure {} on thread {}",
throwable.getMessage(), Thread.currentThread().getName(), throwable);
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onError(throwable);
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
};
performWorkload(baseSubscriber, i);
}
synchronized (count) {
while (count.get() < i) {
count.wait();
}
}
long endTime = System.currentTimeMillis();
logger.info("[{}] operations performed in [{}] seconds.",
configuration.getNumberOfOperations(), (int) ((endTime - startTime) / 1000));
reporter.report();
reporter.close();
}
/**
 * Releases every Cosmos async client held by this benchmark instance.
 */
void shutdown() {
clientDocsMap.keySet().forEach(CosmosAsyncClient::close);
}
protected void onSuccess() {
}
protected void onError(Throwable throwable) {
}
/**
 * Builds a synthetic benchmark document: the given id, a partition-key
 * property holding the same id, and the configured number of identical
 * payload fields.
 *
 * @param idString document id (also used as the partition key value)
 * @param dataFieldValue payload stored in every generated data field
 * @param partitionKey name of the partition key property
 * @return the populated document
 */
private PojoizedJson generateDocument(String idString, String dataFieldValue, String partitionKey) {
// PojoizedJson is already in scope (see the return type); the fully
// qualified name the original used was redundant noise.
PojoizedJson instance = new PojoizedJson();
Map<String, String> properties = instance.getInstance();
properties.put("id", idString);
properties.put(partitionKey, idString);
// Hoist the loop-invariant configuration lookup out of the loop.
final int fieldCount = configuration.getDocumentDataFieldCount();
for (int i = 0; i < fieldCount; i++) {
properties.put("dataField" + i, dataFieldValue);
}
return instance;
}
/**
 * Decides whether the workload loop should issue another operation.
 *
 * Semantics (unchanged): with no max duration configured, run until the
 * operation count is reached; with a max duration, stop once it elapses;
 * a negative operation count means "run on the time budget alone".
 *
 * @param startTimeMillis wall-clock time (ms) at which the run started
 * @param iterationCount number of operations issued so far
 * @return {@code true} if another operation should be issued
 */
private boolean shouldContinue(long startTimeMillis, long iterationCount) {
Duration maxDurationTime = configuration.getMaxRunningTimeDuration();
int maxNumberOfOperations = configuration.getNumberOfOperations();
if (maxDurationTime == null) {
return iterationCount < maxNumberOfOperations;
}
// Compare elapsed time instead of the original "start + duration < now":
// a very large configured duration would overflow long and terminate the
// run immediately.
if (System.currentTimeMillis() - startTimeMillis > maxDurationTime.toMillis()) {
return false;
}
if (maxNumberOfOperations < 0) {
return true;
}
return iterationCount < maxNumberOfOperations;
}
/**
 * Issues one asynchronous point read, rotating round-robin over the
 * configured clients and over each client's pre-created documents.
 *
 * @param baseSubscriber subscriber whose hooks record success/failure
 * @param i iteration number; selects the client and the document to read
 * @throws InterruptedException if interrupted while waiting for a permit
 */
private void performWorkload(BaseSubscriber<PojoizedJson> baseSubscriber, long i) throws InterruptedException {
int clientIndex = (int) (i % clientDocsMap.size());
CosmosAsyncClient client = (CosmosAsyncClient) clientDocsMap.keySet().toArray()[clientIndex];
// Apply the modulus BEFORE narrowing to int: the original "(int) i % size"
// cast first, which produces a negative (out-of-bounds) index once i
// exceeds Integer.MAX_VALUE.
int docIndex = (int) (i % clientDocsMap.get(client).size());
PojoizedJson doc = clientDocsMap.get(client).get(docIndex);
String partitionKeyValue = doc.getId();
Mono<PojoizedJson> result = client.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).readItem(doc.getId(),
new PartitionKey(partitionKeyValue),
PojoizedJson.class).map(CosmosAsyncItemResponse::getItem);
// Block until an in-flight slot frees up; permits are released by the
// subscriber's completion/error hooks.
concurrencyControlSemaphore.acquire();
AsyncReadBenchmark.LatencySubscriber<PojoizedJson> latencySubscriber = new AsyncReadBenchmark.LatencySubscriber<>(baseSubscriber);
latencySubscriber.context = latency.time();
result.subscribeOn(Schedulers.parallel()).subscribe(latencySubscriber);
}
} |
done | private void createClients() {
String csvFile = "clientHostAndKey.txt";
BufferedReader br = null;
String line = "";
String splitBy = ",";
try {
br = new BufferedReader(new FileReader(csvFile));
while ((line = br.readLine()) != null) {
String[] hostAndKey = line.split(splitBy);
if (hostAndKey.length == 2) {
CosmosAsyncClient asyncClient = new CosmosClientBuilder()
.endpoint(hostAndKey[0])
.key(hostAndKey[1])
.connectionPolicy(configuration.getConnectionPolicy())
.consistencyLevel(configuration.getConsistencyLevel())
.connectionReuseAcrossClientsEnabled(true)
.buildAsyncClient();
List<PojoizedJson> docsToRead = new ArrayList<>();
if (!configuration.isDeleteCollections()) {
CosmosAsyncDatabase cosmosAsyncDatabase = asyncClient.createDatabaseIfNotExists(this.configuration.getDatabaseId()).block().getDatabase();
CosmosAsyncContainer cosmosAsyncContainer = cosmosAsyncDatabase.createContainerIfNotExists(configuration.getCollectionId(), PARTITION_KEY, 100000).block().getContainer();
String partitionKey = cosmosAsyncContainer.read().block().getProperties().getPartitionKeyDefinition()
.getPaths().iterator().next().split("/")[1];
String dataFieldValue = RandomStringUtils.randomAlphabetic(this.configuration.getDocumentDataFieldSize());
ArrayList<Flux<PojoizedJson>> createDocumentObservables = new ArrayList<>();
for (int i = 0; i < this.configuration.getNumberOfPreCreatedDocuments(); i++) {
String uuid = UUID.randomUUID().toString();
com.azure.cosmos.benchmark.PojoizedJson newDoc = generateDocument(uuid, dataFieldValue, partitionKey);
Flux<PojoizedJson> obs = cosmosAsyncContainer.createItem(newDoc).map(resp -> {
com.azure.cosmos.benchmark.PojoizedJson x =
resp.getItem();
return x;
}
).flux();
createDocumentObservables.add(obs);
}
docsToRead = Flux.merge(Flux.fromIterable(createDocumentObservables), 100).collectList().block();
MeterRegistry registry = configuration.getAzureMonitorMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
registry = configuration.getGraphiteMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
System.out.println("Client have been initialized with data created for host " + hostAndKey[0]);
} else {
System.out.println("Client have been initialized with host " + hostAndKey[0]);
}
clientDocsMap.put(asyncClient, docsToRead);
}
}
System.out.println("Total number of client created for ReadThroughputWithMultipleClient " + clientDocsMap.size());
} catch (FileNotFoundException e) {
e.printStackTrace();
} catch (IOException e) {
e.printStackTrace();
} finally {
if (br != null) {
try {
br.close();
} catch (IOException e) {
e.printStackTrace();
}
}
}
} | try { | private void createClients() {
String csvFile = "clientHostAndKey.txt";
String line = "";
String splitBy = ";";
try (BufferedReader br = new BufferedReader(new FileReader(csvFile))) {
while ((line = br.readLine()) != null) {
String[] hostAndKey = line.split(splitBy);
if (hostAndKey.length >= 2) {
String endpoint = hostAndKey[0].substring(hostAndKey[0].indexOf(ACCOUNT_ENDPOINT_TAG) + ACCOUNT_ENDPOINT_TAG.length());
String key = hostAndKey[1].substring(hostAndKey[1].indexOf(ACCOUNT_KEY_TAG) + ACCOUNT_KEY_TAG.length());
CosmosAsyncClient asyncClient = new CosmosClientBuilder()
.endpoint(endpoint)
.key(key)
.connectionPolicy(configuration.getConnectionPolicy())
.consistencyLevel(configuration.getConsistencyLevel())
.connectionReuseAcrossClientsEnabled(true)
.buildAsyncClient();
List<PojoizedJson> docsToRead = new ArrayList<>();
if (!configuration.isDeleteCollections()) {
CosmosAsyncDatabase cosmosAsyncDatabase = asyncClient.createDatabaseIfNotExists(this.configuration.getDatabaseId()).block().getDatabase();
CosmosAsyncContainer cosmosAsyncContainer =
cosmosAsyncDatabase.createContainerIfNotExists(configuration.getCollectionId(), PARTITION_KEY, configuration.getThroughput()).block().getContainer();
String partitionKey = cosmosAsyncContainer.read().block().getProperties().getPartitionKeyDefinition()
.getPaths().iterator().next().split("/")[1];
String dataFieldValue = RandomStringUtils.randomAlphabetic(this.configuration.getDocumentDataFieldSize());
ArrayList<Flux<PojoizedJson>> createDocumentObservables = new ArrayList<>();
for (int i = 0; i < this.configuration.getNumberOfPreCreatedDocuments(); i++) {
String uuid = UUID.randomUUID().toString();
com.azure.cosmos.benchmark.PojoizedJson newDoc = generateDocument(uuid, dataFieldValue, partitionKey);
Flux<PojoizedJson> obs = cosmosAsyncContainer.createItem(newDoc).map(resp -> {
com.azure.cosmos.benchmark.PojoizedJson x =
resp.getItem();
return x;
}
).flux();
createDocumentObservables.add(obs);
}
docsToRead = Flux.merge(Flux.fromIterable(createDocumentObservables), 100).collectList().block();
logger.info("Client have been initialized with data created for host {}", hostAndKey[0]);
} else {
logger.info("Client have been initialized with host {}", hostAndKey[0]);
}
clientDocsMap.put(asyncClient, docsToRead);
}
}
logger.info("Total number of client created for ReadThroughputWithMultipleClient {}", clientDocsMap.size());
} catch (FileNotFoundException e) {
logger.error(e.getMessage());
} catch (IOException e) {
logger.error(e.getMessage());
}
MeterRegistry registry = configuration.getAzureMonitorMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
registry = configuration.getGraphiteMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
} | class AsynReadWithMultipleClients<T> {
private final static String PARTITION_KEY = "/pk";
private final Semaphore concurrencyControlSemaphore;
private final Logger logger;
private final Configuration configuration;
private MetricRegistry metricsRegistry = new MetricRegistry();
private ScheduledReporter reporter;
private Meter successMeter;
private Meter failureMeter;
private Timer latency;
private Map<CosmosAsyncClient, List<PojoizedJson>> clientDocsMap = new HashMap<>();
AsynReadWithMultipleClients(Configuration cfg) {
logger = LoggerFactory.getLogger(this.getClass());
this.configuration = cfg;
createClients();
if (configuration.getGraphiteEndpoint() != null) {
final Graphite graphite = new Graphite(new InetSocketAddress(configuration.getGraphiteEndpoint(), configuration.getGraphiteEndpointPort()));
reporter = GraphiteReporter.forRegistry(metricsRegistry)
.prefixedWith(configuration.getOperationType().name())
.convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS)
.filter(MetricFilter.ALL)
.build(graphite);
} else {
reporter = ConsoleReporter.forRegistry(metricsRegistry).convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS).build();
}
concurrencyControlSemaphore = new Semaphore(cfg.getConcurrency());
}
void deleteCollection() {
try {
for (CosmosAsyncClient cosmosAsyncClient : clientDocsMap.keySet()) {
cosmosAsyncClient.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).delete().block();
}
} catch (CosmosClientException e) {
if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) {
} else {
throw e;
}
} finally {
System.out.println("Container on all client have been deleted successfully");
}
}
void run() throws Exception {
successMeter = metricsRegistry.meter("
failureMeter = metricsRegistry.meter("
latency = metricsRegistry.timer("Latency");
reporter.start(configuration.getPrintingInterval(), TimeUnit.SECONDS);
AtomicLong count = new AtomicLong(0);
long i;
long startTime = System.currentTimeMillis();
for (i = 0; shouldContinue(startTime, i); i++) {
BaseSubscriber<PojoizedJson> baseSubscriber = new BaseSubscriber<PojoizedJson>() {
@Override
protected void hookOnSubscribe(Subscription subscription) {
super.hookOnSubscribe(subscription);
}
@Override
protected void hookOnNext(PojoizedJson value) {
logger.debug("hookOnNext: {}, count:{}", value, count.get());
}
@Override
protected void hookOnCancel() {
this.hookOnError(new CancellationException());
}
@Override
protected void hookOnComplete() {
successMeter.mark();
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onSuccess();
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
@Override
protected void hookOnError(Throwable throwable) {
failureMeter.mark();
logger.error("Encountered failure {} on thread {}",
throwable.getMessage(), Thread.currentThread().getName(), throwable);
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onError(throwable);
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
};
performWorkload(baseSubscriber, i);
}
synchronized (count) {
while (count.get() < i) {
count.wait();
}
}
long endTime = System.currentTimeMillis();
logger.info("[{}] operations performed in [{}] seconds.",
configuration.getNumberOfOperations(), (int) ((endTime - startTime) / 1000));
reporter.report();
reporter.close();
}
void shutdown() {
for (CosmosAsyncClient asyncClient : clientDocsMap.keySet()) {
asyncClient.close();
}
}
protected void onSuccess() {
}
protected void onError(Throwable throwable) {
}
private PojoizedJson generateDocument(String idString, String dataFieldValue, String partitionKey) {
com.azure.cosmos.benchmark.PojoizedJson instance = new com.azure.cosmos.benchmark.PojoizedJson();
Map<String, String> properties = instance.getInstance();
properties.put("id", idString);
properties.put(partitionKey, idString);
for (int i = 0; i < configuration.getDocumentDataFieldCount(); i++) {
properties.put("dataField" + i, dataFieldValue);
}
return instance;
}
private boolean shouldContinue(long startTimeMillis, long iterationCount) {
Duration maxDurationTime = configuration.getMaxRunningTimeDuration();
int maxNumberOfOperations = configuration.getNumberOfOperations();
if (maxDurationTime == null) {
return iterationCount < maxNumberOfOperations;
}
if (startTimeMillis + maxDurationTime.toMillis() < System.currentTimeMillis()) {
return false;
}
if (maxNumberOfOperations < 0) {
return true;
}
return iterationCount < maxNumberOfOperations;
}
private void performWorkload(BaseSubscriber<PojoizedJson> baseSubscriber, long i) throws InterruptedException {
Mono<PojoizedJson> result;
int clientIndex = (int) (i % clientDocsMap.size());
CosmosAsyncClient client = (CosmosAsyncClient) clientDocsMap.keySet().toArray()[clientIndex];
int docIndex = (int) i % clientDocsMap.get(client).size();
PojoizedJson doc = clientDocsMap.get(client).get(docIndex);
String partitionKeyValue = doc.getId();
result = client.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).readItem(doc.getId(),
new PartitionKey(partitionKeyValue),
PojoizedJson.class).map(CosmosAsyncItemResponse::getItem);
concurrencyControlSemaphore.acquire();
AsyncReadBenchmark.LatencySubscriber<PojoizedJson> latencySubscriber = new AsyncReadBenchmark.LatencySubscriber<>(baseSubscriber);
latencySubscriber.context = latency.time();
result.subscribeOn(Schedulers.parallel()).subscribe(latencySubscriber);
}
} | class AsynReadWithMultipleClients<T> {
private final static String PARTITION_KEY = "/pk";
private final static String ACCOUNT_ENDPOINT_TAG = "AccountEndpoint=";
private final static String ACCOUNT_KEY_TAG = "AccountKey=";
private final Semaphore concurrencyControlSemaphore;
private final Logger logger;
private final Configuration configuration;
private MetricRegistry metricsRegistry = new MetricRegistry();
private ScheduledReporter reporter;
private Meter successMeter;
private Meter failureMeter;
private Timer latency;
private Map<CosmosAsyncClient, List<PojoizedJson>> clientDocsMap = new HashMap<>();
AsynReadWithMultipleClients(Configuration cfg) {
logger = LoggerFactory.getLogger(this.getClass());
this.configuration = cfg;
createClients();
if (configuration.getGraphiteEndpoint() != null) {
final Graphite graphite = new Graphite(new InetSocketAddress(configuration.getGraphiteEndpoint(), configuration.getGraphiteEndpointPort()));
reporter = GraphiteReporter.forRegistry(metricsRegistry)
.prefixedWith(configuration.getOperationType().name())
.convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS)
.filter(MetricFilter.ALL)
.build(graphite);
} else {
reporter = ConsoleReporter.forRegistry(metricsRegistry).convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS).build();
}
concurrencyControlSemaphore = new Semaphore(cfg.getConcurrency());
}
void deleteCollection() {
try {
for (CosmosAsyncClient cosmosAsyncClient : clientDocsMap.keySet()) {
cosmosAsyncClient.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).delete().block();
}
} catch (CosmosClientException e) {
if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) {
} else {
throw e;
}
} finally {
logger.info("Container on all client have been deleted successfully");
}
}
void run() throws Exception {
successMeter = metricsRegistry.meter("
failureMeter = metricsRegistry.meter("
latency = metricsRegistry.timer("Latency");
reporter.start(configuration.getPrintingInterval(), TimeUnit.SECONDS);
AtomicLong count = new AtomicLong(0);
long i;
long startTime = System.currentTimeMillis();
for (i = 0; shouldContinue(startTime, i); i++) {
BaseSubscriber<PojoizedJson> baseSubscriber = new BaseSubscriber<PojoizedJson>() {
@Override
protected void hookOnSubscribe(Subscription subscription) {
super.hookOnSubscribe(subscription);
}
@Override
protected void hookOnNext(PojoizedJson value) {
logger.debug("hookOnNext: {}, count:{}", value, count.get());
}
@Override
protected void hookOnCancel() {
this.hookOnError(new CancellationException());
}
@Override
protected void hookOnComplete() {
successMeter.mark();
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onSuccess();
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
@Override
protected void hookOnError(Throwable throwable) {
failureMeter.mark();
logger.error("Encountered failure {} on thread {}",
throwable.getMessage(), Thread.currentThread().getName(), throwable);
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onError(throwable);
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
};
performWorkload(baseSubscriber, i);
}
synchronized (count) {
while (count.get() < i) {
count.wait();
}
}
long endTime = System.currentTimeMillis();
logger.info("[{}] operations performed in [{}] seconds.",
configuration.getNumberOfOperations(), (int) ((endTime - startTime) / 1000));
reporter.report();
reporter.close();
}
/** Closes every Cosmos async client that {@code createClients()} opened. */
void shutdown() {
    clientDocsMap.keySet().forEach(client -> client.close());
}
// Hook for subclasses to observe each successful operation; no-op by default.
protected void onSuccess() {
}
// Hook for subclasses to observe each failed operation; no-op by default.
protected void onError(Throwable throwable) {
}
/**
 * Builds one benchmark document: the "id" property and the partition-key
 * property are both set to {@code idString}, plus N identical payload fields
 * named "dataField0".."dataField(N-1)" holding {@code dataFieldValue}.
 */
private PojoizedJson generateDocument(String idString, String dataFieldValue, String partitionKey) {
    com.azure.cosmos.benchmark.PojoizedJson document = new com.azure.cosmos.benchmark.PojoizedJson();
    Map<String, String> fields = document.getInstance();
    fields.put("id", idString);
    // The partition-key value mirrors the id, so point reads can use the id as the key.
    fields.put(partitionKey, idString);
    int payloadFieldCount = configuration.getDocumentDataFieldCount();
    for (int fieldIndex = 0; fieldIndex < payloadFieldCount; fieldIndex++) {
        fields.put("dataField" + fieldIndex, dataFieldValue);
    }
    return document;
}
/**
 * Decides whether another operation should be issued, based on the configured
 * operation cap and the optional wall-clock limit.
 */
private boolean shouldContinue(long startTimeMillis, long iterationCount) {
    int maxNumberOfOperations = configuration.getNumberOfOperations();
    Duration maxDurationTime = configuration.getMaxRunningTimeDuration();
    // No time limit configured: only the operation count bounds the run.
    if (maxDurationTime == null) {
        return iterationCount < maxNumberOfOperations;
    }
    // Time limit configured and already exceeded: stop regardless of the count.
    boolean deadlinePassed = startTimeMillis + maxDurationTime.toMillis() < System.currentTimeMillis();
    if (deadlinePassed) {
        return false;
    }
    // A negative cap means "unlimited operations" while within the time limit.
    return maxNumberOfOperations < 0 || iterationCount < maxNumberOfOperations;
}
// Issues one asynchronous point-read, round-robining over the clients and over
// each client's pre-created documents; acquires the concurrency semaphore before
// subscribing so at most getConcurrency() reads are in flight at once, and wraps
// the subscriber so per-operation latency is recorded in the `latency` timer.
private void performWorkload(BaseSubscriber<PojoizedJson> baseSubscriber, long i) throws InterruptedException {
Mono<PojoizedJson> result;
// Select the client for this iteration (round-robin over the key set).
int clientIndex = (int) (i % clientDocsMap.size());
CosmosAsyncClient client = (CosmosAsyncClient) clientDocsMap.keySet().toArray()[clientIndex];
// Select one of that client's pre-created documents (round-robin as well).
int docIndex = (int) i % clientDocsMap.get(client).size();
PojoizedJson doc = clientDocsMap.get(client).get(docIndex);
// Documents are created with partition-key value == id (see generateDocument).
String partitionKeyValue = doc.getId();
result = client.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).readItem(doc.getId(),
new PartitionKey(partitionKeyValue),
PojoizedJson.class).map(CosmosAsyncItemResponse::getItem);
// Acquired here; released in the subscriber's hookOnComplete/hookOnError.
concurrencyControlSemaphore.acquire();
AsyncReadBenchmark.LatencySubscriber<PojoizedJson> latencySubscriber = new AsyncReadBenchmark.LatencySubscriber<>(baseSubscriber);
latencySubscriber.context = latency.time();
result.subscribeOn(Schedulers.parallel()).subscribe(latencySubscriber);
}
} |
you still have e.printStackTrace() throughout the file. please fix everywhere. | private void createClients() {
// Reads one "host,key" pair per line from clientHostAndKey.txt and builds a
// CosmosAsyncClient per pair; unless collections are being deleted, it also
// pre-creates the documents each client will read during the benchmark and
// registers telemetry meter registries. Fixes vs. original: try-with-resources
// instead of a manual finally/close (the reader can no longer leak), and the
// class logger replaces e.printStackTrace()/System.out.println.
String csvFile = "clientHostAndKey.txt";
String line = "";
String splitBy = ",";
try (BufferedReader br = new BufferedReader(new FileReader(csvFile))) {
    while ((line = br.readLine()) != null) {
        String[] hostAndKey = line.split(splitBy);
        if (hostAndKey.length == 2) {
            CosmosAsyncClient asyncClient = new CosmosClientBuilder()
                .endpoint(hostAndKey[0])
                .key(hostAndKey[1])
                .connectionPolicy(configuration.getConnectionPolicy())
                .consistencyLevel(configuration.getConsistencyLevel())
                .connectionReuseAcrossClientsEnabled(true)
                .buildAsyncClient();
            List<PojoizedJson> docsToRead = new ArrayList<>();
            if (!configuration.isDeleteCollections()) {
                CosmosAsyncDatabase cosmosAsyncDatabase = asyncClient.createDatabaseIfNotExists(this.configuration.getDatabaseId()).block().getDatabase();
                // NOTE(review): throughput is hard-coded to 100000 RU/s; consider
                // sourcing it from Configuration as later revisions do.
                CosmosAsyncContainer cosmosAsyncContainer = cosmosAsyncDatabase.createContainerIfNotExists(configuration.getCollectionId(), PARTITION_KEY, 100000).block().getContainer();
                // Partition-key property name, e.g. "pk" from the path "/pk".
                String partitionKey = cosmosAsyncContainer.read().block().getProperties().getPartitionKeyDefinition()
                    .getPaths().iterator().next().split("/")[1];
                String dataFieldValue = RandomStringUtils.randomAlphabetic(this.configuration.getDocumentDataFieldSize());
                ArrayList<Flux<PojoizedJson>> createDocumentObservables = new ArrayList<>();
                for (int i = 0; i < this.configuration.getNumberOfPreCreatedDocuments(); i++) {
                    String uuid = UUID.randomUUID().toString();
                    com.azure.cosmos.benchmark.PojoizedJson newDoc = generateDocument(uuid, dataFieldValue, partitionKey);
                    // One single-element Flux per create; merged below with bounded parallelism.
                    Flux<PojoizedJson> obs = cosmosAsyncContainer.createItem(newDoc).map(resp -> resp.getItem()).flux();
                    createDocumentObservables.add(obs);
                }
                // Pre-create documents with at most 100 concurrent inner publishers.
                docsToRead = Flux.merge(Flux.fromIterable(createDocumentObservables), 100).collectList().block();
                MeterRegistry registry = configuration.getAzureMonitorMeterRegistry();
                if (registry != null) {
                    BridgeInternal.monitorTelemetry(registry);
                }
                registry = configuration.getGraphiteMeterRegistry();
                if (registry != null) {
                    BridgeInternal.monitorTelemetry(registry);
                }
                logger.info("Client have been initialized with data created for host {}", hostAndKey[0]);
            } else {
                logger.info("Client have been initialized with host {}", hostAndKey[0]);
            }
            clientDocsMap.put(asyncClient, docsToRead);
        }
    }
    logger.info("Total number of client created for ReadThroughputWithMultipleClient {}", clientDocsMap.size());
} catch (IOException e) {
    // FileNotFoundException is an IOException subtype; one handler covers both.
    logger.error("Failed to read client host/key file {}", csvFile, e);
}
} | e.printStackTrace(); | private void createClients() {
String csvFile = "clientHostAndKey.txt";
String line = "";
String splitBy = ";";
try (BufferedReader br = new BufferedReader(new FileReader(csvFile))) {
while ((line = br.readLine()) != null) {
String[] hostAndKey = line.split(splitBy);
if (hostAndKey.length >= 2) {
String endpoint = hostAndKey[0].substring(hostAndKey[0].indexOf(ACCOUNT_ENDPOINT_TAG) + ACCOUNT_ENDPOINT_TAG.length());
String key = hostAndKey[1].substring(hostAndKey[1].indexOf(ACCOUNT_KEY_TAG) + ACCOUNT_KEY_TAG.length());
CosmosAsyncClient asyncClient = new CosmosClientBuilder()
.endpoint(endpoint)
.key(key)
.connectionPolicy(configuration.getConnectionPolicy())
.consistencyLevel(configuration.getConsistencyLevel())
.connectionReuseAcrossClientsEnabled(true)
.buildAsyncClient();
List<PojoizedJson> docsToRead = new ArrayList<>();
if (!configuration.isDeleteCollections()) {
CosmosAsyncDatabase cosmosAsyncDatabase = asyncClient.createDatabaseIfNotExists(this.configuration.getDatabaseId()).block().getDatabase();
CosmosAsyncContainer cosmosAsyncContainer =
cosmosAsyncDatabase.createContainerIfNotExists(configuration.getCollectionId(), PARTITION_KEY, configuration.getThroughput()).block().getContainer();
String partitionKey = cosmosAsyncContainer.read().block().getProperties().getPartitionKeyDefinition()
.getPaths().iterator().next().split("/")[1];
String dataFieldValue = RandomStringUtils.randomAlphabetic(this.configuration.getDocumentDataFieldSize());
ArrayList<Flux<PojoizedJson>> createDocumentObservables = new ArrayList<>();
for (int i = 0; i < this.configuration.getNumberOfPreCreatedDocuments(); i++) {
String uuid = UUID.randomUUID().toString();
com.azure.cosmos.benchmark.PojoizedJson newDoc = generateDocument(uuid, dataFieldValue, partitionKey);
Flux<PojoizedJson> obs = cosmosAsyncContainer.createItem(newDoc).map(resp -> {
com.azure.cosmos.benchmark.PojoizedJson x =
resp.getItem();
return x;
}
).flux();
createDocumentObservables.add(obs);
}
docsToRead = Flux.merge(Flux.fromIterable(createDocumentObservables), 100).collectList().block();
logger.info("Client have been initialized with data created for host {}", hostAndKey[0]);
} else {
logger.info("Client have been initialized with host {}", hostAndKey[0]);
}
clientDocsMap.put(asyncClient, docsToRead);
}
}
logger.info("Total number of client created for ReadThroughputWithMultipleClient {}", clientDocsMap.size());
} catch (FileNotFoundException e) {
logger.error(e.getMessage());
} catch (IOException e) {
logger.error(e.getMessage());
}
MeterRegistry registry = configuration.getAzureMonitorMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
registry = configuration.getGraphiteMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
} | class AsynReadWithMultipleClients<T> {
private final static String PARTITION_KEY = "/pk";
private final Semaphore concurrencyControlSemaphore;
private final Logger logger;
private final Configuration configuration;
private MetricRegistry metricsRegistry = new MetricRegistry();
private ScheduledReporter reporter;
private Meter successMeter;
private Meter failureMeter;
private Timer latency;
private Map<CosmosAsyncClient, List<PojoizedJson>> clientDocsMap = new HashMap<>();
AsynReadWithMultipleClients(Configuration cfg) {
logger = LoggerFactory.getLogger(this.getClass());
this.configuration = cfg;
createClients();
if (configuration.getGraphiteEndpoint() != null) {
final Graphite graphite = new Graphite(new InetSocketAddress(configuration.getGraphiteEndpoint(), configuration.getGraphiteEndpointPort()));
reporter = GraphiteReporter.forRegistry(metricsRegistry)
.prefixedWith(configuration.getOperationType().name())
.convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS)
.filter(MetricFilter.ALL)
.build(graphite);
} else {
reporter = ConsoleReporter.forRegistry(metricsRegistry).convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS).build();
}
concurrencyControlSemaphore = new Semaphore(cfg.getConcurrency());
}
/**
 * Deletes the benchmark container on every client's account. A 404 (container
 * already absent) is treated as success; any other {@code CosmosClientException}
 * is propagated. Uses the class logger instead of System.out, consistent with
 * the rest of the class.
 */
void deleteCollection() {
    try {
        for (CosmosAsyncClient cosmosAsyncClient : clientDocsMap.keySet()) {
            cosmosAsyncClient.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).delete().block();
        }
    } catch (CosmosClientException e) {
        // Only swallow NOTFOUND (already deleted); anything else is a real failure.
        if (e.getStatusCode() != HttpConstants.StatusCodes.NOTFOUND) {
            throw e;
        }
    }
    // Only reached on success; the original logged this from finally even on failure.
    logger.info("Container on all client have been deleted successfully");
}
void run() throws Exception {
successMeter = metricsRegistry.meter("
failureMeter = metricsRegistry.meter("
latency = metricsRegistry.timer("Latency");
reporter.start(configuration.getPrintingInterval(), TimeUnit.SECONDS);
AtomicLong count = new AtomicLong(0);
long i;
long startTime = System.currentTimeMillis();
for (i = 0; shouldContinue(startTime, i); i++) {
BaseSubscriber<PojoizedJson> baseSubscriber = new BaseSubscriber<PojoizedJson>() {
@Override
protected void hookOnSubscribe(Subscription subscription) {
super.hookOnSubscribe(subscription);
}
@Override
protected void hookOnNext(PojoizedJson value) {
logger.debug("hookOnNext: {}, count:{}", value, count.get());
}
@Override
protected void hookOnCancel() {
this.hookOnError(new CancellationException());
}
@Override
protected void hookOnComplete() {
successMeter.mark();
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onSuccess();
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
@Override
protected void hookOnError(Throwable throwable) {
failureMeter.mark();
logger.error("Encountered failure {} on thread {}",
throwable.getMessage(), Thread.currentThread().getName(), throwable);
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onError(throwable);
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
};
performWorkload(baseSubscriber, i);
}
synchronized (count) {
while (count.get() < i) {
count.wait();
}
}
long endTime = System.currentTimeMillis();
logger.info("[{}] operations performed in [{}] seconds.",
configuration.getNumberOfOperations(), (int) ((endTime - startTime) / 1000));
reporter.report();
reporter.close();
}
void shutdown() {
for (CosmosAsyncClient asyncClient : clientDocsMap.keySet()) {
asyncClient.close();
}
}
protected void onSuccess() {
}
protected void onError(Throwable throwable) {
}
private PojoizedJson generateDocument(String idString, String dataFieldValue, String partitionKey) {
com.azure.cosmos.benchmark.PojoizedJson instance = new com.azure.cosmos.benchmark.PojoizedJson();
Map<String, String> properties = instance.getInstance();
properties.put("id", idString);
properties.put(partitionKey, idString);
for (int i = 0; i < configuration.getDocumentDataFieldCount(); i++) {
properties.put("dataField" + i, dataFieldValue);
}
return instance;
}
private boolean shouldContinue(long startTimeMillis, long iterationCount) {
Duration maxDurationTime = configuration.getMaxRunningTimeDuration();
int maxNumberOfOperations = configuration.getNumberOfOperations();
if (maxDurationTime == null) {
return iterationCount < maxNumberOfOperations;
}
if (startTimeMillis + maxDurationTime.toMillis() < System.currentTimeMillis()) {
return false;
}
if (maxNumberOfOperations < 0) {
return true;
}
return iterationCount < maxNumberOfOperations;
}
private void performWorkload(BaseSubscriber<PojoizedJson> baseSubscriber, long i) throws InterruptedException {
Mono<PojoizedJson> result;
int clientIndex = (int) (i % clientDocsMap.size());
CosmosAsyncClient client = (CosmosAsyncClient) clientDocsMap.keySet().toArray()[clientIndex];
int docIndex = (int) i % clientDocsMap.get(client).size();
PojoizedJson doc = clientDocsMap.get(client).get(docIndex);
String partitionKeyValue = doc.getId();
result = client.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).readItem(doc.getId(),
new PartitionKey(partitionKeyValue),
PojoizedJson.class).map(CosmosAsyncItemResponse::getItem);
concurrencyControlSemaphore.acquire();
AsyncReadBenchmark.LatencySubscriber<PojoizedJson> latencySubscriber = new AsyncReadBenchmark.LatencySubscriber<>(baseSubscriber);
latencySubscriber.context = latency.time();
result.subscribeOn(Schedulers.parallel()).subscribe(latencySubscriber);
}
} | class AsynReadWithMultipleClients<T> {
private final static String PARTITION_KEY = "/pk";
private final static String ACCOUNT_ENDPOINT_TAG = "AccountEndpoint=";
private final static String ACCOUNT_KEY_TAG = "AccountKey=";
private final Semaphore concurrencyControlSemaphore;
private final Logger logger;
private final Configuration configuration;
private MetricRegistry metricsRegistry = new MetricRegistry();
private ScheduledReporter reporter;
private Meter successMeter;
private Meter failureMeter;
private Timer latency;
private Map<CosmosAsyncClient, List<PojoizedJson>> clientDocsMap = new HashMap<>();
AsynReadWithMultipleClients(Configuration cfg) {
logger = LoggerFactory.getLogger(this.getClass());
this.configuration = cfg;
createClients();
if (configuration.getGraphiteEndpoint() != null) {
final Graphite graphite = new Graphite(new InetSocketAddress(configuration.getGraphiteEndpoint(), configuration.getGraphiteEndpointPort()));
reporter = GraphiteReporter.forRegistry(metricsRegistry)
.prefixedWith(configuration.getOperationType().name())
.convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS)
.filter(MetricFilter.ALL)
.build(graphite);
} else {
reporter = ConsoleReporter.forRegistry(metricsRegistry).convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS).build();
}
concurrencyControlSemaphore = new Semaphore(cfg.getConcurrency());
}
void deleteCollection() {
try {
for (CosmosAsyncClient cosmosAsyncClient : clientDocsMap.keySet()) {
cosmosAsyncClient.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).delete().block();
}
} catch (CosmosClientException e) {
if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) {
} else {
throw e;
}
} finally {
logger.info("Container on all client have been deleted successfully");
}
}
void run() throws Exception {
successMeter = metricsRegistry.meter("
failureMeter = metricsRegistry.meter("
latency = metricsRegistry.timer("Latency");
reporter.start(configuration.getPrintingInterval(), TimeUnit.SECONDS);
AtomicLong count = new AtomicLong(0);
long i;
long startTime = System.currentTimeMillis();
for (i = 0; shouldContinue(startTime, i); i++) {
BaseSubscriber<PojoizedJson> baseSubscriber = new BaseSubscriber<PojoizedJson>() {
@Override
protected void hookOnSubscribe(Subscription subscription) {
super.hookOnSubscribe(subscription);
}
@Override
protected void hookOnNext(PojoizedJson value) {
logger.debug("hookOnNext: {}, count:{}", value, count.get());
}
@Override
protected void hookOnCancel() {
this.hookOnError(new CancellationException());
}
@Override
protected void hookOnComplete() {
successMeter.mark();
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onSuccess();
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
@Override
protected void hookOnError(Throwable throwable) {
failureMeter.mark();
logger.error("Encountered failure {} on thread {}",
throwable.getMessage(), Thread.currentThread().getName(), throwable);
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onError(throwable);
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
};
performWorkload(baseSubscriber, i);
}
synchronized (count) {
while (count.get() < i) {
count.wait();
}
}
long endTime = System.currentTimeMillis();
logger.info("[{}] operations performed in [{}] seconds.",
configuration.getNumberOfOperations(), (int) ((endTime - startTime) / 1000));
reporter.report();
reporter.close();
}
void shutdown() {
for (CosmosAsyncClient asyncClient : clientDocsMap.keySet()) {
asyncClient.close();
}
}
protected void onSuccess() {
}
protected void onError(Throwable throwable) {
}
private PojoizedJson generateDocument(String idString, String dataFieldValue, String partitionKey) {
com.azure.cosmos.benchmark.PojoizedJson instance = new com.azure.cosmos.benchmark.PojoizedJson();
Map<String, String> properties = instance.getInstance();
properties.put("id", idString);
properties.put(partitionKey, idString);
for (int i = 0; i < configuration.getDocumentDataFieldCount(); i++) {
properties.put("dataField" + i, dataFieldValue);
}
return instance;
}
private boolean shouldContinue(long startTimeMillis, long iterationCount) {
Duration maxDurationTime = configuration.getMaxRunningTimeDuration();
int maxNumberOfOperations = configuration.getNumberOfOperations();
if (maxDurationTime == null) {
return iterationCount < maxNumberOfOperations;
}
if (startTimeMillis + maxDurationTime.toMillis() < System.currentTimeMillis()) {
return false;
}
if (maxNumberOfOperations < 0) {
return true;
}
return iterationCount < maxNumberOfOperations;
}
private void performWorkload(BaseSubscriber<PojoizedJson> baseSubscriber, long i) throws InterruptedException {
Mono<PojoizedJson> result;
int clientIndex = (int) (i % clientDocsMap.size());
CosmosAsyncClient client = (CosmosAsyncClient) clientDocsMap.keySet().toArray()[clientIndex];
int docIndex = (int) i % clientDocsMap.get(client).size();
PojoizedJson doc = clientDocsMap.get(client).get(docIndex);
String partitionKeyValue = doc.getId();
result = client.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).readItem(doc.getId(),
new PartitionKey(partitionKeyValue),
PojoizedJson.class).map(CosmosAsyncItemResponse::getItem);
concurrencyControlSemaphore.acquire();
AsyncReadBenchmark.LatencySubscriber<PojoizedJson> latencySubscriber = new AsyncReadBenchmark.LatencySubscriber<>(baseSubscriber);
latencySubscriber.context = latency.time();
result.subscribeOn(Schedulers.parallel()).subscribe(latencySubscriber);
}
} |
ditto, we should be using logger.error | private void createClients() {
// Parses connection-string fragments ("AccountEndpoint=...;AccountKey=...") from
// clientHostAndKey.txt, builds one CosmosAsyncClient per line and, unless
// collections are being deleted, pre-creates the documents each client will
// read. Fix vs. original: the catch blocks used e.printStackTrace(); they now
// log through the class logger with the exception as the cause.
String csvFile = "clientHostAndKey.txt";
String line = "";
String splitBy = ";";
try (BufferedReader br = new BufferedReader(new FileReader(csvFile))) {
    while ((line = br.readLine()) != null) {
        String[] hostAndKey = line.split(splitBy);
        if (hostAndKey.length >= 2) {
            // NOTE(review): if a tag is missing, indexOf() returns -1 and the
            // substring silently yields a garbled value — consider validating.
            String endpoint = hostAndKey[0].substring(hostAndKey[0].indexOf(ACCOUNT_ENDPOINT_TAG) + ACCOUNT_ENDPOINT_TAG.length());
            String key = hostAndKey[1].substring(hostAndKey[1].indexOf(ACCOUNT_KEY_TAG) + ACCOUNT_KEY_TAG.length());
            CosmosAsyncClient asyncClient = new CosmosClientBuilder()
                .endpoint(endpoint)
                .key(key)
                .connectionPolicy(configuration.getConnectionPolicy())
                .consistencyLevel(configuration.getConsistencyLevel())
                .connectionReuseAcrossClientsEnabled(true)
                .buildAsyncClient();
            List<PojoizedJson> docsToRead = new ArrayList<>();
            if (!configuration.isDeleteCollections()) {
                CosmosAsyncDatabase cosmosAsyncDatabase = asyncClient.createDatabaseIfNotExists(this.configuration.getDatabaseId()).block().getDatabase();
                // NOTE(review): throughput hard-coded to 100000 RU/s; later
                // revisions read configuration.getThroughput() — confirm intent.
                CosmosAsyncContainer cosmosAsyncContainer = cosmosAsyncDatabase.createContainerIfNotExists(configuration.getCollectionId(), PARTITION_KEY, 100000).block().getContainer();
                // Partition-key property name, e.g. "pk" from the path "/pk".
                String partitionKey = cosmosAsyncContainer.read().block().getProperties().getPartitionKeyDefinition()
                    .getPaths().iterator().next().split("/")[1];
                String dataFieldValue = RandomStringUtils.randomAlphabetic(this.configuration.getDocumentDataFieldSize());
                ArrayList<Flux<PojoizedJson>> createDocumentObservables = new ArrayList<>();
                for (int i = 0; i < this.configuration.getNumberOfPreCreatedDocuments(); i++) {
                    String uuid = UUID.randomUUID().toString();
                    com.azure.cosmos.benchmark.PojoizedJson newDoc = generateDocument(uuid, dataFieldValue, partitionKey);
                    // One single-element Flux per create; merged below with bounded parallelism.
                    Flux<PojoizedJson> obs = cosmosAsyncContainer.createItem(newDoc).map(resp -> resp.getItem()).flux();
                    createDocumentObservables.add(obs);
                }
                // Pre-create documents with at most 100 concurrent inner publishers.
                docsToRead = Flux.merge(Flux.fromIterable(createDocumentObservables), 100).collectList().block();
                logger.info("Client have been initialized with data created for host {}", hostAndKey[0]);
            } else {
                logger.info("Client have been initialized with host {}", hostAndKey[0]);
            }
            clientDocsMap.put(asyncClient, docsToRead);
        }
    }
    MeterRegistry registry = configuration.getAzureMonitorMeterRegistry();
    if (registry != null) {
        BridgeInternal.monitorTelemetry(registry);
    }
    registry = configuration.getGraphiteMeterRegistry();
    if (registry != null) {
        BridgeInternal.monitorTelemetry(registry);
    }
    logger.info("Total number of client created for ReadThroughputWithMultipleClient {}", clientDocsMap.size());
} catch (FileNotFoundException e) {
    logger.error("Client host/key file not found: {}", csvFile, e);
} catch (IOException e) {
    logger.error("Failed to read client host/key file: {}", csvFile, e);
}
} | e.printStackTrace(); | private void createClients() {
String csvFile = "clientHostAndKey.txt";
String line = "";
String splitBy = ";";
try (BufferedReader br = new BufferedReader(new FileReader(csvFile))) {
while ((line = br.readLine()) != null) {
String[] hostAndKey = line.split(splitBy);
if (hostAndKey.length >= 2) {
String endpoint = hostAndKey[0].substring(hostAndKey[0].indexOf(ACCOUNT_ENDPOINT_TAG) + ACCOUNT_ENDPOINT_TAG.length());
String key = hostAndKey[1].substring(hostAndKey[1].indexOf(ACCOUNT_KEY_TAG) + ACCOUNT_KEY_TAG.length());
CosmosAsyncClient asyncClient = new CosmosClientBuilder()
.endpoint(endpoint)
.key(key)
.connectionPolicy(configuration.getConnectionPolicy())
.consistencyLevel(configuration.getConsistencyLevel())
.connectionReuseAcrossClientsEnabled(true)
.buildAsyncClient();
List<PojoizedJson> docsToRead = new ArrayList<>();
if (!configuration.isDeleteCollections()) {
CosmosAsyncDatabase cosmosAsyncDatabase = asyncClient.createDatabaseIfNotExists(this.configuration.getDatabaseId()).block().getDatabase();
CosmosAsyncContainer cosmosAsyncContainer =
cosmosAsyncDatabase.createContainerIfNotExists(configuration.getCollectionId(), PARTITION_KEY, configuration.getThroughput()).block().getContainer();
String partitionKey = cosmosAsyncContainer.read().block().getProperties().getPartitionKeyDefinition()
.getPaths().iterator().next().split("/")[1];
String dataFieldValue = RandomStringUtils.randomAlphabetic(this.configuration.getDocumentDataFieldSize());
ArrayList<Flux<PojoizedJson>> createDocumentObservables = new ArrayList<>();
for (int i = 0; i < this.configuration.getNumberOfPreCreatedDocuments(); i++) {
String uuid = UUID.randomUUID().toString();
com.azure.cosmos.benchmark.PojoizedJson newDoc = generateDocument(uuid, dataFieldValue, partitionKey);
Flux<PojoizedJson> obs = cosmosAsyncContainer.createItem(newDoc).map(resp -> {
com.azure.cosmos.benchmark.PojoizedJson x =
resp.getItem();
return x;
}
).flux();
createDocumentObservables.add(obs);
}
docsToRead = Flux.merge(Flux.fromIterable(createDocumentObservables), 100).collectList().block();
logger.info("Client have been initialized with data created for host {}", hostAndKey[0]);
} else {
logger.info("Client have been initialized with host {}", hostAndKey[0]);
}
clientDocsMap.put(asyncClient, docsToRead);
}
}
logger.info("Total number of client created for ReadThroughputWithMultipleClient {}", clientDocsMap.size());
} catch (FileNotFoundException e) {
logger.error(e.getMessage());
} catch (IOException e) {
logger.error(e.getMessage());
}
MeterRegistry registry = configuration.getAzureMonitorMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
registry = configuration.getGraphiteMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
} | class AsynReadWithMultipleClients<T> {
private final static String PARTITION_KEY = "/pk";
private final static String ACCOUNT_ENDPOINT_TAG = "AccountEndpoint=";
private final static String ACCOUNT_KEY_TAG = "AccountKey=";
private final Semaphore concurrencyControlSemaphore;
private final Logger logger;
private final Configuration configuration;
private MetricRegistry metricsRegistry = new MetricRegistry();
private ScheduledReporter reporter;
private Meter successMeter;
private Meter failureMeter;
private Timer latency;
private Map<CosmosAsyncClient, List<PojoizedJson>> clientDocsMap = new HashMap<>();
AsynReadWithMultipleClients(Configuration cfg) {
logger = LoggerFactory.getLogger(this.getClass());
this.configuration = cfg;
createClients();
if (configuration.getGraphiteEndpoint() != null) {
final Graphite graphite = new Graphite(new InetSocketAddress(configuration.getGraphiteEndpoint(), configuration.getGraphiteEndpointPort()));
reporter = GraphiteReporter.forRegistry(metricsRegistry)
.prefixedWith(configuration.getOperationType().name())
.convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS)
.filter(MetricFilter.ALL)
.build(graphite);
} else {
reporter = ConsoleReporter.forRegistry(metricsRegistry).convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS).build();
}
concurrencyControlSemaphore = new Semaphore(cfg.getConcurrency());
}
void deleteCollection() {
try {
for (CosmosAsyncClient cosmosAsyncClient : clientDocsMap.keySet()) {
cosmosAsyncClient.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).delete().block();
}
} catch (CosmosClientException e) {
if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) {
} else {
throw e;
}
} finally {
logger.info("Container on all client have been deleted successfully");
}
}
void run() throws Exception {
successMeter = metricsRegistry.meter("
failureMeter = metricsRegistry.meter("
latency = metricsRegistry.timer("Latency");
reporter.start(configuration.getPrintingInterval(), TimeUnit.SECONDS);
AtomicLong count = new AtomicLong(0);
long i;
long startTime = System.currentTimeMillis();
for (i = 0; shouldContinue(startTime, i); i++) {
BaseSubscriber<PojoizedJson> baseSubscriber = new BaseSubscriber<PojoizedJson>() {
@Override
protected void hookOnSubscribe(Subscription subscription) {
super.hookOnSubscribe(subscription);
}
@Override
protected void hookOnNext(PojoizedJson value) {
logger.debug("hookOnNext: {}, count:{}", value, count.get());
}
@Override
protected void hookOnCancel() {
this.hookOnError(new CancellationException());
}
@Override
protected void hookOnComplete() {
successMeter.mark();
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onSuccess();
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
@Override
protected void hookOnError(Throwable throwable) {
failureMeter.mark();
logger.error("Encountered failure {} on thread {}",
throwable.getMessage(), Thread.currentThread().getName(), throwable);
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onError(throwable);
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
};
performWorkload(baseSubscriber, i);
}
synchronized (count) {
while (count.get() < i) {
count.wait();
}
}
long endTime = System.currentTimeMillis();
logger.info("[{}] operations performed in [{}] seconds.",
configuration.getNumberOfOperations(), (int) ((endTime - startTime) / 1000));
reporter.report();
reporter.close();
}
void shutdown() {
for (CosmosAsyncClient asyncClient : clientDocsMap.keySet()) {
asyncClient.close();
}
}
protected void onSuccess() {
}
protected void onError(Throwable throwable) {
}
private PojoizedJson generateDocument(String idString, String dataFieldValue, String partitionKey) {
com.azure.cosmos.benchmark.PojoizedJson instance = new com.azure.cosmos.benchmark.PojoizedJson();
Map<String, String> properties = instance.getInstance();
properties.put("id", idString);
properties.put(partitionKey, idString);
for (int i = 0; i < configuration.getDocumentDataFieldCount(); i++) {
properties.put("dataField" + i, dataFieldValue);
}
return instance;
}
private boolean shouldContinue(long startTimeMillis, long iterationCount) {
Duration maxDurationTime = configuration.getMaxRunningTimeDuration();
int maxNumberOfOperations = configuration.getNumberOfOperations();
if (maxDurationTime == null) {
return iterationCount < maxNumberOfOperations;
}
if (startTimeMillis + maxDurationTime.toMillis() < System.currentTimeMillis()) {
return false;
}
if (maxNumberOfOperations < 0) {
return true;
}
return iterationCount < maxNumberOfOperations;
}
private void performWorkload(BaseSubscriber<PojoizedJson> baseSubscriber, long i) throws InterruptedException {
Mono<PojoizedJson> result;
int clientIndex = (int) (i % clientDocsMap.size());
CosmosAsyncClient client = (CosmosAsyncClient) clientDocsMap.keySet().toArray()[clientIndex];
int docIndex = (int) i % clientDocsMap.get(client).size();
PojoizedJson doc = clientDocsMap.get(client).get(docIndex);
String partitionKeyValue = doc.getId();
result = client.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).readItem(doc.getId(),
new PartitionKey(partitionKeyValue),
PojoizedJson.class).map(CosmosAsyncItemResponse::getItem);
concurrencyControlSemaphore.acquire();
AsyncReadBenchmark.LatencySubscriber<PojoizedJson> latencySubscriber = new AsyncReadBenchmark.LatencySubscriber<>(baseSubscriber);
latencySubscriber.context = latency.time();
result.subscribeOn(Schedulers.parallel()).subscribe(latencySubscriber);
}
} | class AsynReadWithMultipleClients<T> {
private final static String PARTITION_KEY = "/pk";
private final static String ACCOUNT_ENDPOINT_TAG = "AccountEndpoint=";
private final static String ACCOUNT_KEY_TAG = "AccountKey=";
private final Semaphore concurrencyControlSemaphore;
private final Logger logger;
private final Configuration configuration;
private MetricRegistry metricsRegistry = new MetricRegistry();
private ScheduledReporter reporter;
private Meter successMeter;
private Meter failureMeter;
private Timer latency;
private Map<CosmosAsyncClient, List<PojoizedJson>> clientDocsMap = new HashMap<>();
AsynReadWithMultipleClients(Configuration cfg) {
logger = LoggerFactory.getLogger(this.getClass());
this.configuration = cfg;
createClients();
if (configuration.getGraphiteEndpoint() != null) {
final Graphite graphite = new Graphite(new InetSocketAddress(configuration.getGraphiteEndpoint(), configuration.getGraphiteEndpointPort()));
reporter = GraphiteReporter.forRegistry(metricsRegistry)
.prefixedWith(configuration.getOperationType().name())
.convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS)
.filter(MetricFilter.ALL)
.build(graphite);
} else {
reporter = ConsoleReporter.forRegistry(metricsRegistry).convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS).build();
}
concurrencyControlSemaphore = new Semaphore(cfg.getConcurrency());
}
void deleteCollection() {
try {
for (CosmosAsyncClient cosmosAsyncClient : clientDocsMap.keySet()) {
cosmosAsyncClient.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).delete().block();
}
} catch (CosmosClientException e) {
if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) {
} else {
throw e;
}
} finally {
logger.info("Container on all client have been deleted successfully");
}
}
void run() throws Exception {
successMeter = metricsRegistry.meter("
failureMeter = metricsRegistry.meter("
latency = metricsRegistry.timer("Latency");
reporter.start(configuration.getPrintingInterval(), TimeUnit.SECONDS);
AtomicLong count = new AtomicLong(0);
long i;
long startTime = System.currentTimeMillis();
for (i = 0; shouldContinue(startTime, i); i++) {
BaseSubscriber<PojoizedJson> baseSubscriber = new BaseSubscriber<PojoizedJson>() {
@Override
protected void hookOnSubscribe(Subscription subscription) {
super.hookOnSubscribe(subscription);
}
@Override
protected void hookOnNext(PojoizedJson value) {
logger.debug("hookOnNext: {}, count:{}", value, count.get());
}
@Override
protected void hookOnCancel() {
this.hookOnError(new CancellationException());
}
@Override
protected void hookOnComplete() {
successMeter.mark();
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onSuccess();
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
@Override
protected void hookOnError(Throwable throwable) {
failureMeter.mark();
logger.error("Encountered failure {} on thread {}",
throwable.getMessage(), Thread.currentThread().getName(), throwable);
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onError(throwable);
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
};
performWorkload(baseSubscriber, i);
}
synchronized (count) {
while (count.get() < i) {
count.wait();
}
}
long endTime = System.currentTimeMillis();
logger.info("[{}] operations performed in [{}] seconds.",
configuration.getNumberOfOperations(), (int) ((endTime - startTime) / 1000));
reporter.report();
reporter.close();
}
void shutdown() {
for (CosmosAsyncClient asyncClient : clientDocsMap.keySet()) {
asyncClient.close();
}
}
protected void onSuccess() {
}
protected void onError(Throwable throwable) {
}
private PojoizedJson generateDocument(String idString, String dataFieldValue, String partitionKey) {
com.azure.cosmos.benchmark.PojoizedJson instance = new com.azure.cosmos.benchmark.PojoizedJson();
Map<String, String> properties = instance.getInstance();
properties.put("id", idString);
properties.put(partitionKey, idString);
for (int i = 0; i < configuration.getDocumentDataFieldCount(); i++) {
properties.put("dataField" + i, dataFieldValue);
}
return instance;
}
private boolean shouldContinue(long startTimeMillis, long iterationCount) {
Duration maxDurationTime = configuration.getMaxRunningTimeDuration();
int maxNumberOfOperations = configuration.getNumberOfOperations();
if (maxDurationTime == null) {
return iterationCount < maxNumberOfOperations;
}
if (startTimeMillis + maxDurationTime.toMillis() < System.currentTimeMillis()) {
return false;
}
if (maxNumberOfOperations < 0) {
return true;
}
return iterationCount < maxNumberOfOperations;
}
private void performWorkload(BaseSubscriber<PojoizedJson> baseSubscriber, long i) throws InterruptedException {
Mono<PojoizedJson> result;
int clientIndex = (int) (i % clientDocsMap.size());
CosmosAsyncClient client = (CosmosAsyncClient) clientDocsMap.keySet().toArray()[clientIndex];
int docIndex = (int) i % clientDocsMap.get(client).size();
PojoizedJson doc = clientDocsMap.get(client).get(docIndex);
String partitionKeyValue = doc.getId();
result = client.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).readItem(doc.getId(),
new PartitionKey(partitionKeyValue),
PojoizedJson.class).map(CosmosAsyncItemResponse::getItem);
concurrencyControlSemaphore.acquire();
AsyncReadBenchmark.LatencySubscriber<PojoizedJson> latencySubscriber = new AsyncReadBenchmark.LatencySubscriber<>(baseSubscriber);
latencySubscriber.context = latency.time();
result.subscribeOn(Schedulers.parallel()).subscribe(latencySubscriber);
}
} |
code style: you have multiple spaces `String throughputForMultiClientValue` -> `String throughputForMultiClientValue` | void tryGetValuesFromSystem() {
serviceEndpoint = StringUtils.defaultString(Strings.emptyToNull(System.getenv().get("SERVICE_END_POINT")),
serviceEndpoint);
masterKey = StringUtils.defaultString(Strings.emptyToNull(System.getenv().get("MASTER_KEY")), masterKey);
databaseId = StringUtils.defaultString(Strings.emptyToNull(System.getenv().get("DATABASE_ID")), databaseId);
collectionId = StringUtils.defaultString(Strings.emptyToNull(System.getenv().get("COLLECTION_ID")),
collectionId);
documentDataFieldSize = Integer.parseInt(
StringUtils.defaultString(Strings.emptyToNull(System.getenv().get("DOCUMENT_DATA_FIELD_SIZE")),
Integer.toString(documentDataFieldSize)));
maxConnectionPoolSize = Integer.parseInt(
StringUtils.defaultString(Strings.emptyToNull(System.getenv().get("MAX_CONNECTION_POOL_SIZE")),
Integer.toString(maxConnectionPoolSize)));
ConsistencyLevelConverter consistencyLevelConverter = new ConsistencyLevelConverter();
consistencyLevel = consistencyLevelConverter.convert(StringUtils
.defaultString(Strings.emptyToNull(System.getenv().get("CONSISTENCY_LEVEL")), consistencyLevel.name()));
OperationTypeConverter operationTypeConverter = new OperationTypeConverter();
operation = operationTypeConverter.convert(
StringUtils.defaultString(Strings.emptyToNull(System.getenv().get("OPERATION")), operation.name()));
String concurrencyValue = StringUtils.defaultString(Strings.emptyToNull(System.getenv().get("CONCURRENCY")),
concurrency == null ? null : Integer.toString(concurrency));
concurrency = concurrencyValue == null ? null : Integer.parseInt(concurrencyValue);
String numberOfOperationsValue = StringUtils.defaultString(
Strings.emptyToNull(System.getenv().get("NUMBER_OF_OPERATIONS")), Integer.toString(numberOfOperations));
numberOfOperations = Integer.parseInt(numberOfOperationsValue);
String throughputForMultiClientValue = StringUtils.defaultString(
Strings.emptyToNull(System.getenv().get("THROUGHPUT_MULTICLIENT")),Integer.toString(throughputForMultiClient));
throughputForMultiClient = Integer.parseInt(throughputForMultiClientValue);
} | String throughputForMultiClientValue = StringUtils.defaultString( | void tryGetValuesFromSystem() {
serviceEndpoint = StringUtils.defaultString(Strings.emptyToNull(System.getenv().get("SERVICE_END_POINT")),
serviceEndpoint);
masterKey = StringUtils.defaultString(Strings.emptyToNull(System.getenv().get("MASTER_KEY")), masterKey);
databaseId = StringUtils.defaultString(Strings.emptyToNull(System.getenv().get("DATABASE_ID")), databaseId);
collectionId = StringUtils.defaultString(Strings.emptyToNull(System.getenv().get("COLLECTION_ID")),
collectionId);
documentDataFieldSize = Integer.parseInt(
StringUtils.defaultString(Strings.emptyToNull(System.getenv().get("DOCUMENT_DATA_FIELD_SIZE")),
Integer.toString(documentDataFieldSize)));
maxConnectionPoolSize = Integer.parseInt(
StringUtils.defaultString(Strings.emptyToNull(System.getenv().get("MAX_CONNECTION_POOL_SIZE")),
Integer.toString(maxConnectionPoolSize)));
ConsistencyLevelConverter consistencyLevelConverter = new ConsistencyLevelConverter();
consistencyLevel = consistencyLevelConverter.convert(StringUtils
.defaultString(Strings.emptyToNull(System.getenv().get("CONSISTENCY_LEVEL")), consistencyLevel.name()));
OperationTypeConverter operationTypeConverter = new OperationTypeConverter();
operation = operationTypeConverter.convert(
StringUtils.defaultString(Strings.emptyToNull(System.getenv().get("OPERATION")), operation.name()));
String concurrencyValue = StringUtils.defaultString(Strings.emptyToNull(System.getenv().get("CONCURRENCY")),
concurrency == null ? null : Integer.toString(concurrency));
concurrency = concurrencyValue == null ? null : Integer.parseInt(concurrencyValue);
String numberOfOperationsValue = StringUtils.defaultString(
Strings.emptyToNull(System.getenv().get("NUMBER_OF_OPERATIONS")), Integer.toString(numberOfOperations));
numberOfOperations = Integer.parseInt(numberOfOperationsValue);
String throughputValue = StringUtils.defaultString(
Strings.emptyToNull(System.getenv().get("THROUGHPUT")), Integer.toString(throughput));
throughput = Integer.parseInt(throughputValue);
} | class ConsistencyLevelConverter implements IStringConverter<ConsistencyLevel> {
/*
* (non-Javadoc)
*
* @see com.beust.jcommander.IStringConverter
*/
@Override
public ConsistencyLevel convert(String value) {
ConsistencyLevel ret = fromString(value);
if (ret == null) {
throw new ParameterException("Value " + value + " can not be converted to ClientType. "
+ "Available values are: " + Arrays.toString(Operation.values()));
}
return ret;
}
} | class ConsistencyLevelConverter implements IStringConverter<ConsistencyLevel> {
/*
* (non-Javadoc)
*
* @see com.beust.jcommander.IStringConverter
*/
@Override
public ConsistencyLevel convert(String value) {
ConsistencyLevel ret = fromString(value);
if (ret == null) {
throw new ParameterException("Value " + value + " can not be converted to ClientType. "
+ "Available values are: " + Arrays.toString(Operation.values()));
}
return ret;
}
} |
code style: space after comma | void tryGetValuesFromSystem() {
serviceEndpoint = StringUtils.defaultString(Strings.emptyToNull(System.getenv().get("SERVICE_END_POINT")),
serviceEndpoint);
masterKey = StringUtils.defaultString(Strings.emptyToNull(System.getenv().get("MASTER_KEY")), masterKey);
databaseId = StringUtils.defaultString(Strings.emptyToNull(System.getenv().get("DATABASE_ID")), databaseId);
collectionId = StringUtils.defaultString(Strings.emptyToNull(System.getenv().get("COLLECTION_ID")),
collectionId);
documentDataFieldSize = Integer.parseInt(
StringUtils.defaultString(Strings.emptyToNull(System.getenv().get("DOCUMENT_DATA_FIELD_SIZE")),
Integer.toString(documentDataFieldSize)));
maxConnectionPoolSize = Integer.parseInt(
StringUtils.defaultString(Strings.emptyToNull(System.getenv().get("MAX_CONNECTION_POOL_SIZE")),
Integer.toString(maxConnectionPoolSize)));
ConsistencyLevelConverter consistencyLevelConverter = new ConsistencyLevelConverter();
consistencyLevel = consistencyLevelConverter.convert(StringUtils
.defaultString(Strings.emptyToNull(System.getenv().get("CONSISTENCY_LEVEL")), consistencyLevel.name()));
OperationTypeConverter operationTypeConverter = new OperationTypeConverter();
operation = operationTypeConverter.convert(
StringUtils.defaultString(Strings.emptyToNull(System.getenv().get("OPERATION")), operation.name()));
String concurrencyValue = StringUtils.defaultString(Strings.emptyToNull(System.getenv().get("CONCURRENCY")),
concurrency == null ? null : Integer.toString(concurrency));
concurrency = concurrencyValue == null ? null : Integer.parseInt(concurrencyValue);
String numberOfOperationsValue = StringUtils.defaultString(
Strings.emptyToNull(System.getenv().get("NUMBER_OF_OPERATIONS")), Integer.toString(numberOfOperations));
numberOfOperations = Integer.parseInt(numberOfOperationsValue);
String throughputForMultiClientValue = StringUtils.defaultString(
Strings.emptyToNull(System.getenv().get("THROUGHPUT_MULTICLIENT")),Integer.toString(throughputForMultiClient));
throughputForMultiClient = Integer.parseInt(throughputForMultiClientValue);
} | Strings.emptyToNull(System.getenv().get("THROUGHPUT_MULTICLIENT")),Integer.toString(throughputForMultiClient)); | void tryGetValuesFromSystem() {
serviceEndpoint = StringUtils.defaultString(Strings.emptyToNull(System.getenv().get("SERVICE_END_POINT")),
serviceEndpoint);
masterKey = StringUtils.defaultString(Strings.emptyToNull(System.getenv().get("MASTER_KEY")), masterKey);
databaseId = StringUtils.defaultString(Strings.emptyToNull(System.getenv().get("DATABASE_ID")), databaseId);
collectionId = StringUtils.defaultString(Strings.emptyToNull(System.getenv().get("COLLECTION_ID")),
collectionId);
documentDataFieldSize = Integer.parseInt(
StringUtils.defaultString(Strings.emptyToNull(System.getenv().get("DOCUMENT_DATA_FIELD_SIZE")),
Integer.toString(documentDataFieldSize)));
maxConnectionPoolSize = Integer.parseInt(
StringUtils.defaultString(Strings.emptyToNull(System.getenv().get("MAX_CONNECTION_POOL_SIZE")),
Integer.toString(maxConnectionPoolSize)));
ConsistencyLevelConverter consistencyLevelConverter = new ConsistencyLevelConverter();
consistencyLevel = consistencyLevelConverter.convert(StringUtils
.defaultString(Strings.emptyToNull(System.getenv().get("CONSISTENCY_LEVEL")), consistencyLevel.name()));
OperationTypeConverter operationTypeConverter = new OperationTypeConverter();
operation = operationTypeConverter.convert(
StringUtils.defaultString(Strings.emptyToNull(System.getenv().get("OPERATION")), operation.name()));
String concurrencyValue = StringUtils.defaultString(Strings.emptyToNull(System.getenv().get("CONCURRENCY")),
concurrency == null ? null : Integer.toString(concurrency));
concurrency = concurrencyValue == null ? null : Integer.parseInt(concurrencyValue);
String numberOfOperationsValue = StringUtils.defaultString(
Strings.emptyToNull(System.getenv().get("NUMBER_OF_OPERATIONS")), Integer.toString(numberOfOperations));
numberOfOperations = Integer.parseInt(numberOfOperationsValue);
String throughputValue = StringUtils.defaultString(
Strings.emptyToNull(System.getenv().get("THROUGHPUT")), Integer.toString(throughput));
throughput = Integer.parseInt(throughputValue);
} | class ConsistencyLevelConverter implements IStringConverter<ConsistencyLevel> {
/*
* (non-Javadoc)
*
* @see com.beust.jcommander.IStringConverter
*/
@Override
public ConsistencyLevel convert(String value) {
ConsistencyLevel ret = fromString(value);
if (ret == null) {
throw new ParameterException("Value " + value + " can not be converted to ClientType. "
+ "Available values are: " + Arrays.toString(Operation.values()));
}
return ret;
}
} | class ConsistencyLevelConverter implements IStringConverter<ConsistencyLevel> {
/*
* (non-Javadoc)
*
* @see com.beust.jcommander.IStringConverter
*/
@Override
public ConsistencyLevel convert(String value) {
ConsistencyLevel ret = fromString(value);
if (ret == null) {
throw new ParameterException("Value " + value + " can not be converted to ClientType. "
+ "Available values are: " + Arrays.toString(Operation.values()));
}
return ret;
}
} |
this will print the key as well. can we just print the endpoint. ideally we should not log key anywhere in the log. | private void createClients() {
String csvFile = "clientHostAndKey.txt";
String line = "";
String splitBy = ";";
try (BufferedReader br = new BufferedReader(new FileReader(csvFile))) {
while ((line = br.readLine()) != null) {
String[] hostAndKey = line.split(splitBy);
if (hostAndKey.length >= 2) {
String endpoint = hostAndKey[0].substring(hostAndKey[0].indexOf(ACCOUNT_ENDPOINT_TAG) + ACCOUNT_ENDPOINT_TAG.length());
String key = hostAndKey[1].substring(hostAndKey[1].indexOf(ACCOUNT_KEY_TAG) + ACCOUNT_KEY_TAG.length());
CosmosAsyncClient asyncClient = new CosmosClientBuilder()
.endpoint(endpoint)
.key(key)
.connectionPolicy(configuration.getConnectionPolicy())
.consistencyLevel(configuration.getConsistencyLevel())
.connectionReuseAcrossClientsEnabled(true)
.buildAsyncClient();
List<PojoizedJson> docsToRead = new ArrayList<>();
if (!configuration.isDeleteCollections()) {
CosmosAsyncDatabase cosmosAsyncDatabase = asyncClient.createDatabaseIfNotExists(this.configuration.getDatabaseId()).block().getDatabase();
CosmosAsyncContainer cosmosAsyncContainer = cosmosAsyncDatabase.createContainerIfNotExists(configuration.getCollectionId(), PARTITION_KEY, 100000).block().getContainer();
String partitionKey = cosmosAsyncContainer.read().block().getProperties().getPartitionKeyDefinition()
.getPaths().iterator().next().split("/")[1];
String dataFieldValue = RandomStringUtils.randomAlphabetic(this.configuration.getDocumentDataFieldSize());
ArrayList<Flux<PojoizedJson>> createDocumentObservables = new ArrayList<>();
for (int i = 0; i < this.configuration.getNumberOfPreCreatedDocuments(); i++) {
String uuid = UUID.randomUUID().toString();
com.azure.cosmos.benchmark.PojoizedJson newDoc = generateDocument(uuid, dataFieldValue, partitionKey);
Flux<PojoizedJson> obs = cosmosAsyncContainer.createItem(newDoc).map(resp -> {
com.azure.cosmos.benchmark.PojoizedJson x =
resp.getItem();
return x;
}
).flux();
createDocumentObservables.add(obs);
}
docsToRead = Flux.merge(Flux.fromIterable(createDocumentObservables), 100).collectList().block();
logger.info("Client have been initialized with data created for host {}", hostAndKey[0]);
} else {
logger.info("Client have been initialized with host {}", hostAndKey[0]);
}
clientDocsMap.put(asyncClient, docsToRead);
}
}
MeterRegistry registry = configuration.getAzureMonitorMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
registry = configuration.getGraphiteMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
logger.info("Total number of client created for ReadThroughputWithMultipleClient {}",clientDocsMap.size());
} catch (FileNotFoundException e) {
e.printStackTrace();
} catch (IOException e) {
e.printStackTrace();
}
} | logger.info("Client have been initialized with data created for host {}", hostAndKey[0]); | private void createClients() {
String csvFile = "clientHostAndKey.txt";
String line = "";
String splitBy = ";";
try (BufferedReader br = new BufferedReader(new FileReader(csvFile))) {
while ((line = br.readLine()) != null) {
String[] hostAndKey = line.split(splitBy);
if (hostAndKey.length >= 2) {
String endpoint = hostAndKey[0].substring(hostAndKey[0].indexOf(ACCOUNT_ENDPOINT_TAG) + ACCOUNT_ENDPOINT_TAG.length());
String key = hostAndKey[1].substring(hostAndKey[1].indexOf(ACCOUNT_KEY_TAG) + ACCOUNT_KEY_TAG.length());
CosmosAsyncClient asyncClient = new CosmosClientBuilder()
.endpoint(endpoint)
.key(key)
.connectionPolicy(configuration.getConnectionPolicy())
.consistencyLevel(configuration.getConsistencyLevel())
.connectionReuseAcrossClientsEnabled(true)
.buildAsyncClient();
List<PojoizedJson> docsToRead = new ArrayList<>();
if (!configuration.isDeleteCollections()) {
CosmosAsyncDatabase cosmosAsyncDatabase = asyncClient.createDatabaseIfNotExists(this.configuration.getDatabaseId()).block().getDatabase();
CosmosAsyncContainer cosmosAsyncContainer =
cosmosAsyncDatabase.createContainerIfNotExists(configuration.getCollectionId(), PARTITION_KEY, configuration.getThroughput()).block().getContainer();
String partitionKey = cosmosAsyncContainer.read().block().getProperties().getPartitionKeyDefinition()
.getPaths().iterator().next().split("/")[1];
String dataFieldValue = RandomStringUtils.randomAlphabetic(this.configuration.getDocumentDataFieldSize());
ArrayList<Flux<PojoizedJson>> createDocumentObservables = new ArrayList<>();
for (int i = 0; i < this.configuration.getNumberOfPreCreatedDocuments(); i++) {
String uuid = UUID.randomUUID().toString();
com.azure.cosmos.benchmark.PojoizedJson newDoc = generateDocument(uuid, dataFieldValue, partitionKey);
Flux<PojoizedJson> obs = cosmosAsyncContainer.createItem(newDoc).map(resp -> {
com.azure.cosmos.benchmark.PojoizedJson x =
resp.getItem();
return x;
}
).flux();
createDocumentObservables.add(obs);
}
docsToRead = Flux.merge(Flux.fromIterable(createDocumentObservables), 100).collectList().block();
logger.info("Client have been initialized with data created for host {}", hostAndKey[0]);
} else {
logger.info("Client have been initialized with host {}", hostAndKey[0]);
}
clientDocsMap.put(asyncClient, docsToRead);
}
}
logger.info("Total number of client created for ReadThroughputWithMultipleClient {}", clientDocsMap.size());
} catch (FileNotFoundException e) {
logger.error(e.getMessage());
} catch (IOException e) {
logger.error(e.getMessage());
}
MeterRegistry registry = configuration.getAzureMonitorMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
registry = configuration.getGraphiteMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
} | class AsynReadWithMultipleClients<T> {
private final static String PARTITION_KEY = "/pk";
private final static String ACCOUNT_ENDPOINT_TAG = "AccountEndpoint=";
private final static String ACCOUNT_KEY_TAG = "AccountKey=";
private final Semaphore concurrencyControlSemaphore;
private final Logger logger;
private final Configuration configuration;
private MetricRegistry metricsRegistry = new MetricRegistry();
private ScheduledReporter reporter;
private Meter successMeter;
private Meter failureMeter;
private Timer latency;
private Map<CosmosAsyncClient, List<PojoizedJson>> clientDocsMap = new HashMap<>();
AsynReadWithMultipleClients(Configuration cfg) {
logger = LoggerFactory.getLogger(this.getClass());
this.configuration = cfg;
createClients();
if (configuration.getGraphiteEndpoint() != null) {
final Graphite graphite = new Graphite(new InetSocketAddress(configuration.getGraphiteEndpoint(), configuration.getGraphiteEndpointPort()));
reporter = GraphiteReporter.forRegistry(metricsRegistry)
.prefixedWith(configuration.getOperationType().name())
.convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS)
.filter(MetricFilter.ALL)
.build(graphite);
} else {
reporter = ConsoleReporter.forRegistry(metricsRegistry).convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS).build();
}
concurrencyControlSemaphore = new Semaphore(cfg.getConcurrency());
}
void deleteCollection() {
try {
for (CosmosAsyncClient cosmosAsyncClient : clientDocsMap.keySet()) {
cosmosAsyncClient.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).delete().block();
}
} catch (CosmosClientException e) {
if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) {
} else {
throw e;
}
} finally {
logger.info("Container on all client have been deleted successfully");
}
}
void run() throws Exception {
successMeter = metricsRegistry.meter("
failureMeter = metricsRegistry.meter("
latency = metricsRegistry.timer("Latency");
reporter.start(configuration.getPrintingInterval(), TimeUnit.SECONDS);
AtomicLong count = new AtomicLong(0);
long i;
long startTime = System.currentTimeMillis();
for (i = 0; shouldContinue(startTime, i); i++) {
BaseSubscriber<PojoizedJson> baseSubscriber = new BaseSubscriber<PojoizedJson>() {
@Override
protected void hookOnSubscribe(Subscription subscription) {
super.hookOnSubscribe(subscription);
}
@Override
protected void hookOnNext(PojoizedJson value) {
logger.debug("hookOnNext: {}, count:{}", value, count.get());
}
@Override
protected void hookOnCancel() {
this.hookOnError(new CancellationException());
}
@Override
protected void hookOnComplete() {
successMeter.mark();
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onSuccess();
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
@Override
protected void hookOnError(Throwable throwable) {
failureMeter.mark();
logger.error("Encountered failure {} on thread {}",
throwable.getMessage(), Thread.currentThread().getName(), throwable);
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onError(throwable);
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
};
performWorkload(baseSubscriber, i);
}
synchronized (count) {
while (count.get() < i) {
count.wait();
}
}
long endTime = System.currentTimeMillis();
logger.info("[{}] operations performed in [{}] seconds.",
configuration.getNumberOfOperations(), (int) ((endTime - startTime) / 1000));
reporter.report();
reporter.close();
}
void shutdown() {
for (CosmosAsyncClient asyncClient : clientDocsMap.keySet()) {
asyncClient.close();
}
}
protected void onSuccess() {
}
protected void onError(Throwable throwable) {
}
private PojoizedJson generateDocument(String idString, String dataFieldValue, String partitionKey) {
com.azure.cosmos.benchmark.PojoizedJson instance = new com.azure.cosmos.benchmark.PojoizedJson();
Map<String, String> properties = instance.getInstance();
properties.put("id", idString);
properties.put(partitionKey, idString);
for (int i = 0; i < configuration.getDocumentDataFieldCount(); i++) {
properties.put("dataField" + i, dataFieldValue);
}
return instance;
}
private boolean shouldContinue(long startTimeMillis, long iterationCount) {
Duration maxDurationTime = configuration.getMaxRunningTimeDuration();
int maxNumberOfOperations = configuration.getNumberOfOperations();
if (maxDurationTime == null) {
return iterationCount < maxNumberOfOperations;
}
if (startTimeMillis + maxDurationTime.toMillis() < System.currentTimeMillis()) {
return false;
}
if (maxNumberOfOperations < 0) {
return true;
}
return iterationCount < maxNumberOfOperations;
}
private void performWorkload(BaseSubscriber<PojoizedJson> baseSubscriber, long i) throws InterruptedException {
Mono<PojoizedJson> result;
int clientIndex = (int) (i % clientDocsMap.size());
CosmosAsyncClient client = (CosmosAsyncClient) clientDocsMap.keySet().toArray()[clientIndex];
int docIndex = (int) i % clientDocsMap.get(client).size();
PojoizedJson doc = clientDocsMap.get(client).get(docIndex);
String partitionKeyValue = doc.getId();
result = client.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).readItem(doc.getId(),
new PartitionKey(partitionKeyValue),
PojoizedJson.class).map(CosmosAsyncItemResponse::getItem);
concurrencyControlSemaphore.acquire();
AsyncReadBenchmark.LatencySubscriber<PojoizedJson> latencySubscriber = new AsyncReadBenchmark.LatencySubscriber<>(baseSubscriber);
latencySubscriber.context = latency.time();
result.subscribeOn(Schedulers.parallel()).subscribe(latencySubscriber);
}
} | class AsynReadWithMultipleClients<T> {
private final static String PARTITION_KEY = "/pk";
private final static String ACCOUNT_ENDPOINT_TAG = "AccountEndpoint=";
private final static String ACCOUNT_KEY_TAG = "AccountKey=";
private final Semaphore concurrencyControlSemaphore;
private final Logger logger;
private final Configuration configuration;
private MetricRegistry metricsRegistry = new MetricRegistry();
private ScheduledReporter reporter;
private Meter successMeter;
private Meter failureMeter;
private Timer latency;
private Map<CosmosAsyncClient, List<PojoizedJson>> clientDocsMap = new HashMap<>();
AsynReadWithMultipleClients(Configuration cfg) {
logger = LoggerFactory.getLogger(this.getClass());
this.configuration = cfg;
createClients();
if (configuration.getGraphiteEndpoint() != null) {
final Graphite graphite = new Graphite(new InetSocketAddress(configuration.getGraphiteEndpoint(), configuration.getGraphiteEndpointPort()));
reporter = GraphiteReporter.forRegistry(metricsRegistry)
.prefixedWith(configuration.getOperationType().name())
.convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS)
.filter(MetricFilter.ALL)
.build(graphite);
} else {
reporter = ConsoleReporter.forRegistry(metricsRegistry).convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS).build();
}
concurrencyControlSemaphore = new Semaphore(cfg.getConcurrency());
}
void deleteCollection() {
try {
for (CosmosAsyncClient cosmosAsyncClient : clientDocsMap.keySet()) {
cosmosAsyncClient.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).delete().block();
}
} catch (CosmosClientException e) {
if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) {
} else {
throw e;
}
} finally {
logger.info("Container on all client have been deleted successfully");
}
}
void run() throws Exception {
successMeter = metricsRegistry.meter("
failureMeter = metricsRegistry.meter("
latency = metricsRegistry.timer("Latency");
reporter.start(configuration.getPrintingInterval(), TimeUnit.SECONDS);
AtomicLong count = new AtomicLong(0);
long i;
long startTime = System.currentTimeMillis();
for (i = 0; shouldContinue(startTime, i); i++) {
BaseSubscriber<PojoizedJson> baseSubscriber = new BaseSubscriber<PojoizedJson>() {
@Override
protected void hookOnSubscribe(Subscription subscription) {
super.hookOnSubscribe(subscription);
}
@Override
protected void hookOnNext(PojoizedJson value) {
logger.debug("hookOnNext: {}, count:{}", value, count.get());
}
@Override
protected void hookOnCancel() {
this.hookOnError(new CancellationException());
}
@Override
protected void hookOnComplete() {
successMeter.mark();
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onSuccess();
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
@Override
protected void hookOnError(Throwable throwable) {
failureMeter.mark();
logger.error("Encountered failure {} on thread {}",
throwable.getMessage(), Thread.currentThread().getName(), throwable);
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onError(throwable);
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
};
performWorkload(baseSubscriber, i);
}
synchronized (count) {
while (count.get() < i) {
count.wait();
}
}
long endTime = System.currentTimeMillis();
logger.info("[{}] operations performed in [{}] seconds.",
configuration.getNumberOfOperations(), (int) ((endTime - startTime) / 1000));
reporter.report();
reporter.close();
}
void shutdown() {
for (CosmosAsyncClient asyncClient : clientDocsMap.keySet()) {
asyncClient.close();
}
}
protected void onSuccess() {
}
protected void onError(Throwable throwable) {
}
private PojoizedJson generateDocument(String idString, String dataFieldValue, String partitionKey) {
com.azure.cosmos.benchmark.PojoizedJson instance = new com.azure.cosmos.benchmark.PojoizedJson();
Map<String, String> properties = instance.getInstance();
properties.put("id", idString);
properties.put(partitionKey, idString);
for (int i = 0; i < configuration.getDocumentDataFieldCount(); i++) {
properties.put("dataField" + i, dataFieldValue);
}
return instance;
}
private boolean shouldContinue(long startTimeMillis, long iterationCount) {
Duration maxDurationTime = configuration.getMaxRunningTimeDuration();
int maxNumberOfOperations = configuration.getNumberOfOperations();
if (maxDurationTime == null) {
return iterationCount < maxNumberOfOperations;
}
if (startTimeMillis + maxDurationTime.toMillis() < System.currentTimeMillis()) {
return false;
}
if (maxNumberOfOperations < 0) {
return true;
}
return iterationCount < maxNumberOfOperations;
}
private void performWorkload(BaseSubscriber<PojoizedJson> baseSubscriber, long i) throws InterruptedException {
Mono<PojoizedJson> result;
int clientIndex = (int) (i % clientDocsMap.size());
CosmosAsyncClient client = (CosmosAsyncClient) clientDocsMap.keySet().toArray()[clientIndex];
int docIndex = (int) i % clientDocsMap.get(client).size();
PojoizedJson doc = clientDocsMap.get(client).get(docIndex);
String partitionKeyValue = doc.getId();
result = client.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).readItem(doc.getId(),
new PartitionKey(partitionKeyValue),
PojoizedJson.class).map(CosmosAsyncItemResponse::getItem);
concurrencyControlSemaphore.acquire();
AsyncReadBenchmark.LatencySubscriber<PojoizedJson> latencySubscriber = new AsyncReadBenchmark.LatencySubscriber<>(baseSubscriber);
latencySubscriber.context = latency.time();
result.subscribeOn(Schedulers.parallel()).subscribe(latencySubscriber);
}
} |
ditto. we should not log the key | private void createClients() {
String csvFile = "clientHostAndKey.txt";
String line = "";
String splitBy = ";";
try (BufferedReader br = new BufferedReader(new FileReader(csvFile))) {
while ((line = br.readLine()) != null) {
String[] hostAndKey = line.split(splitBy);
if (hostAndKey.length >= 2) {
String endpoint = hostAndKey[0].substring(hostAndKey[0].indexOf(ACCOUNT_ENDPOINT_TAG) + ACCOUNT_ENDPOINT_TAG.length());
String key = hostAndKey[1].substring(hostAndKey[1].indexOf(ACCOUNT_KEY_TAG) + ACCOUNT_KEY_TAG.length());
CosmosAsyncClient asyncClient = new CosmosClientBuilder()
.endpoint(endpoint)
.key(key)
.connectionPolicy(configuration.getConnectionPolicy())
.consistencyLevel(configuration.getConsistencyLevel())
.connectionReuseAcrossClientsEnabled(true)
.buildAsyncClient();
List<PojoizedJson> docsToRead = new ArrayList<>();
if (!configuration.isDeleteCollections()) {
CosmosAsyncDatabase cosmosAsyncDatabase = asyncClient.createDatabaseIfNotExists(this.configuration.getDatabaseId()).block().getDatabase();
CosmosAsyncContainer cosmosAsyncContainer = cosmosAsyncDatabase.createContainerIfNotExists(configuration.getCollectionId(), PARTITION_KEY, 100000).block().getContainer();
String partitionKey = cosmosAsyncContainer.read().block().getProperties().getPartitionKeyDefinition()
.getPaths().iterator().next().split("/")[1];
String dataFieldValue = RandomStringUtils.randomAlphabetic(this.configuration.getDocumentDataFieldSize());
ArrayList<Flux<PojoizedJson>> createDocumentObservables = new ArrayList<>();
for (int i = 0; i < this.configuration.getNumberOfPreCreatedDocuments(); i++) {
String uuid = UUID.randomUUID().toString();
com.azure.cosmos.benchmark.PojoizedJson newDoc = generateDocument(uuid, dataFieldValue, partitionKey);
Flux<PojoizedJson> obs = cosmosAsyncContainer.createItem(newDoc).map(resp -> {
com.azure.cosmos.benchmark.PojoizedJson x =
resp.getItem();
return x;
}
).flux();
createDocumentObservables.add(obs);
}
docsToRead = Flux.merge(Flux.fromIterable(createDocumentObservables), 100).collectList().block();
logger.info("Client have been initialized with data created for host {}", hostAndKey[0]);
} else {
logger.info("Client have been initialized with host {}", hostAndKey[0]);
}
clientDocsMap.put(asyncClient, docsToRead);
}
}
MeterRegistry registry = configuration.getAzureMonitorMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
registry = configuration.getGraphiteMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
logger.info("Total number of client created for ReadThroughputWithMultipleClient {}",clientDocsMap.size());
} catch (FileNotFoundException e) {
e.printStackTrace();
} catch (IOException e) {
e.printStackTrace();
}
} | logger.info("Client have been initialized with host {}", hostAndKey[0]); | private void createClients() {
String csvFile = "clientHostAndKey.txt";
String line = "";
String splitBy = ";";
try (BufferedReader br = new BufferedReader(new FileReader(csvFile))) {
while ((line = br.readLine()) != null) {
String[] hostAndKey = line.split(splitBy);
if (hostAndKey.length >= 2) {
String endpoint = hostAndKey[0].substring(hostAndKey[0].indexOf(ACCOUNT_ENDPOINT_TAG) + ACCOUNT_ENDPOINT_TAG.length());
String key = hostAndKey[1].substring(hostAndKey[1].indexOf(ACCOUNT_KEY_TAG) + ACCOUNT_KEY_TAG.length());
CosmosAsyncClient asyncClient = new CosmosClientBuilder()
.endpoint(endpoint)
.key(key)
.connectionPolicy(configuration.getConnectionPolicy())
.consistencyLevel(configuration.getConsistencyLevel())
.connectionReuseAcrossClientsEnabled(true)
.buildAsyncClient();
List<PojoizedJson> docsToRead = new ArrayList<>();
if (!configuration.isDeleteCollections()) {
CosmosAsyncDatabase cosmosAsyncDatabase = asyncClient.createDatabaseIfNotExists(this.configuration.getDatabaseId()).block().getDatabase();
CosmosAsyncContainer cosmosAsyncContainer =
cosmosAsyncDatabase.createContainerIfNotExists(configuration.getCollectionId(), PARTITION_KEY, configuration.getThroughput()).block().getContainer();
String partitionKey = cosmosAsyncContainer.read().block().getProperties().getPartitionKeyDefinition()
.getPaths().iterator().next().split("/")[1];
String dataFieldValue = RandomStringUtils.randomAlphabetic(this.configuration.getDocumentDataFieldSize());
ArrayList<Flux<PojoizedJson>> createDocumentObservables = new ArrayList<>();
for (int i = 0; i < this.configuration.getNumberOfPreCreatedDocuments(); i++) {
String uuid = UUID.randomUUID().toString();
com.azure.cosmos.benchmark.PojoizedJson newDoc = generateDocument(uuid, dataFieldValue, partitionKey);
Flux<PojoizedJson> obs = cosmosAsyncContainer.createItem(newDoc).map(resp -> {
com.azure.cosmos.benchmark.PojoizedJson x =
resp.getItem();
return x;
}
).flux();
createDocumentObservables.add(obs);
}
docsToRead = Flux.merge(Flux.fromIterable(createDocumentObservables), 100).collectList().block();
logger.info("Client have been initialized with data created for host {}", hostAndKey[0]);
} else {
logger.info("Client have been initialized with host {}", hostAndKey[0]);
}
clientDocsMap.put(asyncClient, docsToRead);
}
}
logger.info("Total number of client created for ReadThroughputWithMultipleClient {}", clientDocsMap.size());
} catch (FileNotFoundException e) {
logger.error(e.getMessage());
} catch (IOException e) {
logger.error(e.getMessage());
}
MeterRegistry registry = configuration.getAzureMonitorMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
registry = configuration.getGraphiteMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
} | class AsynReadWithMultipleClients<T> {
private final static String PARTITION_KEY = "/pk";
private final static String ACCOUNT_ENDPOINT_TAG = "AccountEndpoint=";
private final static String ACCOUNT_KEY_TAG = "AccountKey=";
private final Semaphore concurrencyControlSemaphore;
private final Logger logger;
private final Configuration configuration;
private MetricRegistry metricsRegistry = new MetricRegistry();
private ScheduledReporter reporter;
private Meter successMeter;
private Meter failureMeter;
private Timer latency;
private Map<CosmosAsyncClient, List<PojoizedJson>> clientDocsMap = new HashMap<>();
AsynReadWithMultipleClients(Configuration cfg) {
logger = LoggerFactory.getLogger(this.getClass());
this.configuration = cfg;
createClients();
if (configuration.getGraphiteEndpoint() != null) {
final Graphite graphite = new Graphite(new InetSocketAddress(configuration.getGraphiteEndpoint(), configuration.getGraphiteEndpointPort()));
reporter = GraphiteReporter.forRegistry(metricsRegistry)
.prefixedWith(configuration.getOperationType().name())
.convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS)
.filter(MetricFilter.ALL)
.build(graphite);
} else {
reporter = ConsoleReporter.forRegistry(metricsRegistry).convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS).build();
}
concurrencyControlSemaphore = new Semaphore(cfg.getConcurrency());
}
void deleteCollection() {
try {
for (CosmosAsyncClient cosmosAsyncClient : clientDocsMap.keySet()) {
cosmosAsyncClient.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).delete().block();
}
} catch (CosmosClientException e) {
if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) {
} else {
throw e;
}
} finally {
logger.info("Container on all client have been deleted successfully");
}
}
void run() throws Exception {
successMeter = metricsRegistry.meter("
failureMeter = metricsRegistry.meter("
latency = metricsRegistry.timer("Latency");
reporter.start(configuration.getPrintingInterval(), TimeUnit.SECONDS);
AtomicLong count = new AtomicLong(0);
long i;
long startTime = System.currentTimeMillis();
for (i = 0; shouldContinue(startTime, i); i++) {
BaseSubscriber<PojoizedJson> baseSubscriber = new BaseSubscriber<PojoizedJson>() {
@Override
protected void hookOnSubscribe(Subscription subscription) {
super.hookOnSubscribe(subscription);
}
@Override
protected void hookOnNext(PojoizedJson value) {
logger.debug("hookOnNext: {}, count:{}", value, count.get());
}
@Override
protected void hookOnCancel() {
this.hookOnError(new CancellationException());
}
@Override
protected void hookOnComplete() {
successMeter.mark();
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onSuccess();
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
@Override
protected void hookOnError(Throwable throwable) {
failureMeter.mark();
logger.error("Encountered failure {} on thread {}",
throwable.getMessage(), Thread.currentThread().getName(), throwable);
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onError(throwable);
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
};
performWorkload(baseSubscriber, i);
}
synchronized (count) {
while (count.get() < i) {
count.wait();
}
}
long endTime = System.currentTimeMillis();
logger.info("[{}] operations performed in [{}] seconds.",
configuration.getNumberOfOperations(), (int) ((endTime - startTime) / 1000));
reporter.report();
reporter.close();
}
void shutdown() {
for (CosmosAsyncClient asyncClient : clientDocsMap.keySet()) {
asyncClient.close();
}
}
protected void onSuccess() {
}
protected void onError(Throwable throwable) {
}
private PojoizedJson generateDocument(String idString, String dataFieldValue, String partitionKey) {
com.azure.cosmos.benchmark.PojoizedJson instance = new com.azure.cosmos.benchmark.PojoizedJson();
Map<String, String> properties = instance.getInstance();
properties.put("id", idString);
properties.put(partitionKey, idString);
for (int i = 0; i < configuration.getDocumentDataFieldCount(); i++) {
properties.put("dataField" + i, dataFieldValue);
}
return instance;
}
private boolean shouldContinue(long startTimeMillis, long iterationCount) {
Duration maxDurationTime = configuration.getMaxRunningTimeDuration();
int maxNumberOfOperations = configuration.getNumberOfOperations();
if (maxDurationTime == null) {
return iterationCount < maxNumberOfOperations;
}
if (startTimeMillis + maxDurationTime.toMillis() < System.currentTimeMillis()) {
return false;
}
if (maxNumberOfOperations < 0) {
return true;
}
return iterationCount < maxNumberOfOperations;
}
private void performWorkload(BaseSubscriber<PojoizedJson> baseSubscriber, long i) throws InterruptedException {
Mono<PojoizedJson> result;
int clientIndex = (int) (i % clientDocsMap.size());
CosmosAsyncClient client = (CosmosAsyncClient) clientDocsMap.keySet().toArray()[clientIndex];
int docIndex = (int) i % clientDocsMap.get(client).size();
PojoizedJson doc = clientDocsMap.get(client).get(docIndex);
String partitionKeyValue = doc.getId();
result = client.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).readItem(doc.getId(),
new PartitionKey(partitionKeyValue),
PojoizedJson.class).map(CosmosAsyncItemResponse::getItem);
concurrencyControlSemaphore.acquire();
AsyncReadBenchmark.LatencySubscriber<PojoizedJson> latencySubscriber = new AsyncReadBenchmark.LatencySubscriber<>(baseSubscriber);
latencySubscriber.context = latency.time();
result.subscribeOn(Schedulers.parallel()).subscribe(latencySubscriber);
}
} | class AsynReadWithMultipleClients<T> {
private final static String PARTITION_KEY = "/pk";
private final static String ACCOUNT_ENDPOINT_TAG = "AccountEndpoint=";
private final static String ACCOUNT_KEY_TAG = "AccountKey=";
private final Semaphore concurrencyControlSemaphore;
private final Logger logger;
private final Configuration configuration;
private MetricRegistry metricsRegistry = new MetricRegistry();
private ScheduledReporter reporter;
private Meter successMeter;
private Meter failureMeter;
private Timer latency;
private Map<CosmosAsyncClient, List<PojoizedJson>> clientDocsMap = new HashMap<>();
AsynReadWithMultipleClients(Configuration cfg) {
logger = LoggerFactory.getLogger(this.getClass());
this.configuration = cfg;
createClients();
if (configuration.getGraphiteEndpoint() != null) {
final Graphite graphite = new Graphite(new InetSocketAddress(configuration.getGraphiteEndpoint(), configuration.getGraphiteEndpointPort()));
reporter = GraphiteReporter.forRegistry(metricsRegistry)
.prefixedWith(configuration.getOperationType().name())
.convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS)
.filter(MetricFilter.ALL)
.build(graphite);
} else {
reporter = ConsoleReporter.forRegistry(metricsRegistry).convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS).build();
}
concurrencyControlSemaphore = new Semaphore(cfg.getConcurrency());
}
void deleteCollection() {
try {
for (CosmosAsyncClient cosmosAsyncClient : clientDocsMap.keySet()) {
cosmosAsyncClient.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).delete().block();
}
} catch (CosmosClientException e) {
if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) {
} else {
throw e;
}
} finally {
logger.info("Container on all client have been deleted successfully");
}
}
void run() throws Exception {
successMeter = metricsRegistry.meter("
failureMeter = metricsRegistry.meter("
latency = metricsRegistry.timer("Latency");
reporter.start(configuration.getPrintingInterval(), TimeUnit.SECONDS);
AtomicLong count = new AtomicLong(0);
long i;
long startTime = System.currentTimeMillis();
for (i = 0; shouldContinue(startTime, i); i++) {
BaseSubscriber<PojoizedJson> baseSubscriber = new BaseSubscriber<PojoizedJson>() {
@Override
protected void hookOnSubscribe(Subscription subscription) {
super.hookOnSubscribe(subscription);
}
@Override
protected void hookOnNext(PojoizedJson value) {
logger.debug("hookOnNext: {}, count:{}", value, count.get());
}
@Override
protected void hookOnCancel() {
this.hookOnError(new CancellationException());
}
@Override
protected void hookOnComplete() {
successMeter.mark();
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onSuccess();
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
@Override
protected void hookOnError(Throwable throwable) {
failureMeter.mark();
logger.error("Encountered failure {} on thread {}",
throwable.getMessage(), Thread.currentThread().getName(), throwable);
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onError(throwable);
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
};
performWorkload(baseSubscriber, i);
}
synchronized (count) {
while (count.get() < i) {
count.wait();
}
}
long endTime = System.currentTimeMillis();
logger.info("[{}] operations performed in [{}] seconds.",
configuration.getNumberOfOperations(), (int) ((endTime - startTime) / 1000));
reporter.report();
reporter.close();
}
void shutdown() {
for (CosmosAsyncClient asyncClient : clientDocsMap.keySet()) {
asyncClient.close();
}
}
protected void onSuccess() {
}
protected void onError(Throwable throwable) {
}
private PojoizedJson generateDocument(String idString, String dataFieldValue, String partitionKey) {
com.azure.cosmos.benchmark.PojoizedJson instance = new com.azure.cosmos.benchmark.PojoizedJson();
Map<String, String> properties = instance.getInstance();
properties.put("id", idString);
properties.put(partitionKey, idString);
for (int i = 0; i < configuration.getDocumentDataFieldCount(); i++) {
properties.put("dataField" + i, dataFieldValue);
}
return instance;
}
private boolean shouldContinue(long startTimeMillis, long iterationCount) {
Duration maxDurationTime = configuration.getMaxRunningTimeDuration();
int maxNumberOfOperations = configuration.getNumberOfOperations();
if (maxDurationTime == null) {
return iterationCount < maxNumberOfOperations;
}
if (startTimeMillis + maxDurationTime.toMillis() < System.currentTimeMillis()) {
return false;
}
if (maxNumberOfOperations < 0) {
return true;
}
return iterationCount < maxNumberOfOperations;
}
private void performWorkload(BaseSubscriber<PojoizedJson> baseSubscriber, long i) throws InterruptedException {
Mono<PojoizedJson> result;
int clientIndex = (int) (i % clientDocsMap.size());
CosmosAsyncClient client = (CosmosAsyncClient) clientDocsMap.keySet().toArray()[clientIndex];
int docIndex = (int) i % clientDocsMap.get(client).size();
PojoizedJson doc = clientDocsMap.get(client).get(docIndex);
String partitionKeyValue = doc.getId();
result = client.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).readItem(doc.getId(),
new PartitionKey(partitionKeyValue),
PojoizedJson.class).map(CosmosAsyncItemResponse::getItem);
concurrencyControlSemaphore.acquire();
AsyncReadBenchmark.LatencySubscriber<PojoizedJson> latencySubscriber = new AsyncReadBenchmark.LatencySubscriber<>(baseSubscriber);
latencySubscriber.context = latency.time();
result.subscribeOn(Schedulers.parallel()).subscribe(latencySubscriber);
}
} |
codestyle: space after comma please use intellij autoformatting on this new file. | private void createClients() {
String csvFile = "clientHostAndKey.txt";
String line = "";
String splitBy = ";";
try (BufferedReader br = new BufferedReader(new FileReader(csvFile))) {
while ((line = br.readLine()) != null) {
String[] hostAndKey = line.split(splitBy);
if (hostAndKey.length >= 2) {
String endpoint = hostAndKey[0].substring(hostAndKey[0].indexOf(ACCOUNT_ENDPOINT_TAG) + ACCOUNT_ENDPOINT_TAG.length());
String key = hostAndKey[1].substring(hostAndKey[1].indexOf(ACCOUNT_KEY_TAG) + ACCOUNT_KEY_TAG.length());
CosmosAsyncClient asyncClient = new CosmosClientBuilder()
.endpoint(endpoint)
.key(key)
.connectionPolicy(configuration.getConnectionPolicy())
.consistencyLevel(configuration.getConsistencyLevel())
.connectionReuseAcrossClientsEnabled(true)
.buildAsyncClient();
List<PojoizedJson> docsToRead = new ArrayList<>();
if (!configuration.isDeleteCollections()) {
CosmosAsyncDatabase cosmosAsyncDatabase = asyncClient.createDatabaseIfNotExists(this.configuration.getDatabaseId()).block().getDatabase();
CosmosAsyncContainer cosmosAsyncContainer = cosmosAsyncDatabase.createContainerIfNotExists(configuration.getCollectionId(), PARTITION_KEY, 100000).block().getContainer();
String partitionKey = cosmosAsyncContainer.read().block().getProperties().getPartitionKeyDefinition()
.getPaths().iterator().next().split("/")[1];
String dataFieldValue = RandomStringUtils.randomAlphabetic(this.configuration.getDocumentDataFieldSize());
ArrayList<Flux<PojoizedJson>> createDocumentObservables = new ArrayList<>();
for (int i = 0; i < this.configuration.getNumberOfPreCreatedDocuments(); i++) {
String uuid = UUID.randomUUID().toString();
com.azure.cosmos.benchmark.PojoizedJson newDoc = generateDocument(uuid, dataFieldValue, partitionKey);
Flux<PojoizedJson> obs = cosmosAsyncContainer.createItem(newDoc).map(resp -> {
com.azure.cosmos.benchmark.PojoizedJson x =
resp.getItem();
return x;
}
).flux();
createDocumentObservables.add(obs);
}
docsToRead = Flux.merge(Flux.fromIterable(createDocumentObservables), 100).collectList().block();
logger.info("Client have been initialized with data created for host {}", hostAndKey[0]);
} else {
logger.info("Client have been initialized with host {}", hostAndKey[0]);
}
clientDocsMap.put(asyncClient, docsToRead);
}
}
MeterRegistry registry = configuration.getAzureMonitorMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
registry = configuration.getGraphiteMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
logger.info("Total number of client created for ReadThroughputWithMultipleClient {}",clientDocsMap.size());
} catch (FileNotFoundException e) {
e.printStackTrace();
} catch (IOException e) {
e.printStackTrace();
}
} | logger.info("Total number of client created for ReadThroughputWithMultipleClient {}",clientDocsMap.size()); | private void createClients() {
String csvFile = "clientHostAndKey.txt";
String line = "";
String splitBy = ";";
try (BufferedReader br = new BufferedReader(new FileReader(csvFile))) {
while ((line = br.readLine()) != null) {
String[] hostAndKey = line.split(splitBy);
if (hostAndKey.length >= 2) {
String endpoint = hostAndKey[0].substring(hostAndKey[0].indexOf(ACCOUNT_ENDPOINT_TAG) + ACCOUNT_ENDPOINT_TAG.length());
String key = hostAndKey[1].substring(hostAndKey[1].indexOf(ACCOUNT_KEY_TAG) + ACCOUNT_KEY_TAG.length());
CosmosAsyncClient asyncClient = new CosmosClientBuilder()
.endpoint(endpoint)
.key(key)
.connectionPolicy(configuration.getConnectionPolicy())
.consistencyLevel(configuration.getConsistencyLevel())
.connectionReuseAcrossClientsEnabled(true)
.buildAsyncClient();
List<PojoizedJson> docsToRead = new ArrayList<>();
if (!configuration.isDeleteCollections()) {
CosmosAsyncDatabase cosmosAsyncDatabase = asyncClient.createDatabaseIfNotExists(this.configuration.getDatabaseId()).block().getDatabase();
CosmosAsyncContainer cosmosAsyncContainer =
cosmosAsyncDatabase.createContainerIfNotExists(configuration.getCollectionId(), PARTITION_KEY, configuration.getThroughput()).block().getContainer();
String partitionKey = cosmosAsyncContainer.read().block().getProperties().getPartitionKeyDefinition()
.getPaths().iterator().next().split("/")[1];
String dataFieldValue = RandomStringUtils.randomAlphabetic(this.configuration.getDocumentDataFieldSize());
ArrayList<Flux<PojoizedJson>> createDocumentObservables = new ArrayList<>();
for (int i = 0; i < this.configuration.getNumberOfPreCreatedDocuments(); i++) {
String uuid = UUID.randomUUID().toString();
com.azure.cosmos.benchmark.PojoizedJson newDoc = generateDocument(uuid, dataFieldValue, partitionKey);
Flux<PojoizedJson> obs = cosmosAsyncContainer.createItem(newDoc).map(resp -> {
com.azure.cosmos.benchmark.PojoizedJson x =
resp.getItem();
return x;
}
).flux();
createDocumentObservables.add(obs);
}
docsToRead = Flux.merge(Flux.fromIterable(createDocumentObservables), 100).collectList().block();
logger.info("Client have been initialized with data created for host {}", hostAndKey[0]);
} else {
logger.info("Client have been initialized with host {}", hostAndKey[0]);
}
clientDocsMap.put(asyncClient, docsToRead);
}
}
logger.info("Total number of client created for ReadThroughputWithMultipleClient {}", clientDocsMap.size());
} catch (FileNotFoundException e) {
logger.error(e.getMessage());
} catch (IOException e) {
logger.error(e.getMessage());
}
MeterRegistry registry = configuration.getAzureMonitorMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
registry = configuration.getGraphiteMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
} | class AsynReadWithMultipleClients<T> {
private final static String PARTITION_KEY = "/pk";
private final static String ACCOUNT_ENDPOINT_TAG = "AccountEndpoint=";
private final static String ACCOUNT_KEY_TAG = "AccountKey=";
private final Semaphore concurrencyControlSemaphore;
private final Logger logger;
private final Configuration configuration;
private MetricRegistry metricsRegistry = new MetricRegistry();
private ScheduledReporter reporter;
private Meter successMeter;
private Meter failureMeter;
private Timer latency;
private Map<CosmosAsyncClient, List<PojoizedJson>> clientDocsMap = new HashMap<>();
AsynReadWithMultipleClients(Configuration cfg) {
logger = LoggerFactory.getLogger(this.getClass());
this.configuration = cfg;
createClients();
if (configuration.getGraphiteEndpoint() != null) {
final Graphite graphite = new Graphite(new InetSocketAddress(configuration.getGraphiteEndpoint(), configuration.getGraphiteEndpointPort()));
reporter = GraphiteReporter.forRegistry(metricsRegistry)
.prefixedWith(configuration.getOperationType().name())
.convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS)
.filter(MetricFilter.ALL)
.build(graphite);
} else {
reporter = ConsoleReporter.forRegistry(metricsRegistry).convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS).build();
}
concurrencyControlSemaphore = new Semaphore(cfg.getConcurrency());
}
void deleteCollection() {
try {
for (CosmosAsyncClient cosmosAsyncClient : clientDocsMap.keySet()) {
cosmosAsyncClient.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).delete().block();
}
} catch (CosmosClientException e) {
if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) {
} else {
throw e;
}
} finally {
logger.info("Container on all client have been deleted successfully");
}
}
void run() throws Exception {
successMeter = metricsRegistry.meter("
failureMeter = metricsRegistry.meter("
latency = metricsRegistry.timer("Latency");
reporter.start(configuration.getPrintingInterval(), TimeUnit.SECONDS);
AtomicLong count = new AtomicLong(0);
long i;
long startTime = System.currentTimeMillis();
for (i = 0; shouldContinue(startTime, i); i++) {
BaseSubscriber<PojoizedJson> baseSubscriber = new BaseSubscriber<PojoizedJson>() {
@Override
protected void hookOnSubscribe(Subscription subscription) {
super.hookOnSubscribe(subscription);
}
@Override
protected void hookOnNext(PojoizedJson value) {
logger.debug("hookOnNext: {}, count:{}", value, count.get());
}
@Override
protected void hookOnCancel() {
this.hookOnError(new CancellationException());
}
@Override
protected void hookOnComplete() {
successMeter.mark();
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onSuccess();
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
@Override
protected void hookOnError(Throwable throwable) {
failureMeter.mark();
logger.error("Encountered failure {} on thread {}",
throwable.getMessage(), Thread.currentThread().getName(), throwable);
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onError(throwable);
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
};
performWorkload(baseSubscriber, i);
}
synchronized (count) {
while (count.get() < i) {
count.wait();
}
}
long endTime = System.currentTimeMillis();
logger.info("[{}] operations performed in [{}] seconds.",
configuration.getNumberOfOperations(), (int) ((endTime - startTime) / 1000));
reporter.report();
reporter.close();
}
void shutdown() {
for (CosmosAsyncClient asyncClient : clientDocsMap.keySet()) {
asyncClient.close();
}
}
protected void onSuccess() {
}
protected void onError(Throwable throwable) {
}
private PojoizedJson generateDocument(String idString, String dataFieldValue, String partitionKey) {
com.azure.cosmos.benchmark.PojoizedJson instance = new com.azure.cosmos.benchmark.PojoizedJson();
Map<String, String> properties = instance.getInstance();
properties.put("id", idString);
properties.put(partitionKey, idString);
for (int i = 0; i < configuration.getDocumentDataFieldCount(); i++) {
properties.put("dataField" + i, dataFieldValue);
}
return instance;
}
private boolean shouldContinue(long startTimeMillis, long iterationCount) {
Duration maxDurationTime = configuration.getMaxRunningTimeDuration();
int maxNumberOfOperations = configuration.getNumberOfOperations();
if (maxDurationTime == null) {
return iterationCount < maxNumberOfOperations;
}
if (startTimeMillis + maxDurationTime.toMillis() < System.currentTimeMillis()) {
return false;
}
if (maxNumberOfOperations < 0) {
return true;
}
return iterationCount < maxNumberOfOperations;
}
private void performWorkload(BaseSubscriber<PojoizedJson> baseSubscriber, long i) throws InterruptedException {
Mono<PojoizedJson> result;
int clientIndex = (int) (i % clientDocsMap.size());
CosmosAsyncClient client = (CosmosAsyncClient) clientDocsMap.keySet().toArray()[clientIndex];
int docIndex = (int) i % clientDocsMap.get(client).size();
PojoizedJson doc = clientDocsMap.get(client).get(docIndex);
String partitionKeyValue = doc.getId();
result = client.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).readItem(doc.getId(),
new PartitionKey(partitionKeyValue),
PojoizedJson.class).map(CosmosAsyncItemResponse::getItem);
concurrencyControlSemaphore.acquire();
AsyncReadBenchmark.LatencySubscriber<PojoizedJson> latencySubscriber = new AsyncReadBenchmark.LatencySubscriber<>(baseSubscriber);
latencySubscriber.context = latency.time();
result.subscribeOn(Schedulers.parallel()).subscribe(latencySubscriber);
}
} | class AsynReadWithMultipleClients<T> {
private final static String PARTITION_KEY = "/pk";
private final static String ACCOUNT_ENDPOINT_TAG = "AccountEndpoint=";
private final static String ACCOUNT_KEY_TAG = "AccountKey=";
private final Semaphore concurrencyControlSemaphore;
private final Logger logger;
private final Configuration configuration;
private MetricRegistry metricsRegistry = new MetricRegistry();
private ScheduledReporter reporter;
private Meter successMeter;
private Meter failureMeter;
private Timer latency;
private Map<CosmosAsyncClient, List<PojoizedJson>> clientDocsMap = new HashMap<>();
AsynReadWithMultipleClients(Configuration cfg) {
logger = LoggerFactory.getLogger(this.getClass());
this.configuration = cfg;
createClients();
if (configuration.getGraphiteEndpoint() != null) {
final Graphite graphite = new Graphite(new InetSocketAddress(configuration.getGraphiteEndpoint(), configuration.getGraphiteEndpointPort()));
reporter = GraphiteReporter.forRegistry(metricsRegistry)
.prefixedWith(configuration.getOperationType().name())
.convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS)
.filter(MetricFilter.ALL)
.build(graphite);
} else {
reporter = ConsoleReporter.forRegistry(metricsRegistry).convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS).build();
}
concurrencyControlSemaphore = new Semaphore(cfg.getConcurrency());
}
void deleteCollection() {
try {
for (CosmosAsyncClient cosmosAsyncClient : clientDocsMap.keySet()) {
cosmosAsyncClient.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).delete().block();
}
} catch (CosmosClientException e) {
if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) {
} else {
throw e;
}
} finally {
logger.info("Container on all client have been deleted successfully");
}
}
void run() throws Exception {
successMeter = metricsRegistry.meter("
failureMeter = metricsRegistry.meter("
latency = metricsRegistry.timer("Latency");
reporter.start(configuration.getPrintingInterval(), TimeUnit.SECONDS);
AtomicLong count = new AtomicLong(0);
long i;
long startTime = System.currentTimeMillis();
for (i = 0; shouldContinue(startTime, i); i++) {
BaseSubscriber<PojoizedJson> baseSubscriber = new BaseSubscriber<PojoizedJson>() {
@Override
protected void hookOnSubscribe(Subscription subscription) {
super.hookOnSubscribe(subscription);
}
@Override
protected void hookOnNext(PojoizedJson value) {
logger.debug("hookOnNext: {}, count:{}", value, count.get());
}
@Override
protected void hookOnCancel() {
this.hookOnError(new CancellationException());
}
@Override
protected void hookOnComplete() {
successMeter.mark();
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onSuccess();
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
@Override
protected void hookOnError(Throwable throwable) {
failureMeter.mark();
logger.error("Encountered failure {} on thread {}",
throwable.getMessage(), Thread.currentThread().getName(), throwable);
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onError(throwable);
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
};
performWorkload(baseSubscriber, i);
}
synchronized (count) {
while (count.get() < i) {
count.wait();
}
}
long endTime = System.currentTimeMillis();
logger.info("[{}] operations performed in [{}] seconds.",
configuration.getNumberOfOperations(), (int) ((endTime - startTime) / 1000));
reporter.report();
reporter.close();
}
void shutdown() {
for (CosmosAsyncClient asyncClient : clientDocsMap.keySet()) {
asyncClient.close();
}
}
protected void onSuccess() {
}
protected void onError(Throwable throwable) {
}
private PojoizedJson generateDocument(String idString, String dataFieldValue, String partitionKey) {
com.azure.cosmos.benchmark.PojoizedJson instance = new com.azure.cosmos.benchmark.PojoizedJson();
Map<String, String> properties = instance.getInstance();
properties.put("id", idString);
properties.put(partitionKey, idString);
for (int i = 0; i < configuration.getDocumentDataFieldCount(); i++) {
properties.put("dataField" + i, dataFieldValue);
}
return instance;
}
private boolean shouldContinue(long startTimeMillis, long iterationCount) {
Duration maxDurationTime = configuration.getMaxRunningTimeDuration();
int maxNumberOfOperations = configuration.getNumberOfOperations();
if (maxDurationTime == null) {
return iterationCount < maxNumberOfOperations;
}
if (startTimeMillis + maxDurationTime.toMillis() < System.currentTimeMillis()) {
return false;
}
if (maxNumberOfOperations < 0) {
return true;
}
return iterationCount < maxNumberOfOperations;
}
private void performWorkload(BaseSubscriber<PojoizedJson> baseSubscriber, long i) throws InterruptedException {
Mono<PojoizedJson> result;
int clientIndex = (int) (i % clientDocsMap.size());
CosmosAsyncClient client = (CosmosAsyncClient) clientDocsMap.keySet().toArray()[clientIndex];
int docIndex = (int) i % clientDocsMap.get(client).size();
PojoizedJson doc = clientDocsMap.get(client).get(docIndex);
String partitionKeyValue = doc.getId();
result = client.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).readItem(doc.getId(),
new PartitionKey(partitionKeyValue),
PojoizedJson.class).map(CosmosAsyncItemResponse::getItem);
concurrencyControlSemaphore.acquire();
AsyncReadBenchmark.LatencySubscriber<PojoizedJson> latencySubscriber = new AsyncReadBenchmark.LatencySubscriber<>(baseSubscriber);
latencySubscriber.context = latency.time();
result.subscribeOn(Schedulers.parallel()).subscribe(latencySubscriber);
}
} |
setting metrics registery should be outside of try/catch block. not related to connection string parsing. | private void createClients() {
String csvFile = "clientHostAndKey.txt";
String line = "";
String splitBy = ";";
try (BufferedReader br = new BufferedReader(new FileReader(csvFile))) {
while ((line = br.readLine()) != null) {
String[] hostAndKey = line.split(splitBy);
if (hostAndKey.length >= 2) {
String endpoint = hostAndKey[0].substring(hostAndKey[0].indexOf(ACCOUNT_ENDPOINT_TAG) + ACCOUNT_ENDPOINT_TAG.length());
String key = hostAndKey[1].substring(hostAndKey[1].indexOf(ACCOUNT_KEY_TAG) + ACCOUNT_KEY_TAG.length());
CosmosAsyncClient asyncClient = new CosmosClientBuilder()
.endpoint(endpoint)
.key(key)
.connectionPolicy(configuration.getConnectionPolicy())
.consistencyLevel(configuration.getConsistencyLevel())
.connectionReuseAcrossClientsEnabled(true)
.buildAsyncClient();
List<PojoizedJson> docsToRead = new ArrayList<>();
if (!configuration.isDeleteCollections()) {
CosmosAsyncDatabase cosmosAsyncDatabase = asyncClient.createDatabaseIfNotExists(this.configuration.getDatabaseId()).block().getDatabase();
CosmosAsyncContainer cosmosAsyncContainer = cosmosAsyncDatabase.createContainerIfNotExists(configuration.getCollectionId(), PARTITION_KEY, 100000).block().getContainer();
String partitionKey = cosmosAsyncContainer.read().block().getProperties().getPartitionKeyDefinition()
.getPaths().iterator().next().split("/")[1];
String dataFieldValue = RandomStringUtils.randomAlphabetic(this.configuration.getDocumentDataFieldSize());
ArrayList<Flux<PojoizedJson>> createDocumentObservables = new ArrayList<>();
for (int i = 0; i < this.configuration.getNumberOfPreCreatedDocuments(); i++) {
String uuid = UUID.randomUUID().toString();
com.azure.cosmos.benchmark.PojoizedJson newDoc = generateDocument(uuid, dataFieldValue, partitionKey);
Flux<PojoizedJson> obs = cosmosAsyncContainer.createItem(newDoc).map(resp -> {
com.azure.cosmos.benchmark.PojoizedJson x =
resp.getItem();
return x;
}
).flux();
createDocumentObservables.add(obs);
}
docsToRead = Flux.merge(Flux.fromIterable(createDocumentObservables), 100).collectList().block();
logger.info("Client have been initialized with data created for host {}", hostAndKey[0]);
} else {
logger.info("Client have been initialized with host {}", hostAndKey[0]);
}
clientDocsMap.put(asyncClient, docsToRead);
}
}
MeterRegistry registry = configuration.getAzureMonitorMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
registry = configuration.getGraphiteMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
logger.info("Total number of client created for ReadThroughputWithMultipleClient {}",clientDocsMap.size());
} catch (FileNotFoundException e) {
e.printStackTrace();
} catch (IOException e) {
e.printStackTrace();
}
} | MeterRegistry registry = configuration.getAzureMonitorMeterRegistry(); | private void createClients() {
String csvFile = "clientHostAndKey.txt";
String line = "";
String splitBy = ";";
try (BufferedReader br = new BufferedReader(new FileReader(csvFile))) {
while ((line = br.readLine()) != null) {
String[] hostAndKey = line.split(splitBy);
if (hostAndKey.length >= 2) {
String endpoint = hostAndKey[0].substring(hostAndKey[0].indexOf(ACCOUNT_ENDPOINT_TAG) + ACCOUNT_ENDPOINT_TAG.length());
String key = hostAndKey[1].substring(hostAndKey[1].indexOf(ACCOUNT_KEY_TAG) + ACCOUNT_KEY_TAG.length());
CosmosAsyncClient asyncClient = new CosmosClientBuilder()
.endpoint(endpoint)
.key(key)
.connectionPolicy(configuration.getConnectionPolicy())
.consistencyLevel(configuration.getConsistencyLevel())
.connectionReuseAcrossClientsEnabled(true)
.buildAsyncClient();
List<PojoizedJson> docsToRead = new ArrayList<>();
if (!configuration.isDeleteCollections()) {
CosmosAsyncDatabase cosmosAsyncDatabase = asyncClient.createDatabaseIfNotExists(this.configuration.getDatabaseId()).block().getDatabase();
CosmosAsyncContainer cosmosAsyncContainer =
cosmosAsyncDatabase.createContainerIfNotExists(configuration.getCollectionId(), PARTITION_KEY, configuration.getThroughput()).block().getContainer();
String partitionKey = cosmosAsyncContainer.read().block().getProperties().getPartitionKeyDefinition()
.getPaths().iterator().next().split("/")[1];
String dataFieldValue = RandomStringUtils.randomAlphabetic(this.configuration.getDocumentDataFieldSize());
ArrayList<Flux<PojoizedJson>> createDocumentObservables = new ArrayList<>();
for (int i = 0; i < this.configuration.getNumberOfPreCreatedDocuments(); i++) {
String uuid = UUID.randomUUID().toString();
com.azure.cosmos.benchmark.PojoizedJson newDoc = generateDocument(uuid, dataFieldValue, partitionKey);
Flux<PojoizedJson> obs = cosmosAsyncContainer.createItem(newDoc).map(resp -> {
com.azure.cosmos.benchmark.PojoizedJson x =
resp.getItem();
return x;
}
).flux();
createDocumentObservables.add(obs);
}
docsToRead = Flux.merge(Flux.fromIterable(createDocumentObservables), 100).collectList().block();
logger.info("Client have been initialized with data created for host {}", hostAndKey[0]);
} else {
logger.info("Client have been initialized with host {}", hostAndKey[0]);
}
clientDocsMap.put(asyncClient, docsToRead);
}
}
logger.info("Total number of client created for ReadThroughputWithMultipleClient {}", clientDocsMap.size());
} catch (FileNotFoundException e) {
logger.error(e.getMessage());
} catch (IOException e) {
logger.error(e.getMessage());
}
MeterRegistry registry = configuration.getAzureMonitorMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
registry = configuration.getGraphiteMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
} | class AsynReadWithMultipleClients<T> {
private final static String PARTITION_KEY = "/pk";
private final static String ACCOUNT_ENDPOINT_TAG = "AccountEndpoint=";
private final static String ACCOUNT_KEY_TAG = "AccountKey=";
private final Semaphore concurrencyControlSemaphore;
private final Logger logger;
private final Configuration configuration;
private MetricRegistry metricsRegistry = new MetricRegistry();
private ScheduledReporter reporter;
private Meter successMeter;
private Meter failureMeter;
private Timer latency;
private Map<CosmosAsyncClient, List<PojoizedJson>> clientDocsMap = new HashMap<>();
AsynReadWithMultipleClients(Configuration cfg) {
logger = LoggerFactory.getLogger(this.getClass());
this.configuration = cfg;
createClients();
if (configuration.getGraphiteEndpoint() != null) {
final Graphite graphite = new Graphite(new InetSocketAddress(configuration.getGraphiteEndpoint(), configuration.getGraphiteEndpointPort()));
reporter = GraphiteReporter.forRegistry(metricsRegistry)
.prefixedWith(configuration.getOperationType().name())
.convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS)
.filter(MetricFilter.ALL)
.build(graphite);
} else {
reporter = ConsoleReporter.forRegistry(metricsRegistry).convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS).build();
}
concurrencyControlSemaphore = new Semaphore(cfg.getConcurrency());
}
void deleteCollection() {
try {
for (CosmosAsyncClient cosmosAsyncClient : clientDocsMap.keySet()) {
cosmosAsyncClient.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).delete().block();
}
} catch (CosmosClientException e) {
if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) {
} else {
throw e;
}
} finally {
logger.info("Container on all client have been deleted successfully");
}
}
void run() throws Exception {
successMeter = metricsRegistry.meter("
failureMeter = metricsRegistry.meter("
latency = metricsRegistry.timer("Latency");
reporter.start(configuration.getPrintingInterval(), TimeUnit.SECONDS);
AtomicLong count = new AtomicLong(0);
long i;
long startTime = System.currentTimeMillis();
for (i = 0; shouldContinue(startTime, i); i++) {
BaseSubscriber<PojoizedJson> baseSubscriber = new BaseSubscriber<PojoizedJson>() {
@Override
protected void hookOnSubscribe(Subscription subscription) {
super.hookOnSubscribe(subscription);
}
@Override
protected void hookOnNext(PojoizedJson value) {
logger.debug("hookOnNext: {}, count:{}", value, count.get());
}
@Override
protected void hookOnCancel() {
this.hookOnError(new CancellationException());
}
@Override
protected void hookOnComplete() {
successMeter.mark();
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onSuccess();
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
@Override
protected void hookOnError(Throwable throwable) {
failureMeter.mark();
logger.error("Encountered failure {} on thread {}",
throwable.getMessage(), Thread.currentThread().getName(), throwable);
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onError(throwable);
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
};
performWorkload(baseSubscriber, i);
}
synchronized (count) {
while (count.get() < i) {
count.wait();
}
}
long endTime = System.currentTimeMillis();
logger.info("[{}] operations performed in [{}] seconds.",
configuration.getNumberOfOperations(), (int) ((endTime - startTime) / 1000));
reporter.report();
reporter.close();
}
void shutdown() {
for (CosmosAsyncClient asyncClient : clientDocsMap.keySet()) {
asyncClient.close();
}
}
protected void onSuccess() {
}
protected void onError(Throwable throwable) {
}
private PojoizedJson generateDocument(String idString, String dataFieldValue, String partitionKey) {
com.azure.cosmos.benchmark.PojoizedJson instance = new com.azure.cosmos.benchmark.PojoizedJson();
Map<String, String> properties = instance.getInstance();
properties.put("id", idString);
properties.put(partitionKey, idString);
for (int i = 0; i < configuration.getDocumentDataFieldCount(); i++) {
properties.put("dataField" + i, dataFieldValue);
}
return instance;
}
private boolean shouldContinue(long startTimeMillis, long iterationCount) {
Duration maxDurationTime = configuration.getMaxRunningTimeDuration();
int maxNumberOfOperations = configuration.getNumberOfOperations();
if (maxDurationTime == null) {
return iterationCount < maxNumberOfOperations;
}
if (startTimeMillis + maxDurationTime.toMillis() < System.currentTimeMillis()) {
return false;
}
if (maxNumberOfOperations < 0) {
return true;
}
return iterationCount < maxNumberOfOperations;
}
private void performWorkload(BaseSubscriber<PojoizedJson> baseSubscriber, long i) throws InterruptedException {
Mono<PojoizedJson> result;
int clientIndex = (int) (i % clientDocsMap.size());
CosmosAsyncClient client = (CosmosAsyncClient) clientDocsMap.keySet().toArray()[clientIndex];
int docIndex = (int) i % clientDocsMap.get(client).size();
PojoizedJson doc = clientDocsMap.get(client).get(docIndex);
String partitionKeyValue = doc.getId();
result = client.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).readItem(doc.getId(),
new PartitionKey(partitionKeyValue),
PojoizedJson.class).map(CosmosAsyncItemResponse::getItem);
concurrencyControlSemaphore.acquire();
AsyncReadBenchmark.LatencySubscriber<PojoizedJson> latencySubscriber = new AsyncReadBenchmark.LatencySubscriber<>(baseSubscriber);
latencySubscriber.context = latency.time();
result.subscribeOn(Schedulers.parallel()).subscribe(latencySubscriber);
}
} | class AsynReadWithMultipleClients<T> {
private final static String PARTITION_KEY = "/pk";
private final static String ACCOUNT_ENDPOINT_TAG = "AccountEndpoint=";
private final static String ACCOUNT_KEY_TAG = "AccountKey=";
private final Semaphore concurrencyControlSemaphore;
private final Logger logger;
private final Configuration configuration;
private MetricRegistry metricsRegistry = new MetricRegistry();
private ScheduledReporter reporter;
private Meter successMeter;
private Meter failureMeter;
private Timer latency;
private Map<CosmosAsyncClient, List<PojoizedJson>> clientDocsMap = new HashMap<>();
AsynReadWithMultipleClients(Configuration cfg) {
logger = LoggerFactory.getLogger(this.getClass());
this.configuration = cfg;
createClients();
if (configuration.getGraphiteEndpoint() != null) {
final Graphite graphite = new Graphite(new InetSocketAddress(configuration.getGraphiteEndpoint(), configuration.getGraphiteEndpointPort()));
reporter = GraphiteReporter.forRegistry(metricsRegistry)
.prefixedWith(configuration.getOperationType().name())
.convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS)
.filter(MetricFilter.ALL)
.build(graphite);
} else {
reporter = ConsoleReporter.forRegistry(metricsRegistry).convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS).build();
}
concurrencyControlSemaphore = new Semaphore(cfg.getConcurrency());
}
void deleteCollection() {
try {
for (CosmosAsyncClient cosmosAsyncClient : clientDocsMap.keySet()) {
cosmosAsyncClient.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).delete().block();
}
} catch (CosmosClientException e) {
if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) {
} else {
throw e;
}
} finally {
logger.info("Container on all client have been deleted successfully");
}
}
void run() throws Exception {
successMeter = metricsRegistry.meter("
failureMeter = metricsRegistry.meter("
latency = metricsRegistry.timer("Latency");
reporter.start(configuration.getPrintingInterval(), TimeUnit.SECONDS);
AtomicLong count = new AtomicLong(0);
long i;
long startTime = System.currentTimeMillis();
for (i = 0; shouldContinue(startTime, i); i++) {
BaseSubscriber<PojoizedJson> baseSubscriber = new BaseSubscriber<PojoizedJson>() {
@Override
protected void hookOnSubscribe(Subscription subscription) {
super.hookOnSubscribe(subscription);
}
@Override
protected void hookOnNext(PojoizedJson value) {
logger.debug("hookOnNext: {}, count:{}", value, count.get());
}
@Override
protected void hookOnCancel() {
this.hookOnError(new CancellationException());
}
@Override
protected void hookOnComplete() {
successMeter.mark();
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onSuccess();
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
@Override
protected void hookOnError(Throwable throwable) {
failureMeter.mark();
logger.error("Encountered failure {} on thread {}",
throwable.getMessage(), Thread.currentThread().getName(), throwable);
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onError(throwable);
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
};
performWorkload(baseSubscriber, i);
}
synchronized (count) {
while (count.get() < i) {
count.wait();
}
}
long endTime = System.currentTimeMillis();
logger.info("[{}] operations performed in [{}] seconds.",
configuration.getNumberOfOperations(), (int) ((endTime - startTime) / 1000));
reporter.report();
reporter.close();
}
void shutdown() {
for (CosmosAsyncClient asyncClient : clientDocsMap.keySet()) {
asyncClient.close();
}
}
protected void onSuccess() {
}
protected void onError(Throwable throwable) {
}
private PojoizedJson generateDocument(String idString, String dataFieldValue, String partitionKey) {
com.azure.cosmos.benchmark.PojoizedJson instance = new com.azure.cosmos.benchmark.PojoizedJson();
Map<String, String> properties = instance.getInstance();
properties.put("id", idString);
properties.put(partitionKey, idString);
for (int i = 0; i < configuration.getDocumentDataFieldCount(); i++) {
properties.put("dataField" + i, dataFieldValue);
}
return instance;
}
private boolean shouldContinue(long startTimeMillis, long iterationCount) {
Duration maxDurationTime = configuration.getMaxRunningTimeDuration();
int maxNumberOfOperations = configuration.getNumberOfOperations();
if (maxDurationTime == null) {
return iterationCount < maxNumberOfOperations;
}
if (startTimeMillis + maxDurationTime.toMillis() < System.currentTimeMillis()) {
return false;
}
if (maxNumberOfOperations < 0) {
return true;
}
return iterationCount < maxNumberOfOperations;
}
private void performWorkload(BaseSubscriber<PojoizedJson> baseSubscriber, long i) throws InterruptedException {
Mono<PojoizedJson> result;
int clientIndex = (int) (i % clientDocsMap.size());
CosmosAsyncClient client = (CosmosAsyncClient) clientDocsMap.keySet().toArray()[clientIndex];
int docIndex = (int) i % clientDocsMap.get(client).size();
PojoizedJson doc = clientDocsMap.get(client).get(docIndex);
String partitionKeyValue = doc.getId();
result = client.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).readItem(doc.getId(),
new PartitionKey(partitionKeyValue),
PojoizedJson.class).map(CosmosAsyncItemResponse::getItem);
concurrencyControlSemaphore.acquire();
AsyncReadBenchmark.LatencySubscriber<PojoizedJson> latencySubscriber = new AsyncReadBenchmark.LatencySubscriber<>(baseSubscriber);
latencySubscriber.context = latency.time();
result.subscribeOn(Schedulers.parallel()).subscribe(latencySubscriber);
}
} |
if we are creating the collection, we should allow the throughput to be configurable similar to collectionId and databaseId. | private void createClients() {
String csvFile = "clientHostAndKey.txt";
String line = "";
String splitBy = ";";
try (BufferedReader br = new BufferedReader(new FileReader(csvFile))) {
while ((line = br.readLine()) != null) {
String[] hostAndKey = line.split(splitBy);
if (hostAndKey.length >= 2) {
String endpoint = hostAndKey[0].substring(hostAndKey[0].indexOf(ACCOUNT_ENDPOINT_TAG) + ACCOUNT_ENDPOINT_TAG.length());
String key = hostAndKey[1].substring(hostAndKey[1].indexOf(ACCOUNT_KEY_TAG) + ACCOUNT_KEY_TAG.length());
CosmosAsyncClient asyncClient = new CosmosClientBuilder()
.endpoint(endpoint)
.key(key)
.connectionPolicy(configuration.getConnectionPolicy())
.consistencyLevel(configuration.getConsistencyLevel())
.connectionReuseAcrossClientsEnabled(true)
.buildAsyncClient();
List<PojoizedJson> docsToRead = new ArrayList<>();
if (!configuration.isDeleteCollections()) {
CosmosAsyncDatabase cosmosAsyncDatabase = asyncClient.createDatabaseIfNotExists(this.configuration.getDatabaseId()).block().getDatabase();
CosmosAsyncContainer cosmosAsyncContainer = cosmosAsyncDatabase.createContainerIfNotExists(configuration.getCollectionId(), PARTITION_KEY, 100000).block().getContainer();
String partitionKey = cosmosAsyncContainer.read().block().getProperties().getPartitionKeyDefinition()
.getPaths().iterator().next().split("/")[1];
String dataFieldValue = RandomStringUtils.randomAlphabetic(this.configuration.getDocumentDataFieldSize());
ArrayList<Flux<PojoizedJson>> createDocumentObservables = new ArrayList<>();
for (int i = 0; i < this.configuration.getNumberOfPreCreatedDocuments(); i++) {
String uuid = UUID.randomUUID().toString();
com.azure.cosmos.benchmark.PojoizedJson newDoc = generateDocument(uuid, dataFieldValue, partitionKey);
Flux<PojoizedJson> obs = cosmosAsyncContainer.createItem(newDoc).map(resp -> {
com.azure.cosmos.benchmark.PojoizedJson x =
resp.getItem();
return x;
}
).flux();
createDocumentObservables.add(obs);
}
docsToRead = Flux.merge(Flux.fromIterable(createDocumentObservables), 100).collectList().block();
logger.info("Client have been initialized with data created for host {}", hostAndKey[0]);
} else {
logger.info("Client have been initialized with host {}", hostAndKey[0]);
}
clientDocsMap.put(asyncClient, docsToRead);
}
}
MeterRegistry registry = configuration.getAzureMonitorMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
registry = configuration.getGraphiteMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
logger.info("Total number of client created for ReadThroughputWithMultipleClient {}",clientDocsMap.size());
} catch (FileNotFoundException e) {
e.printStackTrace();
} catch (IOException e) {
e.printStackTrace();
}
} | CosmosAsyncContainer cosmosAsyncContainer = cosmosAsyncDatabase.createContainerIfNotExists(configuration.getCollectionId(), PARTITION_KEY, 100000).block().getContainer(); | private void createClients() {
String csvFile = "clientHostAndKey.txt";
String line = "";
String splitBy = ";";
try (BufferedReader br = new BufferedReader(new FileReader(csvFile))) {
while ((line = br.readLine()) != null) {
String[] hostAndKey = line.split(splitBy);
if (hostAndKey.length >= 2) {
String endpoint = hostAndKey[0].substring(hostAndKey[0].indexOf(ACCOUNT_ENDPOINT_TAG) + ACCOUNT_ENDPOINT_TAG.length());
String key = hostAndKey[1].substring(hostAndKey[1].indexOf(ACCOUNT_KEY_TAG) + ACCOUNT_KEY_TAG.length());
CosmosAsyncClient asyncClient = new CosmosClientBuilder()
.endpoint(endpoint)
.key(key)
.connectionPolicy(configuration.getConnectionPolicy())
.consistencyLevel(configuration.getConsistencyLevel())
.connectionReuseAcrossClientsEnabled(true)
.buildAsyncClient();
List<PojoizedJson> docsToRead = new ArrayList<>();
if (!configuration.isDeleteCollections()) {
CosmosAsyncDatabase cosmosAsyncDatabase = asyncClient.createDatabaseIfNotExists(this.configuration.getDatabaseId()).block().getDatabase();
CosmosAsyncContainer cosmosAsyncContainer =
cosmosAsyncDatabase.createContainerIfNotExists(configuration.getCollectionId(), PARTITION_KEY, configuration.getThroughput()).block().getContainer();
String partitionKey = cosmosAsyncContainer.read().block().getProperties().getPartitionKeyDefinition()
.getPaths().iterator().next().split("/")[1];
String dataFieldValue = RandomStringUtils.randomAlphabetic(this.configuration.getDocumentDataFieldSize());
ArrayList<Flux<PojoizedJson>> createDocumentObservables = new ArrayList<>();
for (int i = 0; i < this.configuration.getNumberOfPreCreatedDocuments(); i++) {
String uuid = UUID.randomUUID().toString();
com.azure.cosmos.benchmark.PojoizedJson newDoc = generateDocument(uuid, dataFieldValue, partitionKey);
Flux<PojoizedJson> obs = cosmosAsyncContainer.createItem(newDoc).map(resp -> {
com.azure.cosmos.benchmark.PojoizedJson x =
resp.getItem();
return x;
}
).flux();
createDocumentObservables.add(obs);
}
docsToRead = Flux.merge(Flux.fromIterable(createDocumentObservables), 100).collectList().block();
logger.info("Client have been initialized with data created for host {}", hostAndKey[0]);
} else {
logger.info("Client have been initialized with host {}", hostAndKey[0]);
}
clientDocsMap.put(asyncClient, docsToRead);
}
}
logger.info("Total number of client created for ReadThroughputWithMultipleClient {}", clientDocsMap.size());
} catch (FileNotFoundException e) {
logger.error(e.getMessage());
} catch (IOException e) {
logger.error(e.getMessage());
}
MeterRegistry registry = configuration.getAzureMonitorMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
registry = configuration.getGraphiteMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
} | class AsynReadWithMultipleClients<T> {
private final static String PARTITION_KEY = "/pk";
private final static String ACCOUNT_ENDPOINT_TAG = "AccountEndpoint=";
private final static String ACCOUNT_KEY_TAG = "AccountKey=";
private final Semaphore concurrencyControlSemaphore;
private final Logger logger;
private final Configuration configuration;
private MetricRegistry metricsRegistry = new MetricRegistry();
private ScheduledReporter reporter;
private Meter successMeter;
private Meter failureMeter;
private Timer latency;
private Map<CosmosAsyncClient, List<PojoizedJson>> clientDocsMap = new HashMap<>();
AsynReadWithMultipleClients(Configuration cfg) {
logger = LoggerFactory.getLogger(this.getClass());
this.configuration = cfg;
createClients();
if (configuration.getGraphiteEndpoint() != null) {
final Graphite graphite = new Graphite(new InetSocketAddress(configuration.getGraphiteEndpoint(), configuration.getGraphiteEndpointPort()));
reporter = GraphiteReporter.forRegistry(metricsRegistry)
.prefixedWith(configuration.getOperationType().name())
.convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS)
.filter(MetricFilter.ALL)
.build(graphite);
} else {
reporter = ConsoleReporter.forRegistry(metricsRegistry).convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS).build();
}
concurrencyControlSemaphore = new Semaphore(cfg.getConcurrency());
}
void deleteCollection() {
try {
for (CosmosAsyncClient cosmosAsyncClient : clientDocsMap.keySet()) {
cosmosAsyncClient.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).delete().block();
}
} catch (CosmosClientException e) {
if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) {
} else {
throw e;
}
} finally {
logger.info("Container on all client have been deleted successfully");
}
}
void run() throws Exception {
successMeter = metricsRegistry.meter("
failureMeter = metricsRegistry.meter("
latency = metricsRegistry.timer("Latency");
reporter.start(configuration.getPrintingInterval(), TimeUnit.SECONDS);
AtomicLong count = new AtomicLong(0);
long i;
long startTime = System.currentTimeMillis();
for (i = 0; shouldContinue(startTime, i); i++) {
BaseSubscriber<PojoizedJson> baseSubscriber = new BaseSubscriber<PojoizedJson>() {
@Override
protected void hookOnSubscribe(Subscription subscription) {
super.hookOnSubscribe(subscription);
}
@Override
protected void hookOnNext(PojoizedJson value) {
logger.debug("hookOnNext: {}, count:{}", value, count.get());
}
@Override
protected void hookOnCancel() {
this.hookOnError(new CancellationException());
}
@Override
protected void hookOnComplete() {
successMeter.mark();
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onSuccess();
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
@Override
protected void hookOnError(Throwable throwable) {
failureMeter.mark();
logger.error("Encountered failure {} on thread {}",
throwable.getMessage(), Thread.currentThread().getName(), throwable);
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onError(throwable);
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
};
performWorkload(baseSubscriber, i);
}
synchronized (count) {
while (count.get() < i) {
count.wait();
}
}
long endTime = System.currentTimeMillis();
logger.info("[{}] operations performed in [{}] seconds.",
configuration.getNumberOfOperations(), (int) ((endTime - startTime) / 1000));
reporter.report();
reporter.close();
}
void shutdown() {
for (CosmosAsyncClient asyncClient : clientDocsMap.keySet()) {
asyncClient.close();
}
}
protected void onSuccess() {
}
protected void onError(Throwable throwable) {
}
private PojoizedJson generateDocument(String idString, String dataFieldValue, String partitionKey) {
com.azure.cosmos.benchmark.PojoizedJson instance = new com.azure.cosmos.benchmark.PojoizedJson();
Map<String, String> properties = instance.getInstance();
properties.put("id", idString);
properties.put(partitionKey, idString);
for (int i = 0; i < configuration.getDocumentDataFieldCount(); i++) {
properties.put("dataField" + i, dataFieldValue);
}
return instance;
}
private boolean shouldContinue(long startTimeMillis, long iterationCount) {
Duration maxDurationTime = configuration.getMaxRunningTimeDuration();
int maxNumberOfOperations = configuration.getNumberOfOperations();
if (maxDurationTime == null) {
return iterationCount < maxNumberOfOperations;
}
if (startTimeMillis + maxDurationTime.toMillis() < System.currentTimeMillis()) {
return false;
}
if (maxNumberOfOperations < 0) {
return true;
}
return iterationCount < maxNumberOfOperations;
}
private void performWorkload(BaseSubscriber<PojoizedJson> baseSubscriber, long i) throws InterruptedException {
Mono<PojoizedJson> result;
int clientIndex = (int) (i % clientDocsMap.size());
CosmosAsyncClient client = (CosmosAsyncClient) clientDocsMap.keySet().toArray()[clientIndex];
int docIndex = (int) i % clientDocsMap.get(client).size();
PojoizedJson doc = clientDocsMap.get(client).get(docIndex);
String partitionKeyValue = doc.getId();
result = client.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).readItem(doc.getId(),
new PartitionKey(partitionKeyValue),
PojoizedJson.class).map(CosmosAsyncItemResponse::getItem);
concurrencyControlSemaphore.acquire();
AsyncReadBenchmark.LatencySubscriber<PojoizedJson> latencySubscriber = new AsyncReadBenchmark.LatencySubscriber<>(baseSubscriber);
latencySubscriber.context = latency.time();
result.subscribeOn(Schedulers.parallel()).subscribe(latencySubscriber);
}
} | class AsynReadWithMultipleClients<T> {
// Partition key path used when pre-creating the benchmark container.
private final static String PARTITION_KEY = "/pk";
// Markers used to parse "AccountEndpoint=...;AccountKey=..." lines from the config file.
private final static String ACCOUNT_ENDPOINT_TAG = "AccountEndpoint=";
private final static String ACCOUNT_KEY_TAG = "AccountKey=";
// Caps the number of in-flight operations to the configured concurrency.
private final Semaphore concurrencyControlSemaphore;
private final Logger logger;
private final Configuration configuration;
// Dropwizard metrics: throughput meters, latency timer, and their reporter.
private MetricRegistry metricsRegistry = new MetricRegistry();
private ScheduledReporter reporter;
private Meter successMeter;
private Meter failureMeter;
private Timer latency;
// One entry per Cosmos client, mapped to the documents pre-created for it to read.
private Map<CosmosAsyncClient, List<PojoizedJson>> clientDocsMap = new HashMap<>();
// Builds all clients (and their seed documents), then wires metrics reporting to
// Graphite when an endpoint is configured, otherwise to the console.
AsynReadWithMultipleClients(Configuration cfg) {
logger = LoggerFactory.getLogger(this.getClass())
this.configuration = cfg;
createClients();
if (configuration.getGraphiteEndpoint() != null) {
final Graphite graphite = new Graphite(new InetSocketAddress(configuration.getGraphiteEndpoint(), configuration.getGraphiteEndpointPort()));
reporter = GraphiteReporter.forRegistry(metricsRegistry)
.prefixedWith(configuration.getOperationType().name())
.convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS)
.filter(MetricFilter.ALL)
.build(graphite);
} else {
reporter = ConsoleReporter.forRegistry(metricsRegistry).convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS).build();
}
concurrencyControlSemaphore = new Semaphore(cfg.getConcurrency());
}
/**
 * Deletes the benchmark container on every client. A 404 (container already gone)
 * is treated as success; any other service error is propagated to the caller.
 */
void deleteCollection() {
    try {
        for (CosmosAsyncClient cosmosAsyncClient : clientDocsMap.keySet()) {
            cosmosAsyncClient.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).delete().block();
        }
    } catch (CosmosClientException e) {
        // BUGFIX: previously an empty if-branch swallowed NOTFOUND and a finally
        // block logged the success message even when another status caused a rethrow.
        if (e.getStatusCode() != HttpConstants.StatusCodes.NOTFOUND) {
            throw e;
        }
    }
    logger.info("Container on all client have been deleted successfully");
}
void run() throws Exception {
successMeter = metricsRegistry.meter("
failureMeter = metricsRegistry.meter("
latency = metricsRegistry.timer("Latency");
reporter.start(configuration.getPrintingInterval(), TimeUnit.SECONDS);
AtomicLong count = new AtomicLong(0);
long i;
long startTime = System.currentTimeMillis();
for (i = 0; shouldContinue(startTime, i); i++) {
BaseSubscriber<PojoizedJson> baseSubscriber = new BaseSubscriber<PojoizedJson>() {
@Override
protected void hookOnSubscribe(Subscription subscription) {
super.hookOnSubscribe(subscription);
}
@Override
protected void hookOnNext(PojoizedJson value) {
logger.debug("hookOnNext: {}, count:{}", value, count.get());
}
@Override
protected void hookOnCancel() {
this.hookOnError(new CancellationException());
}
@Override
protected void hookOnComplete() {
successMeter.mark();
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onSuccess();
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
@Override
protected void hookOnError(Throwable throwable) {
failureMeter.mark();
logger.error("Encountered failure {} on thread {}",
throwable.getMessage(), Thread.currentThread().getName(), throwable);
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onError(throwable);
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
};
performWorkload(baseSubscriber, i);
}
synchronized (count) {
while (count.get() < i) {
count.wait();
}
}
long endTime = System.currentTimeMillis();
logger.info("[{}] operations performed in [{}] seconds.",
configuration.getNumberOfOperations(), (int) ((endTime - startTime) / 1000));
reporter.report();
reporter.close();
}
/** Closes every Cosmos client owned by this benchmark. */
void shutdown() {
    clientDocsMap.keySet().forEach(CosmosAsyncClient::close);
}
// Extension hook: invoked after each successful read; no-op by default.
protected void onSuccess() {
}
// Extension hook: invoked after each failed read; no-op by default.
protected void onError(Throwable throwable) {
}
/**
 * Builds a synthetic document whose id also serves as its partition key value,
 * padded with the configured number of identical data fields.
 *
 * @param idString       document id (and partition key value)
 * @param dataFieldValue payload written into every data field
 * @param partitionKey   name of the partition key property
 * @return the populated document
 */
private PojoizedJson generateDocument(String idString, String dataFieldValue, String partitionKey) {
    PojoizedJson document = new PojoizedJson();
    Map<String, String> fields = document.getInstance();
    fields.put("id", idString);
    fields.put(partitionKey, idString);
    for (int n = 0; n < configuration.getDocumentDataFieldCount(); n++) {
        fields.put("dataField" + n, dataFieldValue);
    }
    return document;
}
// Decides whether the benchmark loop should run another iteration, based on the
// configured operation budget and (optionally) a wall-clock time budget.
private boolean shouldContinue(long startTimeMillis, long iterationCount) {
Duration maxDurationTime = configuration.getMaxRunningTimeDuration();
int maxNumberOfOperations = configuration.getNumberOfOperations();
// No time budget configured: the operation count is the only limit.
// NOTE(review): if maxNumberOfOperations is negative here the loop never runs,
// whereas below a negative count means "unlimited" — confirm this asymmetry is intended.
if (maxDurationTime == null) {
return iterationCount < maxNumberOfOperations;
}
// Time budget exhausted: stop regardless of the operation count.
if (startTimeMillis + maxDurationTime.toMillis() < System.currentTimeMillis()) {
return false;
}
// While time remains, a negative operation count means "no operation limit".
if (maxNumberOfOperations < 0) {
return true;
}
return iterationCount < maxNumberOfOperations;
}
/**
 * Issues one read operation for iteration {@code i}, round-robining across the
 * configured clients and each client's pre-created documents.
 *
 * @param baseSubscriber subscriber that records success/failure for this operation
 * @param i              iteration counter, used to pick the client and document
 * @throws InterruptedException if interrupted while waiting for a concurrency permit
 */
private void performWorkload(BaseSubscriber<PojoizedJson> baseSubscriber, long i) throws InterruptedException {
    Mono<PojoizedJson> result;
    int clientIndex = (int) (i % clientDocsMap.size());
    CosmosAsyncClient client = (CosmosAsyncClient) clientDocsMap.keySet().toArray()[clientIndex];
    // BUGFIX: was "(int) i % size" — the cast binds tighter than '%', so for
    // i > Integer.MAX_VALUE the truncated int can be negative, yielding a negative
    // docIndex and an out-of-bounds read. Apply the modulo on the long first
    // (matches the clientIndex computation above).
    int docIndex = (int) (i % clientDocsMap.get(client).size());
    PojoizedJson doc = clientDocsMap.get(client).get(docIndex);
    // The document id doubles as its partition key value (see generateDocument).
    String partitionKeyValue = doc.getId();
    result = client.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).readItem(doc.getId(),
        new PartitionKey(partitionKeyValue),
        PojoizedJson.class).map(CosmosAsyncItemResponse::getItem);
    // Throttle in-flight operations to the configured concurrency level.
    concurrencyControlSemaphore.acquire();
    AsyncReadBenchmark.LatencySubscriber<PojoizedJson> latencySubscriber = new AsyncReadBenchmark.LatencySubscriber<>(baseSubscriber);
    latencySubscriber.context = latency.time();
    result.subscribeOn(Schedulers.parallel()).subscribe(latencySubscriber);
}
} |
discussed offline | private void createClients() {
String csvFile = "clientHostAndKey.txt";
String line = "";
String splitBy = ";";
try (BufferedReader br = new BufferedReader(new FileReader(csvFile))) {
while ((line = br.readLine()) != null) {
String[] hostAndKey = line.split(splitBy);
if (hostAndKey.length >= 2) {
String endpoint = hostAndKey[0].substring(hostAndKey[0].indexOf(ACCOUNT_ENDPOINT_TAG) + ACCOUNT_ENDPOINT_TAG.length());
String key = hostAndKey[1].substring(hostAndKey[1].indexOf(ACCOUNT_KEY_TAG) + ACCOUNT_KEY_TAG.length());
CosmosAsyncClient asyncClient = new CosmosClientBuilder()
.endpoint(endpoint)
.key(key)
.connectionPolicy(configuration.getConnectionPolicy())
.consistencyLevel(configuration.getConsistencyLevel())
.connectionReuseAcrossClientsEnabled(true)
.buildAsyncClient();
List<PojoizedJson> docsToRead = new ArrayList<>();
if (!configuration.isDeleteCollections()) {
CosmosAsyncDatabase cosmosAsyncDatabase = asyncClient.createDatabaseIfNotExists(this.configuration.getDatabaseId()).block().getDatabase();
CosmosAsyncContainer cosmosAsyncContainer = cosmosAsyncDatabase.createContainerIfNotExists(configuration.getCollectionId(), PARTITION_KEY, 100000).block().getContainer();
String partitionKey = cosmosAsyncContainer.read().block().getProperties().getPartitionKeyDefinition()
.getPaths().iterator().next().split("/")[1];
String dataFieldValue = RandomStringUtils.randomAlphabetic(this.configuration.getDocumentDataFieldSize());
ArrayList<Flux<PojoizedJson>> createDocumentObservables = new ArrayList<>();
for (int i = 0; i < this.configuration.getNumberOfPreCreatedDocuments(); i++) {
String uuid = UUID.randomUUID().toString();
com.azure.cosmos.benchmark.PojoizedJson newDoc = generateDocument(uuid, dataFieldValue, partitionKey);
Flux<PojoizedJson> obs = cosmosAsyncContainer.createItem(newDoc).map(resp -> {
com.azure.cosmos.benchmark.PojoizedJson x =
resp.getItem();
return x;
}
).flux();
createDocumentObservables.add(obs);
}
docsToRead = Flux.merge(Flux.fromIterable(createDocumentObservables), 100).collectList().block();
logger.info("Client have been initialized with data created for host {}", hostAndKey[0]);
} else {
logger.info("Client have been initialized with host {}", hostAndKey[0]);
}
clientDocsMap.put(asyncClient, docsToRead);
}
}
MeterRegistry registry = configuration.getAzureMonitorMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
registry = configuration.getGraphiteMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
logger.info("Total number of client created for ReadThroughputWithMultipleClient {}",clientDocsMap.size());
} catch (FileNotFoundException e) {
e.printStackTrace();
} catch (IOException e) {
e.printStackTrace();
}
} | logger.info("Client have been initialized with data created for host {}", hostAndKey[0]); | private void createClients() {
String csvFile = "clientHostAndKey.txt";
String line = "";
String splitBy = ";";
try (BufferedReader br = new BufferedReader(new FileReader(csvFile))) {
while ((line = br.readLine()) != null) {
String[] hostAndKey = line.split(splitBy);
if (hostAndKey.length >= 2) {
String endpoint = hostAndKey[0].substring(hostAndKey[0].indexOf(ACCOUNT_ENDPOINT_TAG) + ACCOUNT_ENDPOINT_TAG.length());
String key = hostAndKey[1].substring(hostAndKey[1].indexOf(ACCOUNT_KEY_TAG) + ACCOUNT_KEY_TAG.length());
CosmosAsyncClient asyncClient = new CosmosClientBuilder()
.endpoint(endpoint)
.key(key)
.connectionPolicy(configuration.getConnectionPolicy())
.consistencyLevel(configuration.getConsistencyLevel())
.connectionReuseAcrossClientsEnabled(true)
.buildAsyncClient();
List<PojoizedJson> docsToRead = new ArrayList<>();
if (!configuration.isDeleteCollections()) {
CosmosAsyncDatabase cosmosAsyncDatabase = asyncClient.createDatabaseIfNotExists(this.configuration.getDatabaseId()).block().getDatabase();
CosmosAsyncContainer cosmosAsyncContainer =
cosmosAsyncDatabase.createContainerIfNotExists(configuration.getCollectionId(), PARTITION_KEY, configuration.getThroughput()).block().getContainer();
String partitionKey = cosmosAsyncContainer.read().block().getProperties().getPartitionKeyDefinition()
.getPaths().iterator().next().split("/")[1];
String dataFieldValue = RandomStringUtils.randomAlphabetic(this.configuration.getDocumentDataFieldSize());
ArrayList<Flux<PojoizedJson>> createDocumentObservables = new ArrayList<>();
for (int i = 0; i < this.configuration.getNumberOfPreCreatedDocuments(); i++) {
String uuid = UUID.randomUUID().toString();
com.azure.cosmos.benchmark.PojoizedJson newDoc = generateDocument(uuid, dataFieldValue, partitionKey);
Flux<PojoizedJson> obs = cosmosAsyncContainer.createItem(newDoc).map(resp -> {
com.azure.cosmos.benchmark.PojoizedJson x =
resp.getItem();
return x;
}
).flux();
createDocumentObservables.add(obs);
}
docsToRead = Flux.merge(Flux.fromIterable(createDocumentObservables), 100).collectList().block();
logger.info("Client have been initialized with data created for host {}", hostAndKey[0]);
} else {
logger.info("Client have been initialized with host {}", hostAndKey[0]);
}
clientDocsMap.put(asyncClient, docsToRead);
}
}
logger.info("Total number of client created for ReadThroughputWithMultipleClient {}", clientDocsMap.size());
} catch (FileNotFoundException e) {
logger.error(e.getMessage());
} catch (IOException e) {
logger.error(e.getMessage());
}
MeterRegistry registry = configuration.getAzureMonitorMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
registry = configuration.getGraphiteMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
} | class AsynReadWithMultipleClients<T> {
private final static String PARTITION_KEY = "/pk";
private final static String ACCOUNT_ENDPOINT_TAG = "AccountEndpoint=";
private final static String ACCOUNT_KEY_TAG = "AccountKey=";
private final Semaphore concurrencyControlSemaphore;
private final Logger logger;
private final Configuration configuration;
private MetricRegistry metricsRegistry = new MetricRegistry();
private ScheduledReporter reporter;
private Meter successMeter;
private Meter failureMeter;
private Timer latency;
private Map<CosmosAsyncClient, List<PojoizedJson>> clientDocsMap = new HashMap<>();
AsynReadWithMultipleClients(Configuration cfg) {
logger = LoggerFactory.getLogger(this.getClass());
this.configuration = cfg;
createClients();
if (configuration.getGraphiteEndpoint() != null) {
final Graphite graphite = new Graphite(new InetSocketAddress(configuration.getGraphiteEndpoint(), configuration.getGraphiteEndpointPort()));
reporter = GraphiteReporter.forRegistry(metricsRegistry)
.prefixedWith(configuration.getOperationType().name())
.convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS)
.filter(MetricFilter.ALL)
.build(graphite);
} else {
reporter = ConsoleReporter.forRegistry(metricsRegistry).convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS).build();
}
concurrencyControlSemaphore = new Semaphore(cfg.getConcurrency());
}
void deleteCollection() {
try {
for (CosmosAsyncClient cosmosAsyncClient : clientDocsMap.keySet()) {
cosmosAsyncClient.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).delete().block();
}
} catch (CosmosClientException e) {
if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) {
} else {
throw e;
}
} finally {
logger.info("Container on all client have been deleted successfully");
}
}
void run() throws Exception {
successMeter = metricsRegistry.meter("
failureMeter = metricsRegistry.meter("
latency = metricsRegistry.timer("Latency");
reporter.start(configuration.getPrintingInterval(), TimeUnit.SECONDS);
AtomicLong count = new AtomicLong(0);
long i;
long startTime = System.currentTimeMillis();
for (i = 0; shouldContinue(startTime, i); i++) {
BaseSubscriber<PojoizedJson> baseSubscriber = new BaseSubscriber<PojoizedJson>() {
@Override
protected void hookOnSubscribe(Subscription subscription) {
super.hookOnSubscribe(subscription);
}
@Override
protected void hookOnNext(PojoizedJson value) {
logger.debug("hookOnNext: {}, count:{}", value, count.get());
}
@Override
protected void hookOnCancel() {
this.hookOnError(new CancellationException());
}
@Override
protected void hookOnComplete() {
successMeter.mark();
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onSuccess();
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
@Override
protected void hookOnError(Throwable throwable) {
failureMeter.mark();
logger.error("Encountered failure {} on thread {}",
throwable.getMessage(), Thread.currentThread().getName(), throwable);
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onError(throwable);
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
};
performWorkload(baseSubscriber, i);
}
synchronized (count) {
while (count.get() < i) {
count.wait();
}
}
long endTime = System.currentTimeMillis();
logger.info("[{}] operations performed in [{}] seconds.",
configuration.getNumberOfOperations(), (int) ((endTime - startTime) / 1000));
reporter.report();
reporter.close();
}
void shutdown() {
for (CosmosAsyncClient asyncClient : clientDocsMap.keySet()) {
asyncClient.close();
}
}
protected void onSuccess() {
}
protected void onError(Throwable throwable) {
}
private PojoizedJson generateDocument(String idString, String dataFieldValue, String partitionKey) {
com.azure.cosmos.benchmark.PojoizedJson instance = new com.azure.cosmos.benchmark.PojoizedJson();
Map<String, String> properties = instance.getInstance();
properties.put("id", idString);
properties.put(partitionKey, idString);
for (int i = 0; i < configuration.getDocumentDataFieldCount(); i++) {
properties.put("dataField" + i, dataFieldValue);
}
return instance;
}
private boolean shouldContinue(long startTimeMillis, long iterationCount) {
Duration maxDurationTime = configuration.getMaxRunningTimeDuration();
int maxNumberOfOperations = configuration.getNumberOfOperations();
if (maxDurationTime == null) {
return iterationCount < maxNumberOfOperations;
}
if (startTimeMillis + maxDurationTime.toMillis() < System.currentTimeMillis()) {
return false;
}
if (maxNumberOfOperations < 0) {
return true;
}
return iterationCount < maxNumberOfOperations;
}
/**
 * Issues one read operation for iteration {@code i}, round-robining across the
 * configured clients and each client's pre-created documents.
 *
 * @param baseSubscriber subscriber that records success/failure for this operation
 * @param i              iteration counter, used to pick the client and document
 * @throws InterruptedException if interrupted while waiting for a concurrency permit
 */
private void performWorkload(BaseSubscriber<PojoizedJson> baseSubscriber, long i) throws InterruptedException {
    Mono<PojoizedJson> result;
    int clientIndex = (int) (i % clientDocsMap.size());
    CosmosAsyncClient client = (CosmosAsyncClient) clientDocsMap.keySet().toArray()[clientIndex];
    // BUGFIX: was "(int) i % size" — the cast binds tighter than '%', so for
    // i > Integer.MAX_VALUE the truncated int can be negative, yielding a negative
    // docIndex and an out-of-bounds read. Apply the modulo on the long first.
    int docIndex = (int) (i % clientDocsMap.get(client).size());
    PojoizedJson doc = clientDocsMap.get(client).get(docIndex);
    // The document id doubles as its partition key value (see generateDocument).
    String partitionKeyValue = doc.getId();
    result = client.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).readItem(doc.getId(),
        new PartitionKey(partitionKeyValue),
        PojoizedJson.class).map(CosmosAsyncItemResponse::getItem);
    // Throttle in-flight operations to the configured concurrency level.
    concurrencyControlSemaphore.acquire();
    AsyncReadBenchmark.LatencySubscriber<PojoizedJson> latencySubscriber = new AsyncReadBenchmark.LatencySubscriber<>(baseSubscriber);
    latencySubscriber.context = latency.time();
    result.subscribeOn(Schedulers.parallel()).subscribe(latencySubscriber);
}
} | class AsynReadWithMultipleClients<T> {
private final static String PARTITION_KEY = "/pk";
private final static String ACCOUNT_ENDPOINT_TAG = "AccountEndpoint=";
private final static String ACCOUNT_KEY_TAG = "AccountKey=";
private final Semaphore concurrencyControlSemaphore;
private final Logger logger;
private final Configuration configuration;
private MetricRegistry metricsRegistry = new MetricRegistry();
private ScheduledReporter reporter;
private Meter successMeter;
private Meter failureMeter;
private Timer latency;
private Map<CosmosAsyncClient, List<PojoizedJson>> clientDocsMap = new HashMap<>();
AsynReadWithMultipleClients(Configuration cfg) {
logger = LoggerFactory.getLogger(this.getClass());
this.configuration = cfg;
createClients();
if (configuration.getGraphiteEndpoint() != null) {
final Graphite graphite = new Graphite(new InetSocketAddress(configuration.getGraphiteEndpoint(), configuration.getGraphiteEndpointPort()));
reporter = GraphiteReporter.forRegistry(metricsRegistry)
.prefixedWith(configuration.getOperationType().name())
.convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS)
.filter(MetricFilter.ALL)
.build(graphite);
} else {
reporter = ConsoleReporter.forRegistry(metricsRegistry).convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS).build();
}
concurrencyControlSemaphore = new Semaphore(cfg.getConcurrency());
}
void deleteCollection() {
try {
for (CosmosAsyncClient cosmosAsyncClient : clientDocsMap.keySet()) {
cosmosAsyncClient.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).delete().block();
}
} catch (CosmosClientException e) {
if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) {
} else {
throw e;
}
} finally {
logger.info("Container on all client have been deleted successfully");
}
}
void run() throws Exception {
successMeter = metricsRegistry.meter("
failureMeter = metricsRegistry.meter("
latency = metricsRegistry.timer("Latency");
reporter.start(configuration.getPrintingInterval(), TimeUnit.SECONDS);
AtomicLong count = new AtomicLong(0);
long i;
long startTime = System.currentTimeMillis();
for (i = 0; shouldContinue(startTime, i); i++) {
BaseSubscriber<PojoizedJson> baseSubscriber = new BaseSubscriber<PojoizedJson>() {
@Override
protected void hookOnSubscribe(Subscription subscription) {
super.hookOnSubscribe(subscription);
}
@Override
protected void hookOnNext(PojoizedJson value) {
logger.debug("hookOnNext: {}, count:{}", value, count.get());
}
@Override
protected void hookOnCancel() {
this.hookOnError(new CancellationException());
}
@Override
protected void hookOnComplete() {
successMeter.mark();
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onSuccess();
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
@Override
protected void hookOnError(Throwable throwable) {
failureMeter.mark();
logger.error("Encountered failure {} on thread {}",
throwable.getMessage(), Thread.currentThread().getName(), throwable);
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onError(throwable);
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
};
performWorkload(baseSubscriber, i);
}
synchronized (count) {
while (count.get() < i) {
count.wait();
}
}
long endTime = System.currentTimeMillis();
logger.info("[{}] operations performed in [{}] seconds.",
configuration.getNumberOfOperations(), (int) ((endTime - startTime) / 1000));
reporter.report();
reporter.close();
}
void shutdown() {
for (CosmosAsyncClient asyncClient : clientDocsMap.keySet()) {
asyncClient.close();
}
}
protected void onSuccess() {
}
protected void onError(Throwable throwable) {
}
private PojoizedJson generateDocument(String idString, String dataFieldValue, String partitionKey) {
com.azure.cosmos.benchmark.PojoizedJson instance = new com.azure.cosmos.benchmark.PojoizedJson();
Map<String, String> properties = instance.getInstance();
properties.put("id", idString);
properties.put(partitionKey, idString);
for (int i = 0; i < configuration.getDocumentDataFieldCount(); i++) {
properties.put("dataField" + i, dataFieldValue);
}
return instance;
}
private boolean shouldContinue(long startTimeMillis, long iterationCount) {
Duration maxDurationTime = configuration.getMaxRunningTimeDuration();
int maxNumberOfOperations = configuration.getNumberOfOperations();
if (maxDurationTime == null) {
return iterationCount < maxNumberOfOperations;
}
if (startTimeMillis + maxDurationTime.toMillis() < System.currentTimeMillis()) {
return false;
}
if (maxNumberOfOperations < 0) {
return true;
}
return iterationCount < maxNumberOfOperations;
}
/**
 * Issues one read operation for iteration {@code i}, round-robining across the
 * configured clients and each client's pre-created documents.
 *
 * @param baseSubscriber subscriber that records success/failure for this operation
 * @param i              iteration counter, used to pick the client and document
 * @throws InterruptedException if interrupted while waiting for a concurrency permit
 */
private void performWorkload(BaseSubscriber<PojoizedJson> baseSubscriber, long i) throws InterruptedException {
    Mono<PojoizedJson> result;
    int clientIndex = (int) (i % clientDocsMap.size());
    CosmosAsyncClient client = (CosmosAsyncClient) clientDocsMap.keySet().toArray()[clientIndex];
    // BUGFIX: was "(int) i % size" — the cast binds tighter than '%', so for
    // i > Integer.MAX_VALUE the truncated int can be negative, yielding a negative
    // docIndex and an out-of-bounds read. Apply the modulo on the long first.
    int docIndex = (int) (i % clientDocsMap.get(client).size());
    PojoizedJson doc = clientDocsMap.get(client).get(docIndex);
    // The document id doubles as its partition key value (see generateDocument).
    String partitionKeyValue = doc.getId();
    result = client.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).readItem(doc.getId(),
        new PartitionKey(partitionKeyValue),
        PojoizedJson.class).map(CosmosAsyncItemResponse::getItem);
    // Throttle in-flight operations to the configured concurrency level.
    concurrencyControlSemaphore.acquire();
    AsyncReadBenchmark.LatencySubscriber<PojoizedJson> latencySubscriber = new AsyncReadBenchmark.LatencySubscriber<>(baseSubscriber);
    latencySubscriber.context = latency.time();
    result.subscribeOn(Schedulers.parallel()).subscribe(latencySubscriber);
}
} |
discussed offline | private void createClients() {
String csvFile = "clientHostAndKey.txt";
String line = "";
String splitBy = ";";
try (BufferedReader br = new BufferedReader(new FileReader(csvFile))) {
while ((line = br.readLine()) != null) {
String[] hostAndKey = line.split(splitBy);
if (hostAndKey.length >= 2) {
String endpoint = hostAndKey[0].substring(hostAndKey[0].indexOf(ACCOUNT_ENDPOINT_TAG) + ACCOUNT_ENDPOINT_TAG.length());
String key = hostAndKey[1].substring(hostAndKey[1].indexOf(ACCOUNT_KEY_TAG) + ACCOUNT_KEY_TAG.length());
CosmosAsyncClient asyncClient = new CosmosClientBuilder()
.endpoint(endpoint)
.key(key)
.connectionPolicy(configuration.getConnectionPolicy())
.consistencyLevel(configuration.getConsistencyLevel())
.connectionReuseAcrossClientsEnabled(true)
.buildAsyncClient();
List<PojoizedJson> docsToRead = new ArrayList<>();
if (!configuration.isDeleteCollections()) {
CosmosAsyncDatabase cosmosAsyncDatabase = asyncClient.createDatabaseIfNotExists(this.configuration.getDatabaseId()).block().getDatabase();
CosmosAsyncContainer cosmosAsyncContainer = cosmosAsyncDatabase.createContainerIfNotExists(configuration.getCollectionId(), PARTITION_KEY, 100000).block().getContainer();
String partitionKey = cosmosAsyncContainer.read().block().getProperties().getPartitionKeyDefinition()
.getPaths().iterator().next().split("/")[1];
String dataFieldValue = RandomStringUtils.randomAlphabetic(this.configuration.getDocumentDataFieldSize());
ArrayList<Flux<PojoizedJson>> createDocumentObservables = new ArrayList<>();
for (int i = 0; i < this.configuration.getNumberOfPreCreatedDocuments(); i++) {
String uuid = UUID.randomUUID().toString();
com.azure.cosmos.benchmark.PojoizedJson newDoc = generateDocument(uuid, dataFieldValue, partitionKey);
Flux<PojoizedJson> obs = cosmosAsyncContainer.createItem(newDoc).map(resp -> {
com.azure.cosmos.benchmark.PojoizedJson x =
resp.getItem();
return x;
}
).flux();
createDocumentObservables.add(obs);
}
docsToRead = Flux.merge(Flux.fromIterable(createDocumentObservables), 100).collectList().block();
logger.info("Client have been initialized with data created for host {}", hostAndKey[0]);
} else {
logger.info("Client have been initialized with host {}", hostAndKey[0]);
}
clientDocsMap.put(asyncClient, docsToRead);
}
}
MeterRegistry registry = configuration.getAzureMonitorMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
registry = configuration.getGraphiteMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
logger.info("Total number of client created for ReadThroughputWithMultipleClient {}",clientDocsMap.size());
} catch (FileNotFoundException e) {
e.printStackTrace();
} catch (IOException e) {
e.printStackTrace();
}
} | logger.info("Client have been initialized with host {}", hostAndKey[0]); | private void createClients() {
String csvFile = "clientHostAndKey.txt";
String line = "";
String splitBy = ";";
try (BufferedReader br = new BufferedReader(new FileReader(csvFile))) {
while ((line = br.readLine()) != null) {
String[] hostAndKey = line.split(splitBy);
if (hostAndKey.length >= 2) {
String endpoint = hostAndKey[0].substring(hostAndKey[0].indexOf(ACCOUNT_ENDPOINT_TAG) + ACCOUNT_ENDPOINT_TAG.length());
String key = hostAndKey[1].substring(hostAndKey[1].indexOf(ACCOUNT_KEY_TAG) + ACCOUNT_KEY_TAG.length());
CosmosAsyncClient asyncClient = new CosmosClientBuilder()
.endpoint(endpoint)
.key(key)
.connectionPolicy(configuration.getConnectionPolicy())
.consistencyLevel(configuration.getConsistencyLevel())
.connectionReuseAcrossClientsEnabled(true)
.buildAsyncClient();
List<PojoizedJson> docsToRead = new ArrayList<>();
if (!configuration.isDeleteCollections()) {
CosmosAsyncDatabase cosmosAsyncDatabase = asyncClient.createDatabaseIfNotExists(this.configuration.getDatabaseId()).block().getDatabase();
CosmosAsyncContainer cosmosAsyncContainer =
cosmosAsyncDatabase.createContainerIfNotExists(configuration.getCollectionId(), PARTITION_KEY, configuration.getThroughput()).block().getContainer();
String partitionKey = cosmosAsyncContainer.read().block().getProperties().getPartitionKeyDefinition()
.getPaths().iterator().next().split("/")[1];
String dataFieldValue = RandomStringUtils.randomAlphabetic(this.configuration.getDocumentDataFieldSize());
ArrayList<Flux<PojoizedJson>> createDocumentObservables = new ArrayList<>();
for (int i = 0; i < this.configuration.getNumberOfPreCreatedDocuments(); i++) {
String uuid = UUID.randomUUID().toString();
com.azure.cosmos.benchmark.PojoizedJson newDoc = generateDocument(uuid, dataFieldValue, partitionKey);
Flux<PojoizedJson> obs = cosmosAsyncContainer.createItem(newDoc).map(resp -> {
com.azure.cosmos.benchmark.PojoizedJson x =
resp.getItem();
return x;
}
).flux();
createDocumentObservables.add(obs);
}
docsToRead = Flux.merge(Flux.fromIterable(createDocumentObservables), 100).collectList().block();
logger.info("Client have been initialized with data created for host {}", hostAndKey[0]);
} else {
logger.info("Client have been initialized with host {}", hostAndKey[0]);
}
clientDocsMap.put(asyncClient, docsToRead);
}
}
logger.info("Total number of client created for ReadThroughputWithMultipleClient {}", clientDocsMap.size());
} catch (FileNotFoundException e) {
logger.error(e.getMessage());
} catch (IOException e) {
logger.error(e.getMessage());
}
MeterRegistry registry = configuration.getAzureMonitorMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
registry = configuration.getGraphiteMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
} | class AsynReadWithMultipleClients<T> {
private final static String PARTITION_KEY = "/pk";
private final static String ACCOUNT_ENDPOINT_TAG = "AccountEndpoint=";
private final static String ACCOUNT_KEY_TAG = "AccountKey=";
private final Semaphore concurrencyControlSemaphore;
private final Logger logger;
private final Configuration configuration;
private MetricRegistry metricsRegistry = new MetricRegistry();
private ScheduledReporter reporter;
private Meter successMeter;
private Meter failureMeter;
private Timer latency;
private Map<CosmosAsyncClient, List<PojoizedJson>> clientDocsMap = new HashMap<>();
AsynReadWithMultipleClients(Configuration cfg) {
logger = LoggerFactory.getLogger(this.getClass());
this.configuration = cfg;
createClients();
if (configuration.getGraphiteEndpoint() != null) {
final Graphite graphite = new Graphite(new InetSocketAddress(configuration.getGraphiteEndpoint(), configuration.getGraphiteEndpointPort()));
reporter = GraphiteReporter.forRegistry(metricsRegistry)
.prefixedWith(configuration.getOperationType().name())
.convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS)
.filter(MetricFilter.ALL)
.build(graphite);
} else {
reporter = ConsoleReporter.forRegistry(metricsRegistry).convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS).build();
}
concurrencyControlSemaphore = new Semaphore(cfg.getConcurrency());
}
void deleteCollection() {
try {
for (CosmosAsyncClient cosmosAsyncClient : clientDocsMap.keySet()) {
cosmosAsyncClient.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).delete().block();
}
} catch (CosmosClientException e) {
if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) {
} else {
throw e;
}
} finally {
logger.info("Container on all client have been deleted successfully");
}
}
void run() throws Exception {
successMeter = metricsRegistry.meter("
failureMeter = metricsRegistry.meter("
latency = metricsRegistry.timer("Latency");
reporter.start(configuration.getPrintingInterval(), TimeUnit.SECONDS);
AtomicLong count = new AtomicLong(0);
long i;
long startTime = System.currentTimeMillis();
for (i = 0; shouldContinue(startTime, i); i++) {
BaseSubscriber<PojoizedJson> baseSubscriber = new BaseSubscriber<PojoizedJson>() {
@Override
protected void hookOnSubscribe(Subscription subscription) {
super.hookOnSubscribe(subscription);
}
@Override
protected void hookOnNext(PojoizedJson value) {
logger.debug("hookOnNext: {}, count:{}", value, count.get());
}
@Override
protected void hookOnCancel() {
this.hookOnError(new CancellationException());
}
@Override
protected void hookOnComplete() {
successMeter.mark();
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onSuccess();
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
@Override
protected void hookOnError(Throwable throwable) {
failureMeter.mark();
logger.error("Encountered failure {} on thread {}",
throwable.getMessage(), Thread.currentThread().getName(), throwable);
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onError(throwable);
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
};
performWorkload(baseSubscriber, i);
}
synchronized (count) {
while (count.get() < i) {
count.wait();
}
}
long endTime = System.currentTimeMillis();
logger.info("[{}] operations performed in [{}] seconds.",
configuration.getNumberOfOperations(), (int) ((endTime - startTime) / 1000));
reporter.report();
reporter.close();
}
void shutdown() {
for (CosmosAsyncClient asyncClient : clientDocsMap.keySet()) {
asyncClient.close();
}
}
protected void onSuccess() {
}
protected void onError(Throwable throwable) {
}
private PojoizedJson generateDocument(String idString, String dataFieldValue, String partitionKey) {
com.azure.cosmos.benchmark.PojoizedJson instance = new com.azure.cosmos.benchmark.PojoizedJson();
Map<String, String> properties = instance.getInstance();
properties.put("id", idString);
properties.put(partitionKey, idString);
for (int i = 0; i < configuration.getDocumentDataFieldCount(); i++) {
properties.put("dataField" + i, dataFieldValue);
}
return instance;
}
private boolean shouldContinue(long startTimeMillis, long iterationCount) {
Duration maxDurationTime = configuration.getMaxRunningTimeDuration();
int maxNumberOfOperations = configuration.getNumberOfOperations();
if (maxDurationTime == null) {
return iterationCount < maxNumberOfOperations;
}
if (startTimeMillis + maxDurationTime.toMillis() < System.currentTimeMillis()) {
return false;
}
if (maxNumberOfOperations < 0) {
return true;
}
return iterationCount < maxNumberOfOperations;
}
private void performWorkload(BaseSubscriber<PojoizedJson> baseSubscriber, long i) throws InterruptedException {
Mono<PojoizedJson> result;
int clientIndex = (int) (i % clientDocsMap.size());
CosmosAsyncClient client = (CosmosAsyncClient) clientDocsMap.keySet().toArray()[clientIndex];
int docIndex = (int) i % clientDocsMap.get(client).size();
PojoizedJson doc = clientDocsMap.get(client).get(docIndex);
String partitionKeyValue = doc.getId();
result = client.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).readItem(doc.getId(),
new PartitionKey(partitionKeyValue),
PojoizedJson.class).map(CosmosAsyncItemResponse::getItem);
concurrencyControlSemaphore.acquire();
AsyncReadBenchmark.LatencySubscriber<PojoizedJson> latencySubscriber = new AsyncReadBenchmark.LatencySubscriber<>(baseSubscriber);
latencySubscriber.context = latency.time();
result.subscribeOn(Schedulers.parallel()).subscribe(latencySubscriber);
}
} | class AsynReadWithMultipleClients<T> {
private final static String PARTITION_KEY = "/pk";
private final static String ACCOUNT_ENDPOINT_TAG = "AccountEndpoint=";
private final static String ACCOUNT_KEY_TAG = "AccountKey=";
private final Semaphore concurrencyControlSemaphore;
private final Logger logger;
private final Configuration configuration;
private MetricRegistry metricsRegistry = new MetricRegistry();
private ScheduledReporter reporter;
private Meter successMeter;
private Meter failureMeter;
private Timer latency;
private Map<CosmosAsyncClient, List<PojoizedJson>> clientDocsMap = new HashMap<>();
AsynReadWithMultipleClients(Configuration cfg) {
logger = LoggerFactory.getLogger(this.getClass());
this.configuration = cfg;
createClients();
if (configuration.getGraphiteEndpoint() != null) {
final Graphite graphite = new Graphite(new InetSocketAddress(configuration.getGraphiteEndpoint(), configuration.getGraphiteEndpointPort()));
reporter = GraphiteReporter.forRegistry(metricsRegistry)
.prefixedWith(configuration.getOperationType().name())
.convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS)
.filter(MetricFilter.ALL)
.build(graphite);
} else {
reporter = ConsoleReporter.forRegistry(metricsRegistry).convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS).build();
}
concurrencyControlSemaphore = new Semaphore(cfg.getConcurrency());
}
void deleteCollection() {
try {
for (CosmosAsyncClient cosmosAsyncClient : clientDocsMap.keySet()) {
cosmosAsyncClient.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).delete().block();
}
} catch (CosmosClientException e) {
if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) {
} else {
throw e;
}
} finally {
logger.info("Container on all client have been deleted successfully");
}
}
void run() throws Exception {
successMeter = metricsRegistry.meter("
failureMeter = metricsRegistry.meter("
latency = metricsRegistry.timer("Latency");
reporter.start(configuration.getPrintingInterval(), TimeUnit.SECONDS);
AtomicLong count = new AtomicLong(0);
long i;
long startTime = System.currentTimeMillis();
for (i = 0; shouldContinue(startTime, i); i++) {
BaseSubscriber<PojoizedJson> baseSubscriber = new BaseSubscriber<PojoizedJson>() {
@Override
protected void hookOnSubscribe(Subscription subscription) {
super.hookOnSubscribe(subscription);
}
@Override
protected void hookOnNext(PojoizedJson value) {
logger.debug("hookOnNext: {}, count:{}", value, count.get());
}
@Override
protected void hookOnCancel() {
this.hookOnError(new CancellationException());
}
@Override
protected void hookOnComplete() {
successMeter.mark();
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onSuccess();
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
@Override
protected void hookOnError(Throwable throwable) {
failureMeter.mark();
logger.error("Encountered failure {} on thread {}",
throwable.getMessage(), Thread.currentThread().getName(), throwable);
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onError(throwable);
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
};
performWorkload(baseSubscriber, i);
}
synchronized (count) {
while (count.get() < i) {
count.wait();
}
}
long endTime = System.currentTimeMillis();
logger.info("[{}] operations performed in [{}] seconds.",
configuration.getNumberOfOperations(), (int) ((endTime - startTime) / 1000));
reporter.report();
reporter.close();
}
void shutdown() {
for (CosmosAsyncClient asyncClient : clientDocsMap.keySet()) {
asyncClient.close();
}
}
protected void onSuccess() {
}
protected void onError(Throwable throwable) {
}
private PojoizedJson generateDocument(String idString, String dataFieldValue, String partitionKey) {
com.azure.cosmos.benchmark.PojoizedJson instance = new com.azure.cosmos.benchmark.PojoizedJson();
Map<String, String> properties = instance.getInstance();
properties.put("id", idString);
properties.put(partitionKey, idString);
for (int i = 0; i < configuration.getDocumentDataFieldCount(); i++) {
properties.put("dataField" + i, dataFieldValue);
}
return instance;
}
private boolean shouldContinue(long startTimeMillis, long iterationCount) {
Duration maxDurationTime = configuration.getMaxRunningTimeDuration();
int maxNumberOfOperations = configuration.getNumberOfOperations();
if (maxDurationTime == null) {
return iterationCount < maxNumberOfOperations;
}
if (startTimeMillis + maxDurationTime.toMillis() < System.currentTimeMillis()) {
return false;
}
if (maxNumberOfOperations < 0) {
return true;
}
return iterationCount < maxNumberOfOperations;
}
private void performWorkload(BaseSubscriber<PojoizedJson> baseSubscriber, long i) throws InterruptedException {
Mono<PojoizedJson> result;
int clientIndex = (int) (i % clientDocsMap.size());
CosmosAsyncClient client = (CosmosAsyncClient) clientDocsMap.keySet().toArray()[clientIndex];
int docIndex = (int) i % clientDocsMap.get(client).size();
PojoizedJson doc = clientDocsMap.get(client).get(docIndex);
String partitionKeyValue = doc.getId();
result = client.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).readItem(doc.getId(),
new PartitionKey(partitionKeyValue),
PojoizedJson.class).map(CosmosAsyncItemResponse::getItem);
concurrencyControlSemaphore.acquire();
AsyncReadBenchmark.LatencySubscriber<PojoizedJson> latencySubscriber = new AsyncReadBenchmark.LatencySubscriber<>(baseSubscriber);
latencySubscriber.context = latency.time();
result.subscribeOn(Schedulers.parallel()).subscribe(latencySubscriber);
}
} |
it meant to be , had created configuration property for it .It was missed during usage , fixed it . | private void createClients() {
String csvFile = "clientHostAndKey.txt";
String line = "";
String splitBy = ";";
try (BufferedReader br = new BufferedReader(new FileReader(csvFile))) {
while ((line = br.readLine()) != null) {
String[] hostAndKey = line.split(splitBy);
if (hostAndKey.length >= 2) {
String endpoint = hostAndKey[0].substring(hostAndKey[0].indexOf(ACCOUNT_ENDPOINT_TAG) + ACCOUNT_ENDPOINT_TAG.length());
String key = hostAndKey[1].substring(hostAndKey[1].indexOf(ACCOUNT_KEY_TAG) + ACCOUNT_KEY_TAG.length());
CosmosAsyncClient asyncClient = new CosmosClientBuilder()
.endpoint(endpoint)
.key(key)
.connectionPolicy(configuration.getConnectionPolicy())
.consistencyLevel(configuration.getConsistencyLevel())
.connectionReuseAcrossClientsEnabled(true)
.buildAsyncClient();
List<PojoizedJson> docsToRead = new ArrayList<>();
if (!configuration.isDeleteCollections()) {
CosmosAsyncDatabase cosmosAsyncDatabase = asyncClient.createDatabaseIfNotExists(this.configuration.getDatabaseId()).block().getDatabase();
CosmosAsyncContainer cosmosAsyncContainer = cosmosAsyncDatabase.createContainerIfNotExists(configuration.getCollectionId(), PARTITION_KEY, 100000).block().getContainer();
String partitionKey = cosmosAsyncContainer.read().block().getProperties().getPartitionKeyDefinition()
.getPaths().iterator().next().split("/")[1];
String dataFieldValue = RandomStringUtils.randomAlphabetic(this.configuration.getDocumentDataFieldSize());
ArrayList<Flux<PojoizedJson>> createDocumentObservables = new ArrayList<>();
for (int i = 0; i < this.configuration.getNumberOfPreCreatedDocuments(); i++) {
String uuid = UUID.randomUUID().toString();
com.azure.cosmos.benchmark.PojoizedJson newDoc = generateDocument(uuid, dataFieldValue, partitionKey);
Flux<PojoizedJson> obs = cosmosAsyncContainer.createItem(newDoc).map(resp -> {
com.azure.cosmos.benchmark.PojoizedJson x =
resp.getItem();
return x;
}
).flux();
createDocumentObservables.add(obs);
}
docsToRead = Flux.merge(Flux.fromIterable(createDocumentObservables), 100).collectList().block();
logger.info("Client have been initialized with data created for host {}", hostAndKey[0]);
} else {
logger.info("Client have been initialized with host {}", hostAndKey[0]);
}
clientDocsMap.put(asyncClient, docsToRead);
}
}
MeterRegistry registry = configuration.getAzureMonitorMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
registry = configuration.getGraphiteMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
logger.info("Total number of client created for ReadThroughputWithMultipleClient {}",clientDocsMap.size());
} catch (FileNotFoundException e) {
e.printStackTrace();
} catch (IOException e) {
e.printStackTrace();
}
} | CosmosAsyncContainer cosmosAsyncContainer = cosmosAsyncDatabase.createContainerIfNotExists(configuration.getCollectionId(), PARTITION_KEY, 100000).block().getContainer(); | private void createClients() {
String csvFile = "clientHostAndKey.txt";
String line = "";
String splitBy = ";";
try (BufferedReader br = new BufferedReader(new FileReader(csvFile))) {
while ((line = br.readLine()) != null) {
String[] hostAndKey = line.split(splitBy);
if (hostAndKey.length >= 2) {
String endpoint = hostAndKey[0].substring(hostAndKey[0].indexOf(ACCOUNT_ENDPOINT_TAG) + ACCOUNT_ENDPOINT_TAG.length());
String key = hostAndKey[1].substring(hostAndKey[1].indexOf(ACCOUNT_KEY_TAG) + ACCOUNT_KEY_TAG.length());
CosmosAsyncClient asyncClient = new CosmosClientBuilder()
.endpoint(endpoint)
.key(key)
.connectionPolicy(configuration.getConnectionPolicy())
.consistencyLevel(configuration.getConsistencyLevel())
.connectionReuseAcrossClientsEnabled(true)
.buildAsyncClient();
List<PojoizedJson> docsToRead = new ArrayList<>();
if (!configuration.isDeleteCollections()) {
CosmosAsyncDatabase cosmosAsyncDatabase = asyncClient.createDatabaseIfNotExists(this.configuration.getDatabaseId()).block().getDatabase();
CosmosAsyncContainer cosmosAsyncContainer =
cosmosAsyncDatabase.createContainerIfNotExists(configuration.getCollectionId(), PARTITION_KEY, configuration.getThroughput()).block().getContainer();
String partitionKey = cosmosAsyncContainer.read().block().getProperties().getPartitionKeyDefinition()
.getPaths().iterator().next().split("/")[1];
String dataFieldValue = RandomStringUtils.randomAlphabetic(this.configuration.getDocumentDataFieldSize());
ArrayList<Flux<PojoizedJson>> createDocumentObservables = new ArrayList<>();
for (int i = 0; i < this.configuration.getNumberOfPreCreatedDocuments(); i++) {
String uuid = UUID.randomUUID().toString();
com.azure.cosmos.benchmark.PojoizedJson newDoc = generateDocument(uuid, dataFieldValue, partitionKey);
Flux<PojoizedJson> obs = cosmosAsyncContainer.createItem(newDoc).map(resp -> {
com.azure.cosmos.benchmark.PojoizedJson x =
resp.getItem();
return x;
}
).flux();
createDocumentObservables.add(obs);
}
docsToRead = Flux.merge(Flux.fromIterable(createDocumentObservables), 100).collectList().block();
logger.info("Client have been initialized with data created for host {}", hostAndKey[0]);
} else {
logger.info("Client have been initialized with host {}", hostAndKey[0]);
}
clientDocsMap.put(asyncClient, docsToRead);
}
}
logger.info("Total number of client created for ReadThroughputWithMultipleClient {}", clientDocsMap.size());
} catch (FileNotFoundException e) {
logger.error(e.getMessage());
} catch (IOException e) {
logger.error(e.getMessage());
}
MeterRegistry registry = configuration.getAzureMonitorMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
registry = configuration.getGraphiteMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
} | class AsynReadWithMultipleClients<T> {
private final static String PARTITION_KEY = "/pk";
private final static String ACCOUNT_ENDPOINT_TAG = "AccountEndpoint=";
private final static String ACCOUNT_KEY_TAG = "AccountKey=";
private final Semaphore concurrencyControlSemaphore;
private final Logger logger;
private final Configuration configuration;
private MetricRegistry metricsRegistry = new MetricRegistry();
private ScheduledReporter reporter;
private Meter successMeter;
private Meter failureMeter;
private Timer latency;
private Map<CosmosAsyncClient, List<PojoizedJson>> clientDocsMap = new HashMap<>();
AsynReadWithMultipleClients(Configuration cfg) {
logger = LoggerFactory.getLogger(this.getClass());
this.configuration = cfg;
createClients();
if (configuration.getGraphiteEndpoint() != null) {
final Graphite graphite = new Graphite(new InetSocketAddress(configuration.getGraphiteEndpoint(), configuration.getGraphiteEndpointPort()));
reporter = GraphiteReporter.forRegistry(metricsRegistry)
.prefixedWith(configuration.getOperationType().name())
.convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS)
.filter(MetricFilter.ALL)
.build(graphite);
} else {
reporter = ConsoleReporter.forRegistry(metricsRegistry).convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS).build();
}
concurrencyControlSemaphore = new Semaphore(cfg.getConcurrency());
}
void deleteCollection() {
try {
for (CosmosAsyncClient cosmosAsyncClient : clientDocsMap.keySet()) {
cosmosAsyncClient.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).delete().block();
}
} catch (CosmosClientException e) {
if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) {
} else {
throw e;
}
} finally {
logger.info("Container on all client have been deleted successfully");
}
}
void run() throws Exception {
successMeter = metricsRegistry.meter("
failureMeter = metricsRegistry.meter("
latency = metricsRegistry.timer("Latency");
reporter.start(configuration.getPrintingInterval(), TimeUnit.SECONDS);
AtomicLong count = new AtomicLong(0);
long i;
long startTime = System.currentTimeMillis();
for (i = 0; shouldContinue(startTime, i); i++) {
BaseSubscriber<PojoizedJson> baseSubscriber = new BaseSubscriber<PojoizedJson>() {
@Override
protected void hookOnSubscribe(Subscription subscription) {
super.hookOnSubscribe(subscription);
}
@Override
protected void hookOnNext(PojoizedJson value) {
logger.debug("hookOnNext: {}, count:{}", value, count.get());
}
@Override
protected void hookOnCancel() {
this.hookOnError(new CancellationException());
}
@Override
protected void hookOnComplete() {
successMeter.mark();
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onSuccess();
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
@Override
protected void hookOnError(Throwable throwable) {
failureMeter.mark();
logger.error("Encountered failure {} on thread {}",
throwable.getMessage(), Thread.currentThread().getName(), throwable);
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onError(throwable);
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
};
performWorkload(baseSubscriber, i);
}
synchronized (count) {
while (count.get() < i) {
count.wait();
}
}
long endTime = System.currentTimeMillis();
logger.info("[{}] operations performed in [{}] seconds.",
configuration.getNumberOfOperations(), (int) ((endTime - startTime) / 1000));
reporter.report();
reporter.close();
}
void shutdown() {
for (CosmosAsyncClient asyncClient : clientDocsMap.keySet()) {
asyncClient.close();
}
}
protected void onSuccess() {
}
protected void onError(Throwable throwable) {
}
private PojoizedJson generateDocument(String idString, String dataFieldValue, String partitionKey) {
com.azure.cosmos.benchmark.PojoizedJson instance = new com.azure.cosmos.benchmark.PojoizedJson();
Map<String, String> properties = instance.getInstance();
properties.put("id", idString);
properties.put(partitionKey, idString);
for (int i = 0; i < configuration.getDocumentDataFieldCount(); i++) {
properties.put("dataField" + i, dataFieldValue);
}
return instance;
}
private boolean shouldContinue(long startTimeMillis, long iterationCount) {
Duration maxDurationTime = configuration.getMaxRunningTimeDuration();
int maxNumberOfOperations = configuration.getNumberOfOperations();
if (maxDurationTime == null) {
return iterationCount < maxNumberOfOperations;
}
if (startTimeMillis + maxDurationTime.toMillis() < System.currentTimeMillis()) {
return false;
}
if (maxNumberOfOperations < 0) {
return true;
}
return iterationCount < maxNumberOfOperations;
}
private void performWorkload(BaseSubscriber<PojoizedJson> baseSubscriber, long i) throws InterruptedException {
Mono<PojoizedJson> result;
int clientIndex = (int) (i % clientDocsMap.size());
CosmosAsyncClient client = (CosmosAsyncClient) clientDocsMap.keySet().toArray()[clientIndex];
int docIndex = (int) i % clientDocsMap.get(client).size();
PojoizedJson doc = clientDocsMap.get(client).get(docIndex);
String partitionKeyValue = doc.getId();
result = client.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).readItem(doc.getId(),
new PartitionKey(partitionKeyValue),
PojoizedJson.class).map(CosmosAsyncItemResponse::getItem);
concurrencyControlSemaphore.acquire();
AsyncReadBenchmark.LatencySubscriber<PojoizedJson> latencySubscriber = new AsyncReadBenchmark.LatencySubscriber<>(baseSubscriber);
latencySubscriber.context = latency.time();
result.subscribeOn(Schedulers.parallel()).subscribe(latencySubscriber);
}
} | class AsynReadWithMultipleClients<T> {
private final static String PARTITION_KEY = "/pk";
private final static String ACCOUNT_ENDPOINT_TAG = "AccountEndpoint=";
private final static String ACCOUNT_KEY_TAG = "AccountKey=";
private final Semaphore concurrencyControlSemaphore;
private final Logger logger;
private final Configuration configuration;
private MetricRegistry metricsRegistry = new MetricRegistry();
private ScheduledReporter reporter;
private Meter successMeter;
private Meter failureMeter;
private Timer latency;
private Map<CosmosAsyncClient, List<PojoizedJson>> clientDocsMap = new HashMap<>();
AsynReadWithMultipleClients(Configuration cfg) {
logger = LoggerFactory.getLogger(this.getClass());
this.configuration = cfg;
createClients();
if (configuration.getGraphiteEndpoint() != null) {
final Graphite graphite = new Graphite(new InetSocketAddress(configuration.getGraphiteEndpoint(), configuration.getGraphiteEndpointPort()));
reporter = GraphiteReporter.forRegistry(metricsRegistry)
.prefixedWith(configuration.getOperationType().name())
.convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS)
.filter(MetricFilter.ALL)
.build(graphite);
} else {
reporter = ConsoleReporter.forRegistry(metricsRegistry).convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS).build();
}
concurrencyControlSemaphore = new Semaphore(cfg.getConcurrency());
}
void deleteCollection() {
try {
for (CosmosAsyncClient cosmosAsyncClient : clientDocsMap.keySet()) {
cosmosAsyncClient.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).delete().block();
}
} catch (CosmosClientException e) {
if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) {
} else {
throw e;
}
} finally {
logger.info("Container on all client have been deleted successfully");
}
}
void run() throws Exception {
successMeter = metricsRegistry.meter("
failureMeter = metricsRegistry.meter("
latency = metricsRegistry.timer("Latency");
reporter.start(configuration.getPrintingInterval(), TimeUnit.SECONDS);
AtomicLong count = new AtomicLong(0);
long i;
long startTime = System.currentTimeMillis();
for (i = 0; shouldContinue(startTime, i); i++) {
BaseSubscriber<PojoizedJson> baseSubscriber = new BaseSubscriber<PojoizedJson>() {
@Override
protected void hookOnSubscribe(Subscription subscription) {
super.hookOnSubscribe(subscription);
}
@Override
protected void hookOnNext(PojoizedJson value) {
logger.debug("hookOnNext: {}, count:{}", value, count.get());
}
@Override
protected void hookOnCancel() {
this.hookOnError(new CancellationException());
}
@Override
protected void hookOnComplete() {
successMeter.mark();
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onSuccess();
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
@Override
protected void hookOnError(Throwable throwable) {
failureMeter.mark();
logger.error("Encountered failure {} on thread {}",
throwable.getMessage(), Thread.currentThread().getName(), throwable);
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onError(throwable);
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
};
performWorkload(baseSubscriber, i);
}
synchronized (count) {
while (count.get() < i) {
count.wait();
}
}
long endTime = System.currentTimeMillis();
logger.info("[{}] operations performed in [{}] seconds.",
configuration.getNumberOfOperations(), (int) ((endTime - startTime) / 1000));
reporter.report();
reporter.close();
}
void shutdown() {
for (CosmosAsyncClient asyncClient : clientDocsMap.keySet()) {
asyncClient.close();
}
}
protected void onSuccess() {
}
protected void onError(Throwable throwable) {
}
private PojoizedJson generateDocument(String idString, String dataFieldValue, String partitionKey) {
com.azure.cosmos.benchmark.PojoizedJson instance = new com.azure.cosmos.benchmark.PojoizedJson();
Map<String, String> properties = instance.getInstance();
properties.put("id", idString);
properties.put(partitionKey, idString);
for (int i = 0; i < configuration.getDocumentDataFieldCount(); i++) {
properties.put("dataField" + i, dataFieldValue);
}
return instance;
}
private boolean shouldContinue(long startTimeMillis, long iterationCount) {
Duration maxDurationTime = configuration.getMaxRunningTimeDuration();
int maxNumberOfOperations = configuration.getNumberOfOperations();
if (maxDurationTime == null) {
return iterationCount < maxNumberOfOperations;
}
if (startTimeMillis + maxDurationTime.toMillis() < System.currentTimeMillis()) {
return false;
}
if (maxNumberOfOperations < 0) {
return true;
}
return iterationCount < maxNumberOfOperations;
}
private void performWorkload(BaseSubscriber<PojoizedJson> baseSubscriber, long i) throws InterruptedException {
Mono<PojoizedJson> result;
int clientIndex = (int) (i % clientDocsMap.size());
CosmosAsyncClient client = (CosmosAsyncClient) clientDocsMap.keySet().toArray()[clientIndex];
int docIndex = (int) i % clientDocsMap.get(client).size();
PojoizedJson doc = clientDocsMap.get(client).get(docIndex);
String partitionKeyValue = doc.getId();
result = client.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).readItem(doc.getId(),
new PartitionKey(partitionKeyValue),
PojoizedJson.class).map(CosmosAsyncItemResponse::getItem);
concurrencyControlSemaphore.acquire();
AsyncReadBenchmark.LatencySubscriber<PojoizedJson> latencySubscriber = new AsyncReadBenchmark.LatencySubscriber<>(baseSubscriber);
latencySubscriber.context = latency.time();
result.subscribeOn(Schedulers.parallel()).subscribe(latencySubscriber);
}
} |
done | private void createClients() {
String csvFile = "clientHostAndKey.txt";
String line = "";
String splitBy = ";";
try (BufferedReader br = new BufferedReader(new FileReader(csvFile))) {
while ((line = br.readLine()) != null) {
String[] hostAndKey = line.split(splitBy);
if (hostAndKey.length >= 2) {
String endpoint = hostAndKey[0].substring(hostAndKey[0].indexOf(ACCOUNT_ENDPOINT_TAG) + ACCOUNT_ENDPOINT_TAG.length());
String key = hostAndKey[1].substring(hostAndKey[1].indexOf(ACCOUNT_KEY_TAG) + ACCOUNT_KEY_TAG.length());
CosmosAsyncClient asyncClient = new CosmosClientBuilder()
.endpoint(endpoint)
.key(key)
.connectionPolicy(configuration.getConnectionPolicy())
.consistencyLevel(configuration.getConsistencyLevel())
.connectionReuseAcrossClientsEnabled(true)
.buildAsyncClient();
List<PojoizedJson> docsToRead = new ArrayList<>();
if (!configuration.isDeleteCollections()) {
CosmosAsyncDatabase cosmosAsyncDatabase = asyncClient.createDatabaseIfNotExists(this.configuration.getDatabaseId()).block().getDatabase();
CosmosAsyncContainer cosmosAsyncContainer = cosmosAsyncDatabase.createContainerIfNotExists(configuration.getCollectionId(), PARTITION_KEY, 100000).block().getContainer();
String partitionKey = cosmosAsyncContainer.read().block().getProperties().getPartitionKeyDefinition()
.getPaths().iterator().next().split("/")[1];
String dataFieldValue = RandomStringUtils.randomAlphabetic(this.configuration.getDocumentDataFieldSize());
ArrayList<Flux<PojoizedJson>> createDocumentObservables = new ArrayList<>();
for (int i = 0; i < this.configuration.getNumberOfPreCreatedDocuments(); i++) {
String uuid = UUID.randomUUID().toString();
com.azure.cosmos.benchmark.PojoizedJson newDoc = generateDocument(uuid, dataFieldValue, partitionKey);
Flux<PojoizedJson> obs = cosmosAsyncContainer.createItem(newDoc).map(resp -> {
com.azure.cosmos.benchmark.PojoizedJson x =
resp.getItem();
return x;
}
).flux();
createDocumentObservables.add(obs);
}
docsToRead = Flux.merge(Flux.fromIterable(createDocumentObservables), 100).collectList().block();
logger.info("Client have been initialized with data created for host {}", hostAndKey[0]);
} else {
logger.info("Client have been initialized with host {}", hostAndKey[0]);
}
clientDocsMap.put(asyncClient, docsToRead);
}
}
MeterRegistry registry = configuration.getAzureMonitorMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
registry = configuration.getGraphiteMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
logger.info("Total number of client created for ReadThroughputWithMultipleClient {}",clientDocsMap.size());
} catch (FileNotFoundException e) {
e.printStackTrace();
} catch (IOException e) {
e.printStackTrace();
}
} | MeterRegistry registry = configuration.getAzureMonitorMeterRegistry(); | private void createClients() {
String csvFile = "clientHostAndKey.txt";
String line = "";
String splitBy = ";";
try (BufferedReader br = new BufferedReader(new FileReader(csvFile))) {
while ((line = br.readLine()) != null) {
String[] hostAndKey = line.split(splitBy);
if (hostAndKey.length >= 2) {
String endpoint = hostAndKey[0].substring(hostAndKey[0].indexOf(ACCOUNT_ENDPOINT_TAG) + ACCOUNT_ENDPOINT_TAG.length());
String key = hostAndKey[1].substring(hostAndKey[1].indexOf(ACCOUNT_KEY_TAG) + ACCOUNT_KEY_TAG.length());
CosmosAsyncClient asyncClient = new CosmosClientBuilder()
.endpoint(endpoint)
.key(key)
.connectionPolicy(configuration.getConnectionPolicy())
.consistencyLevel(configuration.getConsistencyLevel())
.connectionReuseAcrossClientsEnabled(true)
.buildAsyncClient();
List<PojoizedJson> docsToRead = new ArrayList<>();
if (!configuration.isDeleteCollections()) {
CosmosAsyncDatabase cosmosAsyncDatabase = asyncClient.createDatabaseIfNotExists(this.configuration.getDatabaseId()).block().getDatabase();
CosmosAsyncContainer cosmosAsyncContainer =
cosmosAsyncDatabase.createContainerIfNotExists(configuration.getCollectionId(), PARTITION_KEY, configuration.getThroughput()).block().getContainer();
String partitionKey = cosmosAsyncContainer.read().block().getProperties().getPartitionKeyDefinition()
.getPaths().iterator().next().split("/")[1];
String dataFieldValue = RandomStringUtils.randomAlphabetic(this.configuration.getDocumentDataFieldSize());
ArrayList<Flux<PojoizedJson>> createDocumentObservables = new ArrayList<>();
for (int i = 0; i < this.configuration.getNumberOfPreCreatedDocuments(); i++) {
String uuid = UUID.randomUUID().toString();
com.azure.cosmos.benchmark.PojoizedJson newDoc = generateDocument(uuid, dataFieldValue, partitionKey);
Flux<PojoizedJson> obs = cosmosAsyncContainer.createItem(newDoc).map(resp -> {
com.azure.cosmos.benchmark.PojoizedJson x =
resp.getItem();
return x;
}
).flux();
createDocumentObservables.add(obs);
}
docsToRead = Flux.merge(Flux.fromIterable(createDocumentObservables), 100).collectList().block();
logger.info("Client have been initialized with data created for host {}", hostAndKey[0]);
} else {
logger.info("Client have been initialized with host {}", hostAndKey[0]);
}
clientDocsMap.put(asyncClient, docsToRead);
}
}
logger.info("Total number of client created for ReadThroughputWithMultipleClient {}", clientDocsMap.size());
} catch (FileNotFoundException e) {
logger.error(e.getMessage());
} catch (IOException e) {
logger.error(e.getMessage());
}
MeterRegistry registry = configuration.getAzureMonitorMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
registry = configuration.getGraphiteMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
} | class AsynReadWithMultipleClients<T> {
private final static String PARTITION_KEY = "/pk";
private final static String ACCOUNT_ENDPOINT_TAG = "AccountEndpoint=";
private final static String ACCOUNT_KEY_TAG = "AccountKey=";
private final Semaphore concurrencyControlSemaphore;
private final Logger logger;
private final Configuration configuration;
private MetricRegistry metricsRegistry = new MetricRegistry();
private ScheduledReporter reporter;
private Meter successMeter;
private Meter failureMeter;
private Timer latency;
private Map<CosmosAsyncClient, List<PojoizedJson>> clientDocsMap = new HashMap<>();
AsynReadWithMultipleClients(Configuration cfg) {
logger = LoggerFactory.getLogger(this.getClass());
this.configuration = cfg;
createClients();
if (configuration.getGraphiteEndpoint() != null) {
final Graphite graphite = new Graphite(new InetSocketAddress(configuration.getGraphiteEndpoint(), configuration.getGraphiteEndpointPort()));
reporter = GraphiteReporter.forRegistry(metricsRegistry)
.prefixedWith(configuration.getOperationType().name())
.convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS)
.filter(MetricFilter.ALL)
.build(graphite);
} else {
reporter = ConsoleReporter.forRegistry(metricsRegistry).convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS).build();
}
concurrencyControlSemaphore = new Semaphore(cfg.getConcurrency());
}
void deleteCollection() {
try {
for (CosmosAsyncClient cosmosAsyncClient : clientDocsMap.keySet()) {
cosmosAsyncClient.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).delete().block();
}
} catch (CosmosClientException e) {
if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) {
} else {
throw e;
}
} finally {
logger.info("Container on all client have been deleted successfully");
}
}
void run() throws Exception {
successMeter = metricsRegistry.meter("
failureMeter = metricsRegistry.meter("
latency = metricsRegistry.timer("Latency");
reporter.start(configuration.getPrintingInterval(), TimeUnit.SECONDS);
AtomicLong count = new AtomicLong(0);
long i;
long startTime = System.currentTimeMillis();
for (i = 0; shouldContinue(startTime, i); i++) {
BaseSubscriber<PojoizedJson> baseSubscriber = new BaseSubscriber<PojoizedJson>() {
@Override
protected void hookOnSubscribe(Subscription subscription) {
super.hookOnSubscribe(subscription);
}
@Override
protected void hookOnNext(PojoizedJson value) {
logger.debug("hookOnNext: {}, count:{}", value, count.get());
}
@Override
protected void hookOnCancel() {
this.hookOnError(new CancellationException());
}
@Override
protected void hookOnComplete() {
successMeter.mark();
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onSuccess();
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
@Override
protected void hookOnError(Throwable throwable) {
failureMeter.mark();
logger.error("Encountered failure {} on thread {}",
throwable.getMessage(), Thread.currentThread().getName(), throwable);
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onError(throwable);
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
};
performWorkload(baseSubscriber, i);
}
synchronized (count) {
while (count.get() < i) {
count.wait();
}
}
long endTime = System.currentTimeMillis();
logger.info("[{}] operations performed in [{}] seconds.",
configuration.getNumberOfOperations(), (int) ((endTime - startTime) / 1000));
reporter.report();
reporter.close();
}
void shutdown() {
for (CosmosAsyncClient asyncClient : clientDocsMap.keySet()) {
asyncClient.close();
}
}
protected void onSuccess() {
}
protected void onError(Throwable throwable) {
}
private PojoizedJson generateDocument(String idString, String dataFieldValue, String partitionKey) {
com.azure.cosmos.benchmark.PojoizedJson instance = new com.azure.cosmos.benchmark.PojoizedJson();
Map<String, String> properties = instance.getInstance();
properties.put("id", idString);
properties.put(partitionKey, idString);
for (int i = 0; i < configuration.getDocumentDataFieldCount(); i++) {
properties.put("dataField" + i, dataFieldValue);
}
return instance;
}
private boolean shouldContinue(long startTimeMillis, long iterationCount) {
Duration maxDurationTime = configuration.getMaxRunningTimeDuration();
int maxNumberOfOperations = configuration.getNumberOfOperations();
if (maxDurationTime == null) {
return iterationCount < maxNumberOfOperations;
}
if (startTimeMillis + maxDurationTime.toMillis() < System.currentTimeMillis()) {
return false;
}
if (maxNumberOfOperations < 0) {
return true;
}
return iterationCount < maxNumberOfOperations;
}
private void performWorkload(BaseSubscriber<PojoizedJson> baseSubscriber, long i) throws InterruptedException {
Mono<PojoizedJson> result;
int clientIndex = (int) (i % clientDocsMap.size());
CosmosAsyncClient client = (CosmosAsyncClient) clientDocsMap.keySet().toArray()[clientIndex];
int docIndex = (int) i % clientDocsMap.get(client).size();
PojoizedJson doc = clientDocsMap.get(client).get(docIndex);
String partitionKeyValue = doc.getId();
result = client.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).readItem(doc.getId(),
new PartitionKey(partitionKeyValue),
PojoizedJson.class).map(CosmosAsyncItemResponse::getItem);
concurrencyControlSemaphore.acquire();
AsyncReadBenchmark.LatencySubscriber<PojoizedJson> latencySubscriber = new AsyncReadBenchmark.LatencySubscriber<>(baseSubscriber);
latencySubscriber.context = latency.time();
result.subscribeOn(Schedulers.parallel()).subscribe(latencySubscriber);
}
} | class AsynReadWithMultipleClients<T> {
private final static String PARTITION_KEY = "/pk";
private final static String ACCOUNT_ENDPOINT_TAG = "AccountEndpoint=";
private final static String ACCOUNT_KEY_TAG = "AccountKey=";
private final Semaphore concurrencyControlSemaphore;
private final Logger logger;
private final Configuration configuration;
private MetricRegistry metricsRegistry = new MetricRegistry();
private ScheduledReporter reporter;
private Meter successMeter;
private Meter failureMeter;
private Timer latency;
private Map<CosmosAsyncClient, List<PojoizedJson>> clientDocsMap = new HashMap<>();
AsynReadWithMultipleClients(Configuration cfg) {
logger = LoggerFactory.getLogger(this.getClass());
this.configuration = cfg;
createClients();
if (configuration.getGraphiteEndpoint() != null) {
final Graphite graphite = new Graphite(new InetSocketAddress(configuration.getGraphiteEndpoint(), configuration.getGraphiteEndpointPort()));
reporter = GraphiteReporter.forRegistry(metricsRegistry)
.prefixedWith(configuration.getOperationType().name())
.convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS)
.filter(MetricFilter.ALL)
.build(graphite);
} else {
reporter = ConsoleReporter.forRegistry(metricsRegistry).convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS).build();
}
concurrencyControlSemaphore = new Semaphore(cfg.getConcurrency());
}
void deleteCollection() {
try {
for (CosmosAsyncClient cosmosAsyncClient : clientDocsMap.keySet()) {
cosmosAsyncClient.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).delete().block();
}
} catch (CosmosClientException e) {
if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) {
} else {
throw e;
}
} finally {
logger.info("Container on all client have been deleted successfully");
}
}
void run() throws Exception {
successMeter = metricsRegistry.meter("
failureMeter = metricsRegistry.meter("
latency = metricsRegistry.timer("Latency");
reporter.start(configuration.getPrintingInterval(), TimeUnit.SECONDS);
AtomicLong count = new AtomicLong(0);
long i;
long startTime = System.currentTimeMillis();
for (i = 0; shouldContinue(startTime, i); i++) {
BaseSubscriber<PojoizedJson> baseSubscriber = new BaseSubscriber<PojoizedJson>() {
@Override
protected void hookOnSubscribe(Subscription subscription) {
super.hookOnSubscribe(subscription);
}
@Override
protected void hookOnNext(PojoizedJson value) {
logger.debug("hookOnNext: {}, count:{}", value, count.get());
}
@Override
protected void hookOnCancel() {
this.hookOnError(new CancellationException());
}
@Override
protected void hookOnComplete() {
successMeter.mark();
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onSuccess();
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
@Override
protected void hookOnError(Throwable throwable) {
failureMeter.mark();
logger.error("Encountered failure {} on thread {}",
throwable.getMessage(), Thread.currentThread().getName(), throwable);
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onError(throwable);
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
};
performWorkload(baseSubscriber, i);
}
synchronized (count) {
while (count.get() < i) {
count.wait();
}
}
long endTime = System.currentTimeMillis();
logger.info("[{}] operations performed in [{}] seconds.",
configuration.getNumberOfOperations(), (int) ((endTime - startTime) / 1000));
reporter.report();
reporter.close();
}
void shutdown() {
for (CosmosAsyncClient asyncClient : clientDocsMap.keySet()) {
asyncClient.close();
}
}
protected void onSuccess() {
}
protected void onError(Throwable throwable) {
}
private PojoizedJson generateDocument(String idString, String dataFieldValue, String partitionKey) {
com.azure.cosmos.benchmark.PojoizedJson instance = new com.azure.cosmos.benchmark.PojoizedJson();
Map<String, String> properties = instance.getInstance();
properties.put("id", idString);
properties.put(partitionKey, idString);
for (int i = 0; i < configuration.getDocumentDataFieldCount(); i++) {
properties.put("dataField" + i, dataFieldValue);
}
return instance;
}
private boolean shouldContinue(long startTimeMillis, long iterationCount) {
Duration maxDurationTime = configuration.getMaxRunningTimeDuration();
int maxNumberOfOperations = configuration.getNumberOfOperations();
if (maxDurationTime == null) {
return iterationCount < maxNumberOfOperations;
}
if (startTimeMillis + maxDurationTime.toMillis() < System.currentTimeMillis()) {
return false;
}
if (maxNumberOfOperations < 0) {
return true;
}
return iterationCount < maxNumberOfOperations;
}
private void performWorkload(BaseSubscriber<PojoizedJson> baseSubscriber, long i) throws InterruptedException {
Mono<PojoizedJson> result;
int clientIndex = (int) (i % clientDocsMap.size());
CosmosAsyncClient client = (CosmosAsyncClient) clientDocsMap.keySet().toArray()[clientIndex];
int docIndex = (int) i % clientDocsMap.get(client).size();
PojoizedJson doc = clientDocsMap.get(client).get(docIndex);
String partitionKeyValue = doc.getId();
result = client.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).readItem(doc.getId(),
new PartitionKey(partitionKeyValue),
PojoizedJson.class).map(CosmosAsyncItemResponse::getItem);
concurrencyControlSemaphore.acquire();
AsyncReadBenchmark.LatencySubscriber<PojoizedJson> latencySubscriber = new AsyncReadBenchmark.LatencySubscriber<>(baseSubscriber);
latencySubscriber.context = latency.time();
result.subscribeOn(Schedulers.parallel()).subscribe(latencySubscriber);
}
} |
discussed offline | private void createClients() {
String csvFile = "clientHostAndKey.txt";
BufferedReader br = null;
String line = "";
String splitBy = ",";
try {
br = new BufferedReader(new FileReader(csvFile));
while ((line = br.readLine()) != null) {
String[] hostAndKey = line.split(splitBy);
if (hostAndKey.length == 2) {
CosmosAsyncClient asyncClient = new CosmosClientBuilder()
.endpoint(hostAndKey[0])
.key(hostAndKey[1])
.connectionPolicy(configuration.getConnectionPolicy())
.consistencyLevel(configuration.getConsistencyLevel())
.connectionReuseAcrossClientsEnabled(true)
.buildAsyncClient();
List<PojoizedJson> docsToRead = new ArrayList<>();
if (!configuration.isDeleteCollections()) {
CosmosAsyncDatabase cosmosAsyncDatabase = asyncClient.createDatabaseIfNotExists(this.configuration.getDatabaseId()).block().getDatabase();
CosmosAsyncContainer cosmosAsyncContainer = cosmosAsyncDatabase.createContainerIfNotExists(configuration.getCollectionId(), PARTITION_KEY, 100000).block().getContainer();
String partitionKey = cosmosAsyncContainer.read().block().getProperties().getPartitionKeyDefinition()
.getPaths().iterator().next().split("/")[1];
String dataFieldValue = RandomStringUtils.randomAlphabetic(this.configuration.getDocumentDataFieldSize());
ArrayList<Flux<PojoizedJson>> createDocumentObservables = new ArrayList<>();
for (int i = 0; i < this.configuration.getNumberOfPreCreatedDocuments(); i++) {
String uuid = UUID.randomUUID().toString();
com.azure.cosmos.benchmark.PojoizedJson newDoc = generateDocument(uuid, dataFieldValue, partitionKey);
Flux<PojoizedJson> obs = cosmosAsyncContainer.createItem(newDoc).map(resp -> {
com.azure.cosmos.benchmark.PojoizedJson x =
resp.getItem();
return x;
}
).flux();
createDocumentObservables.add(obs);
}
docsToRead = Flux.merge(Flux.fromIterable(createDocumentObservables), 100).collectList().block();
MeterRegistry registry = configuration.getAzureMonitorMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
registry = configuration.getGraphiteMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
System.out.println("Client have been initialized with data created for host " + hostAndKey[0]);
} else {
System.out.println("Client have been initialized with host " + hostAndKey[0]);
}
clientDocsMap.put(asyncClient, docsToRead);
}
}
System.out.println("Total number of client created for ReadThroughputWithMultipleClient " + clientDocsMap.size());
} catch (FileNotFoundException e) {
e.printStackTrace();
} catch (IOException e) {
e.printStackTrace();
} finally {
if (br != null) {
try {
br.close();
} catch (IOException e) {
e.printStackTrace();
}
}
}
} | if (!configuration.isDeleteCollections()) { | private void createClients() {
String csvFile = "clientHostAndKey.txt";
String line = "";
String splitBy = ";";
try (BufferedReader br = new BufferedReader(new FileReader(csvFile))) {
while ((line = br.readLine()) != null) {
String[] hostAndKey = line.split(splitBy);
if (hostAndKey.length >= 2) {
String endpoint = hostAndKey[0].substring(hostAndKey[0].indexOf(ACCOUNT_ENDPOINT_TAG) + ACCOUNT_ENDPOINT_TAG.length());
String key = hostAndKey[1].substring(hostAndKey[1].indexOf(ACCOUNT_KEY_TAG) + ACCOUNT_KEY_TAG.length());
CosmosAsyncClient asyncClient = new CosmosClientBuilder()
.endpoint(endpoint)
.key(key)
.connectionPolicy(configuration.getConnectionPolicy())
.consistencyLevel(configuration.getConsistencyLevel())
.connectionReuseAcrossClientsEnabled(true)
.buildAsyncClient();
List<PojoizedJson> docsToRead = new ArrayList<>();
if (!configuration.isDeleteCollections()) {
CosmosAsyncDatabase cosmosAsyncDatabase = asyncClient.createDatabaseIfNotExists(this.configuration.getDatabaseId()).block().getDatabase();
CosmosAsyncContainer cosmosAsyncContainer =
cosmosAsyncDatabase.createContainerIfNotExists(configuration.getCollectionId(), PARTITION_KEY, configuration.getThroughput()).block().getContainer();
String partitionKey = cosmosAsyncContainer.read().block().getProperties().getPartitionKeyDefinition()
.getPaths().iterator().next().split("/")[1];
String dataFieldValue = RandomStringUtils.randomAlphabetic(this.configuration.getDocumentDataFieldSize());
ArrayList<Flux<PojoizedJson>> createDocumentObservables = new ArrayList<>();
for (int i = 0; i < this.configuration.getNumberOfPreCreatedDocuments(); i++) {
String uuid = UUID.randomUUID().toString();
com.azure.cosmos.benchmark.PojoizedJson newDoc = generateDocument(uuid, dataFieldValue, partitionKey);
Flux<PojoizedJson> obs = cosmosAsyncContainer.createItem(newDoc).map(resp -> {
com.azure.cosmos.benchmark.PojoizedJson x =
resp.getItem();
return x;
}
).flux();
createDocumentObservables.add(obs);
}
docsToRead = Flux.merge(Flux.fromIterable(createDocumentObservables), 100).collectList().block();
logger.info("Client have been initialized with data created for host {}", hostAndKey[0]);
} else {
logger.info("Client have been initialized with host {}", hostAndKey[0]);
}
clientDocsMap.put(asyncClient, docsToRead);
}
}
logger.info("Total number of client created for ReadThroughputWithMultipleClient {}", clientDocsMap.size());
} catch (FileNotFoundException e) {
logger.error(e.getMessage());
} catch (IOException e) {
logger.error(e.getMessage());
}
MeterRegistry registry = configuration.getAzureMonitorMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
registry = configuration.getGraphiteMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
} | class AsynReadWithMultipleClients<T> {
private final static String PARTITION_KEY = "/pk";
private final Semaphore concurrencyControlSemaphore;
private final Logger logger;
private final Configuration configuration;
private MetricRegistry metricsRegistry = new MetricRegistry();
private ScheduledReporter reporter;
private Meter successMeter;
private Meter failureMeter;
private Timer latency;
private Map<CosmosAsyncClient, List<PojoizedJson>> clientDocsMap = new HashMap<>();
AsynReadWithMultipleClients(Configuration cfg) {
logger = LoggerFactory.getLogger(this.getClass());
this.configuration = cfg;
createClients();
if (configuration.getGraphiteEndpoint() != null) {
final Graphite graphite = new Graphite(new InetSocketAddress(configuration.getGraphiteEndpoint(), configuration.getGraphiteEndpointPort()));
reporter = GraphiteReporter.forRegistry(metricsRegistry)
.prefixedWith(configuration.getOperationType().name())
.convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS)
.filter(MetricFilter.ALL)
.build(graphite);
} else {
reporter = ConsoleReporter.forRegistry(metricsRegistry).convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS).build();
}
concurrencyControlSemaphore = new Semaphore(cfg.getConcurrency());
}
void deleteCollection() {
try {
for (CosmosAsyncClient cosmosAsyncClient : clientDocsMap.keySet()) {
cosmosAsyncClient.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).delete().block();
}
} catch (CosmosClientException e) {
if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) {
} else {
throw e;
}
} finally {
System.out.println("Container on all client have been deleted successfully");
}
}
void run() throws Exception {
successMeter = metricsRegistry.meter("
failureMeter = metricsRegistry.meter("
latency = metricsRegistry.timer("Latency");
reporter.start(configuration.getPrintingInterval(), TimeUnit.SECONDS);
AtomicLong count = new AtomicLong(0);
long i;
long startTime = System.currentTimeMillis();
for (i = 0; shouldContinue(startTime, i); i++) {
BaseSubscriber<PojoizedJson> baseSubscriber = new BaseSubscriber<PojoizedJson>() {
@Override
protected void hookOnSubscribe(Subscription subscription) {
super.hookOnSubscribe(subscription);
}
@Override
protected void hookOnNext(PojoizedJson value) {
logger.debug("hookOnNext: {}, count:{}", value, count.get());
}
@Override
protected void hookOnCancel() {
this.hookOnError(new CancellationException());
}
@Override
protected void hookOnComplete() {
successMeter.mark();
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onSuccess();
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
@Override
protected void hookOnError(Throwable throwable) {
failureMeter.mark();
logger.error("Encountered failure {} on thread {}",
throwable.getMessage(), Thread.currentThread().getName(), throwable);
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onError(throwable);
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
};
performWorkload(baseSubscriber, i);
}
synchronized (count) {
while (count.get() < i) {
count.wait();
}
}
long endTime = System.currentTimeMillis();
logger.info("[{}] operations performed in [{}] seconds.",
configuration.getNumberOfOperations(), (int) ((endTime - startTime) / 1000));
reporter.report();
reporter.close();
}
void shutdown() {
for (CosmosAsyncClient asyncClient : clientDocsMap.keySet()) {
asyncClient.close();
}
}
protected void onSuccess() {
}
protected void onError(Throwable throwable) {
}
private PojoizedJson generateDocument(String idString, String dataFieldValue, String partitionKey) {
com.azure.cosmos.benchmark.PojoizedJson instance = new com.azure.cosmos.benchmark.PojoizedJson();
Map<String, String> properties = instance.getInstance();
properties.put("id", idString);
properties.put(partitionKey, idString);
for (int i = 0; i < configuration.getDocumentDataFieldCount(); i++) {
properties.put("dataField" + i, dataFieldValue);
}
return instance;
}
private boolean shouldContinue(long startTimeMillis, long iterationCount) {
Duration maxDurationTime = configuration.getMaxRunningTimeDuration();
int maxNumberOfOperations = configuration.getNumberOfOperations();
if (maxDurationTime == null) {
return iterationCount < maxNumberOfOperations;
}
if (startTimeMillis + maxDurationTime.toMillis() < System.currentTimeMillis()) {
return false;
}
if (maxNumberOfOperations < 0) {
return true;
}
return iterationCount < maxNumberOfOperations;
}
private void performWorkload(BaseSubscriber<PojoizedJson> baseSubscriber, long i) throws InterruptedException {
Mono<PojoizedJson> result;
int clientIndex = (int) (i % clientDocsMap.size());
CosmosAsyncClient client = (CosmosAsyncClient) clientDocsMap.keySet().toArray()[clientIndex];
int docIndex = (int) i % clientDocsMap.get(client).size();
PojoizedJson doc = clientDocsMap.get(client).get(docIndex);
String partitionKeyValue = doc.getId();
result = client.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).readItem(doc.getId(),
new PartitionKey(partitionKeyValue),
PojoizedJson.class).map(CosmosAsyncItemResponse::getItem);
concurrencyControlSemaphore.acquire();
AsyncReadBenchmark.LatencySubscriber<PojoizedJson> latencySubscriber = new AsyncReadBenchmark.LatencySubscriber<>(baseSubscriber);
latencySubscriber.context = latency.time();
result.subscribeOn(Schedulers.parallel()).subscribe(latencySubscriber);
}
} | class AsynReadWithMultipleClients<T> {
private final static String PARTITION_KEY = "/pk";
private final static String ACCOUNT_ENDPOINT_TAG = "AccountEndpoint=";
private final static String ACCOUNT_KEY_TAG = "AccountKey=";
private final Semaphore concurrencyControlSemaphore;
private final Logger logger;
private final Configuration configuration;
private MetricRegistry metricsRegistry = new MetricRegistry();
private ScheduledReporter reporter;
private Meter successMeter;
private Meter failureMeter;
private Timer latency;
private Map<CosmosAsyncClient, List<PojoizedJson>> clientDocsMap = new HashMap<>();
AsynReadWithMultipleClients(Configuration cfg) {
logger = LoggerFactory.getLogger(this.getClass());
this.configuration = cfg;
createClients();
if (configuration.getGraphiteEndpoint() != null) {
final Graphite graphite = new Graphite(new InetSocketAddress(configuration.getGraphiteEndpoint(), configuration.getGraphiteEndpointPort()));
reporter = GraphiteReporter.forRegistry(metricsRegistry)
.prefixedWith(configuration.getOperationType().name())
.convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS)
.filter(MetricFilter.ALL)
.build(graphite);
} else {
reporter = ConsoleReporter.forRegistry(metricsRegistry).convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS).build();
}
concurrencyControlSemaphore = new Semaphore(cfg.getConcurrency());
}
void deleteCollection() {
try {
for (CosmosAsyncClient cosmosAsyncClient : clientDocsMap.keySet()) {
cosmosAsyncClient.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).delete().block();
}
} catch (CosmosClientException e) {
if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) {
} else {
throw e;
}
} finally {
logger.info("Container on all client have been deleted successfully");
}
}
void run() throws Exception {
successMeter = metricsRegistry.meter("
failureMeter = metricsRegistry.meter("
latency = metricsRegistry.timer("Latency");
reporter.start(configuration.getPrintingInterval(), TimeUnit.SECONDS);
AtomicLong count = new AtomicLong(0);
long i;
long startTime = System.currentTimeMillis();
for (i = 0; shouldContinue(startTime, i); i++) {
BaseSubscriber<PojoizedJson> baseSubscriber = new BaseSubscriber<PojoizedJson>() {
@Override
protected void hookOnSubscribe(Subscription subscription) {
super.hookOnSubscribe(subscription);
}
@Override
protected void hookOnNext(PojoizedJson value) {
logger.debug("hookOnNext: {}, count:{}", value, count.get());
}
@Override
protected void hookOnCancel() {
this.hookOnError(new CancellationException());
}
@Override
protected void hookOnComplete() {
successMeter.mark();
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onSuccess();
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
@Override
protected void hookOnError(Throwable throwable) {
failureMeter.mark();
logger.error("Encountered failure {} on thread {}",
throwable.getMessage(), Thread.currentThread().getName(), throwable);
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onError(throwable);
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
};
performWorkload(baseSubscriber, i);
}
synchronized (count) {
while (count.get() < i) {
count.wait();
}
}
long endTime = System.currentTimeMillis();
logger.info("[{}] operations performed in [{}] seconds.",
configuration.getNumberOfOperations(), (int) ((endTime - startTime) / 1000));
reporter.report();
reporter.close();
}
void shutdown() {
for (CosmosAsyncClient asyncClient : clientDocsMap.keySet()) {
asyncClient.close();
}
}
protected void onSuccess() {
}
protected void onError(Throwable throwable) {
}
private PojoizedJson generateDocument(String idString, String dataFieldValue, String partitionKey) {
com.azure.cosmos.benchmark.PojoizedJson instance = new com.azure.cosmos.benchmark.PojoizedJson();
Map<String, String> properties = instance.getInstance();
properties.put("id", idString);
properties.put(partitionKey, idString);
for (int i = 0; i < configuration.getDocumentDataFieldCount(); i++) {
properties.put("dataField" + i, dataFieldValue);
}
return instance;
}
private boolean shouldContinue(long startTimeMillis, long iterationCount) {
Duration maxDurationTime = configuration.getMaxRunningTimeDuration();
int maxNumberOfOperations = configuration.getNumberOfOperations();
if (maxDurationTime == null) {
return iterationCount < maxNumberOfOperations;
}
if (startTimeMillis + maxDurationTime.toMillis() < System.currentTimeMillis()) {
return false;
}
if (maxNumberOfOperations < 0) {
return true;
}
return iterationCount < maxNumberOfOperations;
}
private void performWorkload(BaseSubscriber<PojoizedJson> baseSubscriber, long i) throws InterruptedException {
Mono<PojoizedJson> result;
int clientIndex = (int) (i % clientDocsMap.size());
CosmosAsyncClient client = (CosmosAsyncClient) clientDocsMap.keySet().toArray()[clientIndex];
int docIndex = (int) i % clientDocsMap.get(client).size();
PojoizedJson doc = clientDocsMap.get(client).get(docIndex);
String partitionKeyValue = doc.getId();
result = client.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).readItem(doc.getId(),
new PartitionKey(partitionKeyValue),
PojoizedJson.class).map(CosmosAsyncItemResponse::getItem);
concurrencyControlSemaphore.acquire();
AsyncReadBenchmark.LatencySubscriber<PojoizedJson> latencySubscriber = new AsyncReadBenchmark.LatencySubscriber<>(baseSubscriber);
latencySubscriber.context = latency.time();
result.subscribeOn(Schedulers.parallel()).subscribe(latencySubscriber);
}
} |
Could you use one check of CoreUtil.isNullOrEmpty() here? | public AzureKeyCredential update(String key) {
Objects.requireNonNull(key);
if (key.isEmpty()) {
throw logger.logExceptionAsError(new IllegalArgumentException("'key' cannot be empty."));
}
this.key = key;
return this;
} | if (key.isEmpty()) { | public AzureKeyCredential update(String key) {
Objects.requireNonNull(key, "'key' cannot be null.");
if (key.isEmpty()) {
throw logger.logExceptionAsError(new IllegalArgumentException("'key' cannot be empty."));
}
this.key = key;
return this;
} | class AzureKeyCredential {
private final ClientLogger logger = new ClientLogger(AzureKeyCredential.class);
private String key;
/**
* Creates a credential that authorizes request with the given key.
*
* @param key The key used to authorize requests.
* @throws NullPointerException If {@code key} is {@code null}.
* @throws IllegalArgumentException If {@code key} is an empty string.
*/
public AzureKeyCredential(String key) {
Objects.requireNonNull(key);
if (key.isEmpty()) {
throw logger.logExceptionAsError(new IllegalArgumentException("'key' cannot be empty."));
}
this.key = key;
}
/**
* Retrieves the key associated to this credential.
*
* @return The key being used to authorize requests.
*/
public String getKey() {
return key;
}
/**
* Rotates the key associated to this credential.
*
* @param key The new key to associated with this credential.
* @return The updated {@code ApiKeyCredential} object.
* @throws NullPointerException If {@code key} is {@code null}.
* @throws IllegalArgumentException If {@code key} is an empty string.
*/
} | class AzureKeyCredential {
private final ClientLogger logger = new ClientLogger(AzureKeyCredential.class);
private String key;
/**
* Creates a credential that authorizes request with the given key.
*
* @param key The key used to authorize requests.
* @throws NullPointerException If {@code key} is {@code null}.
* @throws IllegalArgumentException If {@code key} is an empty string.
*/
public AzureKeyCredential(String key) {
Objects.requireNonNull(key, "'key' cannot be null.");
if (key.isEmpty()) {
throw logger.logExceptionAsError(new IllegalArgumentException("'key' cannot be empty."));
}
this.key = key;
}
/**
* Retrieves the key associated to this credential.
*
* @return The key being used to authorize requests.
*/
public String getKey() {
return key;
}
/**
* Rotates the key associated to this credential.
*
* @param key The new key to associated with this credential.
* @return The updated {@code ApiKeyCredential} object.
* @throws NullPointerException If {@code key} is {@code null}.
* @throws IllegalArgumentException If {@code key} is an empty string.
*/
} |
Definitely could but our guidelines state if we expect a non-null parameter and we get null a `NullPointerException` should be thrown (the `Objects.requireNonNull` check) and then if we get a value that we don't expect that isn't null throw a `IllegalArgumentException` (the `isEmpty` check). | public AzureKeyCredential update(String key) {
Objects.requireNonNull(key);
if (key.isEmpty()) {
throw logger.logExceptionAsError(new IllegalArgumentException("'key' cannot be empty."));
}
this.key = key;
return this;
} | if (key.isEmpty()) { | public AzureKeyCredential update(String key) {
Objects.requireNonNull(key, "'key' cannot be null.");
if (key.isEmpty()) {
throw logger.logExceptionAsError(new IllegalArgumentException("'key' cannot be empty."));
}
this.key = key;
return this;
} | class AzureKeyCredential {
private final ClientLogger logger = new ClientLogger(AzureKeyCredential.class);
private String key;
/**
* Creates a credential that authorizes request with the given key.
*
* @param key The key used to authorize requests.
* @throws NullPointerException If {@code key} is {@code null}.
* @throws IllegalArgumentException If {@code key} is an empty string.
*/
public AzureKeyCredential(String key) {
Objects.requireNonNull(key);
if (key.isEmpty()) {
throw logger.logExceptionAsError(new IllegalArgumentException("'key' cannot be empty."));
}
this.key = key;
}
/**
* Retrieves the key associated to this credential.
*
* @return The key being used to authorize requests.
*/
public String getKey() {
return key;
}
/**
* Rotates the key associated to this credential.
*
* @param key The new key to associated with this credential.
* @return The updated {@code ApiKeyCredential} object.
* @throws NullPointerException If {@code key} is {@code null}.
* @throws IllegalArgumentException If {@code key} is an empty string.
*/
} | class AzureKeyCredential {
private final ClientLogger logger = new ClientLogger(AzureKeyCredential.class);
private String key;
/**
* Creates a credential that authorizes request with the given key.
*
* @param key The key used to authorize requests.
* @throws NullPointerException If {@code key} is {@code null}.
* @throws IllegalArgumentException If {@code key} is an empty string.
*/
public AzureKeyCredential(String key) {
Objects.requireNonNull(key, "'key' cannot be null.");
if (key.isEmpty()) {
throw logger.logExceptionAsError(new IllegalArgumentException("'key' cannot be empty."));
}
this.key = key;
}
/**
* Retrieves the key associated to this credential.
*
* @return The key being used to authorize requests.
*/
public String getKey() {
return key;
}
/**
* Rotates the key associated to this credential.
*
* @param key The new key to associated with this credential.
* @return The updated {@code ApiKeyCredential} object.
* @throws NullPointerException If {@code key} is {@code null}.
* @throws IllegalArgumentException If {@code key} is an empty string.
*/
} |
too much indentation? | public void testExcludeCredentials() throws Exception {
DefaultAzureCredential credential = new DefaultAzureCredentialBuilder()
.excludeEnvironmentCredential()
.excludeAzureCliCredential()
.excludeManagedIdentityCredential()
.excludeSharedTokenCacheCredential()
.build();
} | .excludeEnvironmentCredential() | public void testExcludeCredentials() throws Exception {
DefaultAzureCredential credential = new DefaultAzureCredentialBuilder()
.excludeEnvironmentCredential()
.excludeAzureCliCredential()
.excludeManagedIdentityCredential()
.excludeSharedTokenCacheCredential()
.build();
} | class DefaultAzureCredentialTest {
private final String tenantId = "contoso.com";
private final String clientId = UUID.randomUUID().toString();
@Test
public void testUseEnvironmentCredential() throws Exception {
Configuration configuration = Configuration.getGlobalConfiguration();
try {
String secret = "secret";
String token1 = "token1";
TokenRequestContext request1 = new TokenRequestContext().addScopes("https:
OffsetDateTime expiresOn = OffsetDateTime.now(ZoneOffset.UTC).plusHours(1);
configuration.put("AZURE_CLIENT_ID", clientId);
configuration.put("AZURE_CLIENT_SECRET", secret);
configuration.put("AZURE_TENANT_ID", tenantId);
IdentityClient identityClient = PowerMockito.mock(IdentityClient.class);
when(identityClient.authenticateWithClientSecret(secret, request1)).thenReturn(TestUtils.getMockAccessToken(token1, expiresOn));
PowerMockito.whenNew(IdentityClient.class).withAnyArguments().thenReturn(identityClient);
DefaultAzureCredential credential = new DefaultAzureCredentialBuilder().build();
StepVerifier.create(credential.getToken(request1))
.expectNextMatches(accessToken -> token1.equals(accessToken.getToken())
&& expiresOn.getSecond() == accessToken.getExpiresAt().getSecond())
.verifyComplete();
} finally {
configuration.remove("AZURE_CLIENT_ID");
configuration.remove("AZURE_CLIENT_SECRET");
configuration.remove("AZURE_TENANT_ID");
}
}
@Test
public void testUseManagedIdentityCredential() throws Exception {
String token1 = "token1";
TokenRequestContext request = new TokenRequestContext().addScopes("https:
OffsetDateTime expiresAt = OffsetDateTime.now(ZoneOffset.UTC).plusHours(1);
IdentityClient identityClient = PowerMockito.mock(IdentityClient.class);
when(identityClient.authenticateToIMDSEndpoint(request)).thenReturn(TestUtils.getMockAccessToken(token1, expiresAt));
PowerMockito.whenNew(IdentityClient.class).withAnyArguments().thenReturn(identityClient);
DefaultAzureCredential credential = new DefaultAzureCredentialBuilder().build();
StepVerifier.create(credential.getToken(request))
.expectNextMatches(accessToken -> token1.equals(accessToken.getToken())
&& expiresAt.getSecond() == accessToken.getExpiresAt().getSecond())
.verifyComplete();
}
@Test
public void testUseAzureCliCredential() throws Exception {
String token1 = "token1";
TokenRequestContext request = new TokenRequestContext().addScopes("https:
OffsetDateTime expiresAt = OffsetDateTime.now(ZoneOffset.UTC).plusHours(1);
IdentityClient identityClient = PowerMockito.mock(IdentityClient.class);
when(identityClient.authenticateWithAzureCli(request)).thenReturn(TestUtils.getMockAccessToken(token1, expiresAt));
when(identityClient.authenticateToIMDSEndpoint(request)).thenReturn(Mono.empty());
PowerMockito.whenNew(IdentityClient.class).withAnyArguments().thenReturn(identityClient);
DefaultAzureCredential credential = new DefaultAzureCredentialBuilder().build();
StepVerifier.create(credential.getToken(request))
.expectNextMatches(accessToken -> token1.equals(accessToken.getToken())
&& expiresAt.getSecond() == accessToken.getExpiresAt().getSecond())
.verifyComplete();
}
@Test
public void testNoCredentialWorks() throws Exception {
TokenRequestContext request = new TokenRequestContext().addScopes("https:
IdentityClient identityClient = PowerMockito.mock(IdentityClient.class);
when(identityClient.authenticateToIMDSEndpoint(request)).thenReturn(Mono.error(new RuntimeException("Cannot get token from managed identity")));
PowerMockito.whenNew(IdentityClient.class).withAnyArguments().thenReturn(identityClient);
SharedTokenCacheCredential sharedTokenCacheCredential = PowerMockito.mock(SharedTokenCacheCredential.class);
when(sharedTokenCacheCredential.getToken(request)).thenReturn(Mono.error(new RuntimeException("Cannot get token from shared token cache")));
PowerMockito.whenNew(SharedTokenCacheCredential.class).withAnyArguments().thenReturn(sharedTokenCacheCredential);
AzureCliCredential azureCliCredential = PowerMockito.mock(AzureCliCredential.class);
when(azureCliCredential.getToken(request)).thenReturn(Mono.error(new RuntimeException("Cannot get token from Azure CLI credential")));
PowerMockito.whenNew(AzureCliCredential.class).withAnyArguments().thenReturn(azureCliCredential);
DefaultAzureCredential credential = new DefaultAzureCredentialBuilder().build();
StepVerifier.create(credential.getToken(request))
.expectErrorMatches(t -> t instanceof RuntimeException && t.getMessage()
.matches("Tried EnvironmentCredential, ManagedIdentityCredential, "
+ "SharedTokenCacheCredential"
+ "[\\$\\w]+\\$\\d*,\\s+AzureCliCredential[\\$\\w\\s\\.]+"))
.verify();
}
@Test(expected = IllegalArgumentException.class)
@Test
public void testExclueEnvironmentCredential() throws Exception {
Configuration configuration = Configuration.getGlobalConfiguration();
TokenRequestContext request1 = new TokenRequestContext().addScopes("https:
DefaultAzureCredential credential = new DefaultAzureCredentialBuilder()
.excludeEnvironmentCredential()
.build();
StepVerifier.create(credential.getToken(request1))
.expectErrorMatches(t -> t instanceof RuntimeException && t.getMessage()
.startsWith("Tried ManagedIdentityCredential, "
+ "SharedTokenCacheCredential, "
+ "AzureCliCredential"))
.verify();
}
@Test
public void testExclueManagedIdentityCredential() throws Exception {
Configuration configuration = Configuration.getGlobalConfiguration();
TokenRequestContext request1 = new TokenRequestContext().addScopes("https:
DefaultAzureCredential credential = new DefaultAzureCredentialBuilder()
.excludeManagedIdentityCredential()
.build();
StepVerifier.create(credential.getToken(request1))
.expectErrorMatches(t -> t instanceof RuntimeException && t.getMessage()
.startsWith("Tried EnvironmentCredential, "
+ "SharedTokenCacheCredential, "
+ "AzureCliCredential"))
.verify();
}
@Test
public void testExcludeSharedTokenCacheCredential() throws Exception {
Configuration configuration = Configuration.getGlobalConfiguration();
TokenRequestContext request1 = new TokenRequestContext().addScopes("https:
DefaultAzureCredential credential = new DefaultAzureCredentialBuilder()
.excludeEnvironmentCredential()
.excludeSharedTokenCacheCredential()
.build();
StepVerifier.create(credential.getToken(request1))
.expectErrorMatches(t -> t instanceof RuntimeException && t.getMessage()
.startsWith("Tried "
+ "ManagedIdentityCredential, "
+ "AzureCliCredential"))
.verify();
}
@Test
public void testExcludeAzureCliCredential() throws Exception {
Configuration configuration = Configuration.getGlobalConfiguration();
TokenRequestContext request1 = new TokenRequestContext().addScopes("https:
DefaultAzureCredential credential = new DefaultAzureCredentialBuilder()
.excludeEnvironmentCredential()
.excludeAzureCliCredential()
.build();
StepVerifier.create(credential.getToken(request1))
.expectErrorMatches(t -> t instanceof RuntimeException && t.getMessage()
.startsWith("Tried "
+ "ManagedIdentityCredential, "
+ "SharedTokenCacheCredential but"))
.verify();
}
} | class DefaultAzureCredentialTest {
private final String tenantId = "contoso.com";
private final String clientId = UUID.randomUUID().toString();
@Test
public void testUseEnvironmentCredential() throws Exception {
Configuration configuration = Configuration.getGlobalConfiguration();
try {
String secret = "secret";
String token1 = "token1";
TokenRequestContext request1 = new TokenRequestContext().addScopes("https:
OffsetDateTime expiresOn = OffsetDateTime.now(ZoneOffset.UTC).plusHours(1);
configuration.put("AZURE_CLIENT_ID", clientId);
configuration.put("AZURE_CLIENT_SECRET", secret);
configuration.put("AZURE_TENANT_ID", tenantId);
IdentityClient identityClient = PowerMockito.mock(IdentityClient.class);
when(identityClient.authenticateWithClientSecret(secret, request1)).thenReturn(TestUtils.getMockAccessToken(token1, expiresOn));
PowerMockito.whenNew(IdentityClient.class).withAnyArguments().thenReturn(identityClient);
DefaultAzureCredential credential = new DefaultAzureCredentialBuilder().build();
StepVerifier.create(credential.getToken(request1))
.expectNextMatches(accessToken -> token1.equals(accessToken.getToken())
&& expiresOn.getSecond() == accessToken.getExpiresAt().getSecond())
.verifyComplete();
} finally {
configuration.remove("AZURE_CLIENT_ID");
configuration.remove("AZURE_CLIENT_SECRET");
configuration.remove("AZURE_TENANT_ID");
}
}
@Test
public void testUseManagedIdentityCredential() throws Exception {
String token1 = "token1";
TokenRequestContext request = new TokenRequestContext().addScopes("https:
OffsetDateTime expiresAt = OffsetDateTime.now(ZoneOffset.UTC).plusHours(1);
IdentityClient identityClient = PowerMockito.mock(IdentityClient.class);
when(identityClient.authenticateToIMDSEndpoint(request)).thenReturn(TestUtils.getMockAccessToken(token1, expiresAt));
PowerMockito.whenNew(IdentityClient.class).withAnyArguments().thenReturn(identityClient);
DefaultAzureCredential credential = new DefaultAzureCredentialBuilder().build();
StepVerifier.create(credential.getToken(request))
.expectNextMatches(accessToken -> token1.equals(accessToken.getToken())
&& expiresAt.getSecond() == accessToken.getExpiresAt().getSecond())
.verifyComplete();
}
@Test
public void testUseAzureCliCredential() throws Exception {
String token1 = "token1";
TokenRequestContext request = new TokenRequestContext().addScopes("https:
OffsetDateTime expiresAt = OffsetDateTime.now(ZoneOffset.UTC).plusHours(1);
IdentityClient identityClient = PowerMockito.mock(IdentityClient.class);
when(identityClient.authenticateWithAzureCli(request)).thenReturn(TestUtils.getMockAccessToken(token1, expiresAt));
when(identityClient.authenticateToIMDSEndpoint(request)).thenReturn(Mono.empty());
PowerMockito.whenNew(IdentityClient.class).withAnyArguments().thenReturn(identityClient);
DefaultAzureCredential credential = new DefaultAzureCredentialBuilder().build();
StepVerifier.create(credential.getToken(request))
.expectNextMatches(accessToken -> token1.equals(accessToken.getToken())
&& expiresAt.getSecond() == accessToken.getExpiresAt().getSecond())
.verifyComplete();
}
@Test
public void testNoCredentialWorks() throws Exception {
TokenRequestContext request = new TokenRequestContext().addScopes("https:
IdentityClient identityClient = PowerMockito.mock(IdentityClient.class);
when(identityClient.authenticateToIMDSEndpoint(request)).thenReturn(Mono.error(new RuntimeException("Cannot get token from managed identity")));
PowerMockito.whenNew(IdentityClient.class).withAnyArguments().thenReturn(identityClient);
SharedTokenCacheCredential sharedTokenCacheCredential = PowerMockito.mock(SharedTokenCacheCredential.class);
when(sharedTokenCacheCredential.getToken(request)).thenReturn(Mono.error(new RuntimeException("Cannot get token from shared token cache")));
PowerMockito.whenNew(SharedTokenCacheCredential.class).withAnyArguments().thenReturn(sharedTokenCacheCredential);
AzureCliCredential azureCliCredential = PowerMockito.mock(AzureCliCredential.class);
when(azureCliCredential.getToken(request)).thenReturn(Mono.error(new RuntimeException("Cannot get token from Azure CLI credential")));
PowerMockito.whenNew(AzureCliCredential.class).withAnyArguments().thenReturn(azureCliCredential);
DefaultAzureCredential credential = new DefaultAzureCredentialBuilder().build();
StepVerifier.create(credential.getToken(request))
.expectErrorMatches(t -> t instanceof RuntimeException && t.getMessage()
.matches("Tried EnvironmentCredential, ManagedIdentityCredential, "
+ "SharedTokenCacheCredential"
+ "[\\$\\w]+\\$\\d*,\\s+AzureCliCredential[\\$\\w\\s\\.]+"))
.verify();
}
@Test(expected = IllegalArgumentException.class)
@Test
public void testExclueEnvironmentCredential() throws Exception {
Configuration configuration = Configuration.getGlobalConfiguration();
TokenRequestContext request1 = new TokenRequestContext().addScopes("https:
DefaultAzureCredential credential = new DefaultAzureCredentialBuilder()
.excludeEnvironmentCredential()
.build();
StepVerifier.create(credential.getToken(request1))
.expectErrorMatches(t -> t instanceof RuntimeException && t.getMessage()
.startsWith("Tried ManagedIdentityCredential, "
+ "SharedTokenCacheCredential, "
+ "AzureCliCredential"))
.verify();
}
@Test
public void testExclueManagedIdentityCredential() throws Exception {
Configuration configuration = Configuration.getGlobalConfiguration();
TokenRequestContext request1 = new TokenRequestContext().addScopes("https:
DefaultAzureCredential credential = new DefaultAzureCredentialBuilder()
.excludeManagedIdentityCredential()
.build();
StepVerifier.create(credential.getToken(request1))
.expectErrorMatches(t -> t instanceof RuntimeException && t.getMessage()
.startsWith("Tried EnvironmentCredential, "
+ "SharedTokenCacheCredential, "
+ "AzureCliCredential"))
.verify();
}
@Test
public void testExcludeSharedTokenCacheCredential() throws Exception {
Configuration configuration = Configuration.getGlobalConfiguration();
TokenRequestContext request1 = new TokenRequestContext().addScopes("https:
DefaultAzureCredential credential = new DefaultAzureCredentialBuilder()
.excludeEnvironmentCredential()
.excludeSharedTokenCacheCredential()
.build();
StepVerifier.create(credential.getToken(request1))
.expectErrorMatches(t -> t instanceof RuntimeException && t.getMessage()
.startsWith("Tried "
+ "ManagedIdentityCredential, "
+ "AzureCliCredential"))
.verify();
}
@Test
public void testExcludeAzureCliCredential() throws Exception {
Configuration configuration = Configuration.getGlobalConfiguration();
TokenRequestContext request1 = new TokenRequestContext().addScopes("https:
DefaultAzureCredential credential = new DefaultAzureCredentialBuilder()
.excludeEnvironmentCredential()
.excludeAzureCliCredential()
.build();
StepVerifier.create(credential.getToken(request1))
.expectErrorMatches(t -> t instanceof RuntimeException && t.getMessage()
.startsWith("Tried "
+ "ManagedIdentityCredential, "
+ "SharedTokenCacheCredential but"))
.verify();
}
} |
This should be synchronized or declare `this.managementChannel` as AtomicReference. | public Mono<EventHubManagementNode> getManagementNode() {
if (isDisposed()) {
return Mono.error(logger.logExceptionAsError(new IllegalStateException(String.format(
"connectionId[%s]: Connection is disposed. Cannot get management instance", connectionId))));
}
return getReactorConnection().then(Mono.fromCallable(() -> {
if (managementChannel == null) {
this.managementChannel = new ManagementChannel(
createRequestResponseChannel(MANAGEMENT_SESSION_NAME, MANAGEMENT_LINK_NAME, MANAGEMENT_ADDRESS),
eventHubName, tokenCredential, tokenManagerProvider, this.messageSerializer, scheduler);
}
return managementChannel;
}));
} | if (managementChannel == null) { | public Mono<EventHubManagementNode> getManagementNode() {
if (isDisposed()) {
return Mono.error(logger.logExceptionAsError(new IllegalStateException(String.format(
"connectionId[%s]: Connection is disposed. Cannot get management instance", connectionId))));
}
return getReactorConnection().then(Mono.fromCallable(this::getOrCreateManagementChannel));
} | class EventHubReactorAmqpConnection extends ReactorConnection implements EventHubAmqpConnection {
private static final String MANAGEMENT_SESSION_NAME = "mgmt-session";
private static final String MANAGEMENT_LINK_NAME = "mgmt";
private static final String MANAGEMENT_ADDRESS = "$management";
private final ClientLogger logger = new ClientLogger(EventHubReactorAmqpConnection.class);
/**
* Keeps track of the opened send links. Links are key'd by their entityPath. The send link for allowing the service
* load balance messages is the eventHubName.
*/
private final ConcurrentHashMap<String, AmqpSendLink> sendLinks = new ConcurrentHashMap<>();
private final String connectionId;
private final String eventHubName;
private final ReactorProvider reactorProvider;
private final ReactorHandlerProvider handlerProvider;
private final TokenManagerProvider tokenManagerProvider;
private final AmqpRetryOptions retryOptions;
private final MessageSerializer messageSerializer;
private final TokenCredential tokenCredential;
private final Scheduler scheduler;
private volatile ManagementChannel managementChannel;
/**
* Creates a new AMQP connection that uses proton-j.
*
* @param connectionId Identifier for the connection.
* @param connectionOptions A set of options used to create the AMQP connection.
* @param reactorProvider Provides proton-j reactor instances.
* @param handlerProvider Provides {@link BaseHandler} to listen to proton-j reactor events.
* @param tokenManagerProvider Provides a token manager for authorizing with CBS node.
* @param messageSerializer Serializes and deserializes proton-j messages.
*/
public EventHubReactorAmqpConnection(String connectionId, ConnectionOptions connectionOptions, String eventHubName,
ReactorProvider reactorProvider, ReactorHandlerProvider handlerProvider,
TokenManagerProvider tokenManagerProvider, MessageSerializer messageSerializer, String product,
String clientVersion) {
super(connectionId, connectionOptions, reactorProvider, handlerProvider, tokenManagerProvider,
messageSerializer, product, clientVersion, SenderSettleMode.SETTLED, ReceiverSettleMode.SECOND);
this.connectionId = connectionId;
this.eventHubName = eventHubName;
this.reactorProvider = reactorProvider;
this.handlerProvider = handlerProvider;
this.tokenManagerProvider = tokenManagerProvider;
this.retryOptions = connectionOptions.getRetry();
this.messageSerializer = messageSerializer;
this.tokenCredential = connectionOptions.getTokenCredential();
this.scheduler = connectionOptions.getScheduler();
}
@Override
/**
* Creates or gets a send link. The same link is returned if there is an existing send link with the same {@code
* linkName}. Otherwise, a new link is created and returned.
*
* @param linkName The name of the link.
* @param entityPath The remote address to connect to for the message broker.
* @param retryOptions Options to use when creating the link.
* @return A new or existing send link that is connected to the given {@code entityPath}.
*/
@Override
public Mono<AmqpSendLink> createSendLink(String linkName, String entityPath, AmqpRetryOptions retryOptions) {
return createSession(entityPath).flatMap(session -> {
logger.verbose("Get or create producer for path: '{}'", entityPath);
final AmqpRetryPolicy retryPolicy = RetryUtil.getRetryPolicy(retryOptions);
return session.createProducer(linkName, entityPath, retryOptions.getTryTimeout(), retryPolicy)
.cast(AmqpSendLink.class);
});
}
/**
* Creates or gets an existing receive link. The same link is returned if there is an existing receive link with the
* same {@code linkName}. Otherwise, a new link is created and returned.
*
* @param linkName The name of the link.
* @param entityPath The remote address to connect to for the message broker.
* @param eventPosition Position to set the receive link to.
* @param options Consumer options to use when creating the link.
* @return A new or existing receive link that is connected to the given {@code entityPath}.
*/
@Override
public Mono<AmqpReceiveLink> createReceiveLink(String linkName, String entityPath, EventPosition eventPosition,
ReceiveOptions options) {
return createSession(entityPath).cast(EventHubSession.class)
.flatMap(session -> {
logger.verbose("Get or create consumer for path: '{}'", entityPath);
final AmqpRetryPolicy retryPolicy = RetryUtil.getRetryPolicy(retryOptions);
return session.createConsumer(linkName, entityPath, retryOptions.getTryTimeout(), retryPolicy,
eventPosition, options);
});
}
@Override
public void dispose() {
logger.info("Disposing of connection.");
sendLinks.forEach((key, value) -> value.dispose());
sendLinks.clear();
super.dispose();
}
@Override
protected AmqpSession createSession(String sessionName, Session session, SessionHandler handler) {
return new EventHubReactorSession(session, handler, sessionName, reactorProvider, handlerProvider,
getClaimsBasedSecurityNode(), tokenManagerProvider, retryOptions.getTryTimeout(), messageSerializer);
}
} | class EventHubReactorAmqpConnection extends ReactorConnection implements EventHubAmqpConnection {
private static final String MANAGEMENT_SESSION_NAME = "mgmt-session";
private static final String MANAGEMENT_LINK_NAME = "mgmt";
private static final String MANAGEMENT_ADDRESS = "$management";
private final ClientLogger logger = new ClientLogger(EventHubReactorAmqpConnection.class);
/**
* Keeps track of the opened send links. Links are key'd by their entityPath. The send link for allowing the service
* load balance messages is the eventHubName.
*/
private final ConcurrentHashMap<String, AmqpSendLink> sendLinks = new ConcurrentHashMap<>();
private final String connectionId;
private final String eventHubName;
private final ReactorProvider reactorProvider;
private final ReactorHandlerProvider handlerProvider;
private final TokenManagerProvider tokenManagerProvider;
private final AmqpRetryOptions retryOptions;
private final MessageSerializer messageSerializer;
private final TokenCredential tokenCredential;
private final Scheduler scheduler;
private volatile ManagementChannel managementChannel;
/**
* Creates a new AMQP connection that uses proton-j.
*
* @param connectionId Identifier for the connection.
* @param connectionOptions A set of options used to create the AMQP connection.
* @param reactorProvider Provides proton-j reactor instances.
* @param handlerProvider Provides {@link BaseHandler} to listen to proton-j reactor events.
* @param tokenManagerProvider Provides a token manager for authorizing with CBS node.
* @param messageSerializer Serializes and deserializes proton-j messages.
*/
public EventHubReactorAmqpConnection(String connectionId, ConnectionOptions connectionOptions, String eventHubName,
ReactorProvider reactorProvider, ReactorHandlerProvider handlerProvider,
TokenManagerProvider tokenManagerProvider, MessageSerializer messageSerializer, String product,
String clientVersion) {
super(connectionId, connectionOptions, reactorProvider, handlerProvider, tokenManagerProvider,
messageSerializer, product, clientVersion, SenderSettleMode.SETTLED, ReceiverSettleMode.SECOND);
this.connectionId = connectionId;
this.eventHubName = eventHubName;
this.reactorProvider = reactorProvider;
this.handlerProvider = handlerProvider;
this.tokenManagerProvider = tokenManagerProvider;
this.retryOptions = connectionOptions.getRetry();
this.tokenCredential = connectionOptions.getTokenCredential();
this.scheduler = connectionOptions.getScheduler();
this.messageSerializer = messageSerializer;
}
@Override
/**
* Creates or gets a send link. The same link is returned if there is an existing send link with the same {@code
* linkName}. Otherwise, a new link is created and returned.
*
* @param linkName The name of the link.
* @param entityPath The remote address to connect to for the message broker.
* @param retryOptions Options to use when creating the link.
* @return A new or existing send link that is connected to the given {@code entityPath}.
*/
@Override
public Mono<AmqpSendLink> createSendLink(String linkName, String entityPath, AmqpRetryOptions retryOptions) {
return createSession(entityPath).flatMap(session -> {
logger.verbose("Get or create producer for path: '{}'", entityPath);
final AmqpRetryPolicy retryPolicy = RetryUtil.getRetryPolicy(retryOptions);
return session.createProducer(linkName, entityPath, retryOptions.getTryTimeout(), retryPolicy)
.cast(AmqpSendLink.class);
});
}
/**
* Creates or gets an existing receive link. The same link is returned if there is an existing receive link with the
* same {@code linkName}. Otherwise, a new link is created and returned.
*
* @param linkName The name of the link.
* @param entityPath The remote address to connect to for the message broker.
* @param eventPosition Position to set the receive link to.
* @param options Consumer options to use when creating the link.
* @return A new or existing receive link that is connected to the given {@code entityPath}.
*/
@Override
public Mono<AmqpReceiveLink> createReceiveLink(String linkName, String entityPath, EventPosition eventPosition,
ReceiveOptions options) {
return createSession(entityPath).cast(EventHubSession.class)
.flatMap(session -> {
logger.verbose("Get or create consumer for path: '{}'", entityPath);
final AmqpRetryPolicy retryPolicy = RetryUtil.getRetryPolicy(retryOptions);
return session.createConsumer(linkName, entityPath, retryOptions.getTryTimeout(), retryPolicy,
eventPosition, options);
});
}
@Override
public void dispose() {
logger.info("Disposing of connection.");
sendLinks.forEach((key, value) -> value.dispose());
sendLinks.clear();
super.dispose();
}
@Override
protected AmqpSession createSession(String sessionName, Session session, SessionHandler handler) {
return new EventHubReactorSession(session, handler, sessionName, reactorProvider, handlerProvider,
getClaimsBasedSecurityNode(), tokenManagerProvider, retryOptions.getTryTimeout(), messageSerializer);
}
private synchronized ManagementChannel getOrCreateManagementChannel() {
if (managementChannel == null) {
managementChannel = new ManagementChannel(
createRequestResponseChannel(MANAGEMENT_SESSION_NAME, MANAGEMENT_LINK_NAME, MANAGEMENT_ADDRESS),
eventHubName, tokenCredential, tokenManagerProvider, this.messageSerializer, scheduler);
}
return managementChannel;
}
} |
There is Assertions.assertThrows. | void createBatchWhenSizeTooBigThanOnSendLink() {
int maxLinkSize = 1024;
int batchSize = maxLinkSize + 10;
final CreateBatchOptions options = new CreateBatchOptions().setMaximumSizeInBytes(batchSize);
when(asyncSender.createBatch(options)).thenThrow(new IllegalArgumentException("too large size"));
try {
sender.createBatch(options);
Assertions.fail("Should not have created batch because batchSize is bigger than the size on SenderLink.");
} catch (Exception ex) {
Assertions.assertTrue(ex instanceof IllegalArgumentException);
}
verify(asyncSender, times(1)).createBatch(options);
} | sender.createBatch(options); | void createBatchWhenSizeTooBigThanOnSendLink() {
int maxLinkSize = 1024;
int batchSize = maxLinkSize + 10;
final CreateBatchOptions options = new CreateBatchOptions().setMaximumSizeInBytes(batchSize);
when(asyncSender.createBatch(options)).thenThrow(new IllegalArgumentException("too large size"));
Assertions.assertThrows(IllegalArgumentException.class, () -> sender.createBatch(options));
verify(asyncSender, times(1)).createBatch(options);
} | class ServiceBusSenderClientTest {
private static final String NAMESPACE = "my-namespace";
private static final String ENTITY_NAME = "my-servicebus-entity";
@Mock
private ErrorContextProvider errorContextProvider;
@Mock
private ServiceBusSenderAsyncClient asyncSender;
@Captor
private ArgumentCaptor<ServiceBusMessage> singleMessageCaptor;
@Captor
private ArgumentCaptor<ServiceBusMessageBatch> messageBatchCaptor;
private MessageSerializer serializer = new ServiceBusMessageSerializer();
private TracerProvider tracerProvider = new TracerProvider(Collections.emptyList());
private final MessageSerializer messageSerializer = new ServiceBusMessageSerializer();
private final AmqpRetryOptions retryOptions = new AmqpRetryOptions()
.setDelay(Duration.ofMillis(500))
.setMode(AmqpRetryMode.FIXED)
.setTryTimeout(Duration.ofSeconds(10));
private ServiceBusSenderClient sender;
private static final String TEST_CONTENTS = "My message for service bus queue!";
@BeforeAll
static void beforeAll() {
StepVerifier.setDefaultTimeout(Duration.ofSeconds(30));
}
@AfterAll
static void afterAll() {
StepVerifier.resetDefaultTimeout();
}
@BeforeEach
void setup() {
MockitoAnnotations.initMocks(this);
when(asyncSender.getEntityPath()).thenReturn(ENTITY_NAME);
when(asyncSender.getFullyQualifiedNamespace()).thenReturn(NAMESPACE);
sender = new ServiceBusSenderClient(asyncSender, retryOptions.getTryTimeout());
}
@AfterEach
void teardown() {
sender.close();
singleMessageCaptor = null;
messageBatchCaptor = null;
Mockito.framework().clearInlineMocks();
}
@Test
void verifyProperties() {
Assertions.assertEquals(ENTITY_NAME, sender.getEntityPath());
Assertions.assertEquals(NAMESPACE, sender.getFullyQualifiedNamespace());
}
/**
* Verifies that an exception is thrown when we create a batch with null options.
*/
@Test
void createBatchNull() {
Assertions.assertThrows(NullPointerException.class, () -> sender.createBatch(null));
}
/**
* Verifies that the default batch is the same size as the message link.
*/
@Test
void createBatchDefault() {
ServiceBusMessageBatch batch = new ServiceBusMessageBatch(MAX_MESSAGE_LENGTH_BYTES, null, null,
null);
when(asyncSender.createBatch()).thenReturn(Mono.just(batch));
ServiceBusMessageBatch batchMessage = sender.createBatch();
Assertions.assertEquals(MAX_MESSAGE_LENGTH_BYTES, batchMessage.getMaxSizeInBytes());
Assertions.assertEquals(0, batchMessage.getCount());
verify(asyncSender, times(1)).createBatch();
}
/**
* Verifies we cannot create a batch if the options size is larger than the link.
*/
@Test
/**
* Verifies that the producer can create a batch with a given {@link CreateBatchOptions
*/
@Test
void createsMessageBatchWithSize() {
int maxLinkSize = 10000;
int batchSize = 1024;
int eventOverhead = 46;
int maxEventPayload = batchSize - eventOverhead;
final ServiceBusMessage message = new ServiceBusMessage(new byte[maxEventPayload]);
final ServiceBusMessage tooLargeMessage = new ServiceBusMessage(new byte[maxEventPayload + 1]);
final CreateBatchOptions options = new CreateBatchOptions().setMaximumSizeInBytes(batchSize);
ServiceBusMessageBatch batch = new ServiceBusMessageBatch(batchSize, null, tracerProvider,
messageSerializer);
when(asyncSender.createBatch(options)).thenReturn(Mono.just(batch));
ServiceBusMessageBatch messageBatch = sender.createBatch(options);
Assertions.assertEquals(batchSize, messageBatch.getMaxSizeInBytes());
Assertions.assertTrue(messageBatch.tryAdd(message));
Assertions.assertFalse(messageBatch.tryAdd(tooLargeMessage));
}
/**
* Verifies that sending multiple message will result in calling sender.send(MessageBatch).
*/
@Test
void sendMultipleMessages() {
final int count = 4;
final byte[] contents = TEST_CONTENTS.getBytes(UTF_8);
final ServiceBusMessageBatch batch = new ServiceBusMessageBatch(256 * 1024,
errorContextProvider, tracerProvider, serializer);
IntStream.range(0, count).forEach(index -> {
final ServiceBusMessage message = new ServiceBusMessage(contents);
Assertions.assertTrue(batch.tryAdd(message));
});
when(asyncSender.send(batch)).thenReturn(Mono.empty());
sender.send(batch);
verify(asyncSender).send(messageBatchCaptor.capture());
final ServiceBusMessageBatch messagesSent = messageBatchCaptor.getValue();
Assertions.assertEquals(count, messagesSent.getCount());
messagesSent.getMessages().forEach(message -> Assertions.assertArrayEquals(contents, message.getBody()));
}
/**
* Verifies that sending a single message will result in calling sender.send(Message).
*/
@Test
void sendSingleMessage() {
final ServiceBusMessage testData =
new ServiceBusMessage(TEST_CONTENTS.getBytes(UTF_8));
when(asyncSender.send(testData)).thenReturn(Mono.empty());
sender.send(testData);
verify(asyncSender, times(1)).send(testData);
verify(asyncSender).send(singleMessageCaptor.capture());
final ServiceBusMessage message = singleMessageCaptor.getValue();
Assertions.assertArrayEquals(testData.getBody(), message.getBody());
}
} | class ServiceBusSenderClientTest {
private static final String NAMESPACE = "my-namespace";
private static final String ENTITY_NAME = "my-servicebus-entity";
@Mock
private ServiceBusSenderAsyncClient asyncSender;
@Captor
private ArgumentCaptor<ServiceBusMessage> singleMessageCaptor;
private ServiceBusSenderClient sender;
private static final Duration RETRY_TIMEOUT = Duration.ofSeconds(10);
private static final String TEST_CONTENTS = "My message for service bus queue!";
@BeforeAll
static void beforeAll() {
StepVerifier.setDefaultTimeout(Duration.ofSeconds(30));
}
@AfterAll
static void afterAll() {
StepVerifier.resetDefaultTimeout();
}
@BeforeEach
void setup() {
MockitoAnnotations.initMocks(this);
when(asyncSender.getEntityPath()).thenReturn(ENTITY_NAME);
when(asyncSender.getFullyQualifiedNamespace()).thenReturn(NAMESPACE);
sender = new ServiceBusSenderClient(asyncSender, RETRY_TIMEOUT);
}
@AfterEach
void teardown() {
sender.close();
singleMessageCaptor = null;
Mockito.framework().clearInlineMocks();
}
@Test
void verifyProperties() {
Assertions.assertEquals(ENTITY_NAME, sender.getEntityPath());
Assertions.assertEquals(NAMESPACE, sender.getFullyQualifiedNamespace());
}
/**
* Verifies that an exception is thrown when we create a batch with null options.
*/
@Test
void createBatchNull() {
Assertions.assertThrows(NullPointerException.class, () -> sender.createBatch(null));
}
/**
* Verifies that the default batch is the same size as the message link.
*/
@Test
void createBatchDefault() {
ServiceBusMessageBatch batch = new ServiceBusMessageBatch(MAX_MESSAGE_LENGTH_BYTES, null, null,
null);
when(asyncSender.createBatch()).thenReturn(Mono.just(batch));
ServiceBusMessageBatch batchMessage = sender.createBatch();
Assertions.assertEquals(MAX_MESSAGE_LENGTH_BYTES, batchMessage.getMaxSizeInBytes());
Assertions.assertEquals(0, batchMessage.getCount());
verify(asyncSender).createBatch();
}
/**
* Verifies we cannot create a batch if the options size is larger than the link.
*/
@Test
/**
* Verifies that the producer can create a batch with a given {@link CreateBatchOptions
*/
@Test
void createsMessageBatchWithSize() {
int batchSize = 1024;
final CreateBatchOptions options = new CreateBatchOptions().setMaximumSizeInBytes(batchSize);
final ServiceBusMessageBatch batch = new ServiceBusMessageBatch(batchSize, null, null,
null);
when(asyncSender.createBatch(options)).thenReturn(Mono.just(batch));
ServiceBusMessageBatch messageBatch = sender.createBatch(options);
Assertions.assertEquals(batch, messageBatch);
}
/**
* Verifies that sending a single message will result in calling sender.send(Message).
*/
@Test
void sendSingleMessage() {
final ServiceBusMessage testData =
new ServiceBusMessage(TEST_CONTENTS.getBytes(UTF_8));
when(asyncSender.send(testData)).thenReturn(Mono.empty());
sender.send(testData);
verify(asyncSender, times(1)).send(testData);
verify(asyncSender).send(singleMessageCaptor.capture());
final ServiceBusMessage message = singleMessageCaptor.getValue();
Assertions.assertArrayEquals(testData.getBody(), message.getBody());
}
} |
You don't need all this logic. You can assert that the correct properties for the batch were passed to asyncClient. We assume asyncClient does the right thing | void createsMessageBatchWithSize() {
int maxLinkSize = 10000;
int batchSize = 1024;
int eventOverhead = 46;
int maxEventPayload = batchSize - eventOverhead;
final ServiceBusMessage message = new ServiceBusMessage(new byte[maxEventPayload]);
final ServiceBusMessage tooLargeMessage = new ServiceBusMessage(new byte[maxEventPayload + 1]);
final CreateBatchOptions options = new CreateBatchOptions().setMaximumSizeInBytes(batchSize);
ServiceBusMessageBatch batch = new ServiceBusMessageBatch(batchSize, null, tracerProvider,
messageSerializer);
when(asyncSender.createBatch(options)).thenReturn(Mono.just(batch));
ServiceBusMessageBatch messageBatch = sender.createBatch(options);
Assertions.assertEquals(batchSize, messageBatch.getMaxSizeInBytes());
Assertions.assertTrue(messageBatch.tryAdd(message));
Assertions.assertFalse(messageBatch.tryAdd(tooLargeMessage));
} | int eventOverhead = 46; | void createsMessageBatchWithSize() {
int batchSize = 1024;
final CreateBatchOptions options = new CreateBatchOptions().setMaximumSizeInBytes(batchSize);
final ServiceBusMessageBatch batch = new ServiceBusMessageBatch(batchSize, null, null,
null);
when(asyncSender.createBatch(options)).thenReturn(Mono.just(batch));
ServiceBusMessageBatch messageBatch = sender.createBatch(options);
Assertions.assertEquals(batch, messageBatch);
} | class ServiceBusSenderClientTest {
private static final String NAMESPACE = "my-namespace";
private static final String ENTITY_NAME = "my-servicebus-entity";
@Mock
private ErrorContextProvider errorContextProvider;
@Mock
private ServiceBusSenderAsyncClient asyncSender;
@Captor
private ArgumentCaptor<ServiceBusMessage> singleMessageCaptor;
@Captor
private ArgumentCaptor<ServiceBusMessageBatch> messageBatchCaptor;
private MessageSerializer serializer = new ServiceBusMessageSerializer();
private TracerProvider tracerProvider = new TracerProvider(Collections.emptyList());
private final MessageSerializer messageSerializer = new ServiceBusMessageSerializer();
private final AmqpRetryOptions retryOptions = new AmqpRetryOptions()
.setDelay(Duration.ofMillis(500))
.setMode(AmqpRetryMode.FIXED)
.setTryTimeout(Duration.ofSeconds(10));
private ServiceBusSenderClient sender;
private static final String TEST_CONTENTS = "My message for service bus queue!";
@BeforeAll
static void beforeAll() {
StepVerifier.setDefaultTimeout(Duration.ofSeconds(30));
}
@AfterAll
static void afterAll() {
StepVerifier.resetDefaultTimeout();
}
@BeforeEach
void setup() {
MockitoAnnotations.initMocks(this);
when(asyncSender.getEntityPath()).thenReturn(ENTITY_NAME);
when(asyncSender.getFullyQualifiedNamespace()).thenReturn(NAMESPACE);
sender = new ServiceBusSenderClient(asyncSender, retryOptions.getTryTimeout());
}
@AfterEach
void teardown() {
sender.close();
singleMessageCaptor = null;
messageBatchCaptor = null;
Mockito.framework().clearInlineMocks();
}
@Test
void verifyProperties() {
Assertions.assertEquals(ENTITY_NAME, sender.getEntityPath());
Assertions.assertEquals(NAMESPACE, sender.getFullyQualifiedNamespace());
}
/**
* Verifies that an exception is thrown when we create a batch with null options.
*/
@Test
void createBatchNull() {
Assertions.assertThrows(NullPointerException.class, () -> sender.createBatch(null));
}
/**
* Verifies that the default batch is the same size as the message link.
*/
@Test
void createBatchDefault() {
ServiceBusMessageBatch batch = new ServiceBusMessageBatch(MAX_MESSAGE_LENGTH_BYTES, null, null,
null);
when(asyncSender.createBatch()).thenReturn(Mono.just(batch));
ServiceBusMessageBatch batchMessage = sender.createBatch();
Assertions.assertEquals(MAX_MESSAGE_LENGTH_BYTES, batchMessage.getMaxSizeInBytes());
Assertions.assertEquals(0, batchMessage.getCount());
verify(asyncSender, times(1)).createBatch();
}
/**
* Verifies we cannot create a batch if the options size is larger than the link.
*/
@Test
void createBatchWhenSizeTooBigThanOnSendLink() {
int maxLinkSize = 1024;
int batchSize = maxLinkSize + 10;
final CreateBatchOptions options = new CreateBatchOptions().setMaximumSizeInBytes(batchSize);
when(asyncSender.createBatch(options)).thenThrow(new IllegalArgumentException("too large size"));
try {
sender.createBatch(options);
Assertions.fail("Should not have created batch because batchSize is bigger than the size on SenderLink.");
} catch (Exception ex) {
Assertions.assertTrue(ex instanceof IllegalArgumentException);
}
verify(asyncSender, times(1)).createBatch(options);
}
/**
* Verifies that the producer can create a batch with a given {@link CreateBatchOptions
*/
@Test
/**
* Verifies that sending multiple message will result in calling sender.send(MessageBatch).
*/
@Test
void sendMultipleMessages() {
final int count = 4;
final byte[] contents = TEST_CONTENTS.getBytes(UTF_8);
final ServiceBusMessageBatch batch = new ServiceBusMessageBatch(256 * 1024,
errorContextProvider, tracerProvider, serializer);
IntStream.range(0, count).forEach(index -> {
final ServiceBusMessage message = new ServiceBusMessage(contents);
Assertions.assertTrue(batch.tryAdd(message));
});
when(asyncSender.send(batch)).thenReturn(Mono.empty());
sender.send(batch);
verify(asyncSender).send(messageBatchCaptor.capture());
final ServiceBusMessageBatch messagesSent = messageBatchCaptor.getValue();
Assertions.assertEquals(count, messagesSent.getCount());
messagesSent.getMessages().forEach(message -> Assertions.assertArrayEquals(contents, message.getBody()));
}
/**
* Verifies that sending a single message will result in calling sender.send(Message).
*/
@Test
void sendSingleMessage() {
final ServiceBusMessage testData =
new ServiceBusMessage(TEST_CONTENTS.getBytes(UTF_8));
when(asyncSender.send(testData)).thenReturn(Mono.empty());
sender.send(testData);
verify(asyncSender, times(1)).send(testData);
verify(asyncSender).send(singleMessageCaptor.capture());
final ServiceBusMessage message = singleMessageCaptor.getValue();
Assertions.assertArrayEquals(testData.getBody(), message.getBody());
}
} | class ServiceBusSenderClientTest {
private static final String NAMESPACE = "my-namespace";
private static final String ENTITY_NAME = "my-servicebus-entity";
@Mock
private ServiceBusSenderAsyncClient asyncSender;
@Captor
private ArgumentCaptor<ServiceBusMessage> singleMessageCaptor;
private ServiceBusSenderClient sender;
private static final Duration RETRY_TIMEOUT = Duration.ofSeconds(10);
private static final String TEST_CONTENTS = "My message for service bus queue!";
@BeforeAll
static void beforeAll() {
StepVerifier.setDefaultTimeout(Duration.ofSeconds(30));
}
@AfterAll
static void afterAll() {
StepVerifier.resetDefaultTimeout();
}
@BeforeEach
void setup() {
MockitoAnnotations.initMocks(this);
when(asyncSender.getEntityPath()).thenReturn(ENTITY_NAME);
when(asyncSender.getFullyQualifiedNamespace()).thenReturn(NAMESPACE);
sender = new ServiceBusSenderClient(asyncSender, RETRY_TIMEOUT);
}
@AfterEach
void teardown() {
sender.close();
singleMessageCaptor = null;
Mockito.framework().clearInlineMocks();
}
@Test
void verifyProperties() {
Assertions.assertEquals(ENTITY_NAME, sender.getEntityPath());
Assertions.assertEquals(NAMESPACE, sender.getFullyQualifiedNamespace());
}
/**
* Verifies that an exception is thrown when we create a batch with null options.
*/
@Test
void createBatchNull() {
Assertions.assertThrows(NullPointerException.class, () -> sender.createBatch(null));
}
/**
* Verifies that the default batch is the same size as the message link.
*/
@Test
void createBatchDefault() {
ServiceBusMessageBatch batch = new ServiceBusMessageBatch(MAX_MESSAGE_LENGTH_BYTES, null, null,
null);
when(asyncSender.createBatch()).thenReturn(Mono.just(batch));
ServiceBusMessageBatch batchMessage = sender.createBatch();
Assertions.assertEquals(MAX_MESSAGE_LENGTH_BYTES, batchMessage.getMaxSizeInBytes());
Assertions.assertEquals(0, batchMessage.getCount());
verify(asyncSender).createBatch();
}
/**
* Verifies we cannot create a batch if the options size is larger than the link.
*/
@Test
void createBatchWhenSizeTooBigThanOnSendLink() {
int maxLinkSize = 1024;
int batchSize = maxLinkSize + 10;
final CreateBatchOptions options = new CreateBatchOptions().setMaximumSizeInBytes(batchSize);
when(asyncSender.createBatch(options)).thenThrow(new IllegalArgumentException("too large size"));
Assertions.assertThrows(IllegalArgumentException.class, () -> sender.createBatch(options));
verify(asyncSender, times(1)).createBatch(options);
}
/**
* Verifies that the producer can create a batch with a given {@link CreateBatchOptions
*/
@Test
/**
* Verifies that sending a single message will result in calling sender.send(Message).
*/
@Test
void sendSingleMessage() {
final ServiceBusMessage testData =
new ServiceBusMessage(TEST_CONTENTS.getBytes(UTF_8));
when(asyncSender.send(testData)).thenReturn(Mono.empty());
sender.send(testData);
verify(asyncSender, times(1)).send(testData);
verify(asyncSender).send(singleMessageCaptor.capture());
final ServiceBusMessage message = singleMessageCaptor.getValue();
Assertions.assertArrayEquals(testData.getBody(), message.getBody());
}
} |
```suggestion final ServiceBusMessageBatch batch = new ServiceBusMessageBatch(batchSize, null, null, ``` | void createsMessageBatchWithSize() {
int batchSize = 1024;
final CreateBatchOptions options = new CreateBatchOptions().setMaximumSizeInBytes(batchSize);
ServiceBusMessageBatch batch = new ServiceBusMessageBatch(batchSize, null, null,
null);
when(asyncSender.createBatch(options)).thenReturn(Mono.just(batch));
ServiceBusMessageBatch messageBatch = sender.createBatch(options);
Assertions.assertEquals(batch, messageBatch);
} | ServiceBusMessageBatch batch = new ServiceBusMessageBatch(batchSize, null, null, | void createsMessageBatchWithSize() {
int batchSize = 1024;
final CreateBatchOptions options = new CreateBatchOptions().setMaximumSizeInBytes(batchSize);
final ServiceBusMessageBatch batch = new ServiceBusMessageBatch(batchSize, null, null,
null);
when(asyncSender.createBatch(options)).thenReturn(Mono.just(batch));
ServiceBusMessageBatch messageBatch = sender.createBatch(options);
Assertions.assertEquals(batch, messageBatch);
} | class ServiceBusSenderClientTest {
private static final String NAMESPACE = "my-namespace";
private static final String ENTITY_NAME = "my-servicebus-entity";
@Mock
private ServiceBusSenderAsyncClient asyncSender;
@Captor
private ArgumentCaptor<ServiceBusMessage> singleMessageCaptor;
private ServiceBusSenderClient sender;
private static final Duration RETRY_TIMEOUT = Duration.ofSeconds(10);
private static final String TEST_CONTENTS = "My message for service bus queue!";
@BeforeAll
static void beforeAll() {
StepVerifier.setDefaultTimeout(Duration.ofSeconds(30));
}
@AfterAll
static void afterAll() {
StepVerifier.resetDefaultTimeout();
}
@BeforeEach
void setup() {
MockitoAnnotations.initMocks(this);
when(asyncSender.getEntityPath()).thenReturn(ENTITY_NAME);
when(asyncSender.getFullyQualifiedNamespace()).thenReturn(NAMESPACE);
sender = new ServiceBusSenderClient(asyncSender, RETRY_TIMEOUT);
}
@AfterEach
void teardown() {
sender.close();
singleMessageCaptor = null;
Mockito.framework().clearInlineMocks();
}
@Test
void verifyProperties() {
Assertions.assertEquals(ENTITY_NAME, sender.getEntityPath());
Assertions.assertEquals(NAMESPACE, sender.getFullyQualifiedNamespace());
}
/**
* Verifies that an exception is thrown when we create a batch with null options.
*/
@Test
void createBatchNull() {
Assertions.assertThrows(NullPointerException.class, () -> sender.createBatch(null));
}
/**
* Verifies that the default batch is the same size as the message link.
*/
@Test
void createBatchDefault() {
ServiceBusMessageBatch batch = new ServiceBusMessageBatch(MAX_MESSAGE_LENGTH_BYTES, null, null,
null);
when(asyncSender.createBatch()).thenReturn(Mono.just(batch));
ServiceBusMessageBatch batchMessage = sender.createBatch();
Assertions.assertEquals(MAX_MESSAGE_LENGTH_BYTES, batchMessage.getMaxSizeInBytes());
Assertions.assertEquals(0, batchMessage.getCount());
verify(asyncSender).createBatch();
}
/**
* Verifies we cannot create a batch if the options size is larger than the link.
*/
@Test
void createBatchWhenSizeTooBigThanOnSendLink() {
int maxLinkSize = 1024;
int batchSize = maxLinkSize + 10;
final CreateBatchOptions options = new CreateBatchOptions().setMaximumSizeInBytes(batchSize);
when(asyncSender.createBatch(options)).thenThrow(new IllegalArgumentException("too large size"));
Assertions.assertThrows(IllegalArgumentException.class, () -> sender.createBatch(options));
verify(asyncSender, times(1)).createBatch(options);
}
/**
* Verifies that the producer can create a batch with a given {@link CreateBatchOptions
*/
@Test
/**
* Verifies that sending a single message will result in calling sender.send(Message).
*/
@Test
void sendSingleMessage() {
final ServiceBusMessage testData =
new ServiceBusMessage(TEST_CONTENTS.getBytes(UTF_8));
when(asyncSender.send(testData)).thenReturn(Mono.empty());
sender.send(testData);
verify(asyncSender, times(1)).send(testData);
verify(asyncSender).send(singleMessageCaptor.capture());
final ServiceBusMessage message = singleMessageCaptor.getValue();
Assertions.assertArrayEquals(testData.getBody(), message.getBody());
}
} | class ServiceBusSenderClientTest {
private static final String NAMESPACE = "my-namespace";
private static final String ENTITY_NAME = "my-servicebus-entity";
@Mock
private ServiceBusSenderAsyncClient asyncSender;
@Captor
private ArgumentCaptor<ServiceBusMessage> singleMessageCaptor;
private ServiceBusSenderClient sender;
private static final Duration RETRY_TIMEOUT = Duration.ofSeconds(10);
private static final String TEST_CONTENTS = "My message for service bus queue!";
@BeforeAll
static void beforeAll() {
StepVerifier.setDefaultTimeout(Duration.ofSeconds(30));
}
@AfterAll
static void afterAll() {
StepVerifier.resetDefaultTimeout();
}
@BeforeEach
void setup() {
MockitoAnnotations.initMocks(this);
when(asyncSender.getEntityPath()).thenReturn(ENTITY_NAME);
when(asyncSender.getFullyQualifiedNamespace()).thenReturn(NAMESPACE);
sender = new ServiceBusSenderClient(asyncSender, RETRY_TIMEOUT);
}
@AfterEach
void teardown() {
sender.close();
singleMessageCaptor = null;
Mockito.framework().clearInlineMocks();
}
@Test
void verifyProperties() {
Assertions.assertEquals(ENTITY_NAME, sender.getEntityPath());
Assertions.assertEquals(NAMESPACE, sender.getFullyQualifiedNamespace());
}
/**
* Verifies that an exception is thrown when we create a batch with null options.
*/
@Test
void createBatchNull() {
Assertions.assertThrows(NullPointerException.class, () -> sender.createBatch(null));
}
/**
* Verifies that the default batch is the same size as the message link.
*/
@Test
void createBatchDefault() {
ServiceBusMessageBatch batch = new ServiceBusMessageBatch(MAX_MESSAGE_LENGTH_BYTES, null, null,
null);
when(asyncSender.createBatch()).thenReturn(Mono.just(batch));
ServiceBusMessageBatch batchMessage = sender.createBatch();
Assertions.assertEquals(MAX_MESSAGE_LENGTH_BYTES, batchMessage.getMaxSizeInBytes());
Assertions.assertEquals(0, batchMessage.getCount());
verify(asyncSender).createBatch();
}
/**
* Verifies we cannot create a batch if the options size is larger than the link.
*/
@Test
void createBatchWhenSizeTooBigThanOnSendLink() {
int maxLinkSize = 1024;
int batchSize = maxLinkSize + 10;
final CreateBatchOptions options = new CreateBatchOptions().setMaximumSizeInBytes(batchSize);
when(asyncSender.createBatch(options)).thenThrow(new IllegalArgumentException("too large size"));
Assertions.assertThrows(IllegalArgumentException.class, () -> sender.createBatch(options));
verify(asyncSender, times(1)).createBatch(options);
}
/**
* Verifies that the producer can create a batch with a given {@link CreateBatchOptions
*/
@Test
/**
* Verifies that sending a single message will result in calling sender.send(Message).
*/
@Test
void sendSingleMessage() {
final ServiceBusMessage testData =
new ServiceBusMessage(TEST_CONTENTS.getBytes(UTF_8));
when(asyncSender.send(testData)).thenReturn(Mono.empty());
sender.send(testData);
verify(asyncSender, times(1)).send(testData);
verify(asyncSender).send(singleMessageCaptor.capture());
final ServiceBusMessage message = singleMessageCaptor.getValue();
Assertions.assertArrayEquals(testData.getBody(), message.getBody());
}
} |
Instead of using null, null.. can you use getFullyQualifiedDomainName() and getEventHubName() ? These properties are set in IntegrationTestBase. | public void sendSmallEventsFullBatchPartitionKey() {
final EventDataBatch batch = new EventDataBatch(ClientConstants.MAX_MESSAGE_LENGTH_BYTES, null, PARTITION_KEY, contextProvider,
new TracerProvider(Collections.emptyList()), null, null);
int count = 0;
while (batch.tryAdd(createData())) {
if (count % 100 == 0) {
logger.verbose("Batch size: {}", batch.getCount());
}
count++;
}
StepVerifier.create(producer.send(batch.getEvents()))
.verifyComplete();
} | new TracerProvider(Collections.emptyList()), null, null); | public void sendSmallEventsFullBatchPartitionKey() {
final EventDataBatch batch = new EventDataBatch(ClientConstants.MAX_MESSAGE_LENGTH_BYTES, null, PARTITION_KEY, contextProvider,
new TracerProvider(Collections.emptyList()), getFullyQualifiedDomainName(), getEventHubName());
int count = 0;
while (batch.tryAdd(createData())) {
if (count % 100 == 0) {
logger.verbose("Batch size: {}", batch.getCount());
}
count++;
}
StepVerifier.create(producer.send(batch.getEvents()))
.verifyComplete();
} | class EventDataBatchIntegrationTest extends IntegrationTestBase {
private static final String PARTITION_KEY = "PartitionIDCopyFromProducerOption";
private EventHubAsyncClient client;
private EventHubProducerAsyncClient producer;
@Mock
private ErrorContextProvider contextProvider;
public EventDataBatchIntegrationTest() {
super(new ClientLogger(EventDataBatchIntegrationTest.class));
}
@Override
protected void beforeTest() {
MockitoAnnotations.initMocks(this);
client = createBuilder().shareConnection().buildAsyncClient();
producer = client.createProducer();
}
@Override
protected void afterTest() {
dispose(producer, client);
}
/**
* Test for sending full batch without partition key
*/
@Test
public void sendSmallEventsFullBatch() {
final EventDataBatch batch = new EventDataBatch(ClientConstants.MAX_MESSAGE_LENGTH_BYTES, null, null, contextProvider,
new TracerProvider(Collections.emptyList()), null, null);
int count = 0;
while (batch.tryAdd(createData())) {
if (count % 100 == 0) {
logger.verbose("Batch size: {}", batch.getCount());
}
count++;
}
StepVerifier.create(producer.send(batch.getEvents()))
.verifyComplete();
}
/**
* Test for sending a message batch that is {@link ClientConstants
*/
@Test
/**
* Verifies that when we send 10 messages with the same partition key and some application properties, the received
* EventData also contains the {@link EventData
*/
@Test
public void sendBatchPartitionKeyValidate() throws InterruptedException {
final String messageValue = UUID.randomUUID().toString();
final SendOptions sendOptions = new SendOptions().setPartitionKey(PARTITION_KEY);
final EventDataBatch batch = new EventDataBatch(ClientConstants.MAX_MESSAGE_LENGTH_BYTES, null, PARTITION_KEY, contextProvider,
new TracerProvider(Collections.emptyList()), null, null);
int count = 0;
while (count < 10) {
final EventData data = createData();
data.getProperties().put(MESSAGE_TRACKING_ID, messageValue);
if (!batch.tryAdd(data)) {
break;
}
count++;
}
final CountDownLatch countDownLatch = new CountDownLatch(batch.getCount());
final List<EventHubConsumerAsyncClient> consumers = new ArrayList<>();
try {
final List<String> partitionIds = client.getPartitionIds().collectList().block(TIMEOUT);
Assertions.assertNotNull(partitionIds);
for (String id : partitionIds) {
final EventHubConsumerAsyncClient consumer =
client.createConsumer(EventHubClientBuilder.DEFAULT_CONSUMER_GROUP_NAME, EventHubClientBuilder.DEFAULT_PREFETCH_COUNT);
consumers.add(consumer);
consumer.receiveFromPartition(id, EventPosition.latest()).subscribe(partitionEvent -> {
EventData event = partitionEvent.getData();
if (event.getPartitionKey() == null || !PARTITION_KEY.equals(event.getPartitionKey())) {
return;
}
if (isMatchingEvent(event, messageValue)) {
logger.info("Event[{}] matched. Countdown: {}", event.getSequenceNumber(), countDownLatch.getCount());
countDownLatch.countDown();
} else {
logger.warning(String.format("Event[%s] matched partition key, but not GUID. Expected: %s. Actual: %s",
event.getSequenceNumber(), messageValue, event.getProperties().get(MESSAGE_TRACKING_ID)));
}
}, error -> {
Assertions.fail("An error should not have occurred:" + error.toString());
}, () -> {
logger.info("Disposing of consumer now that the receive is complete.");
dispose(consumer);
});
}
producer.send(batch.getEvents(), sendOptions).block();
countDownLatch.await(TIMEOUT.getSeconds(), TimeUnit.SECONDS);
} finally {
logger.info("Disposing of subscriptions.");
dispose(consumers.toArray(new EventHubConsumerAsyncClient[0]));
}
Assertions.assertEquals(0, countDownLatch.getCount());
}
/**
* Verify we can send a batch by specifying the {@code maxMessageSize} and partition key.
*/
@Test
public void sendEventsFullBatchWithPartitionKey() {
final int maxMessageSize = 1024;
final EventDataBatch batch = new EventDataBatch(maxMessageSize, null, PARTITION_KEY, contextProvider,
new TracerProvider(Collections.emptyList()), null, null);
final Random random = new Random();
final SendOptions sendOptions = new SendOptions().setPartitionKey(PARTITION_KEY);
int count = 0;
while (true) {
final EventData eventData = new EventData("a".getBytes());
for (int i = 0; i < random.nextInt(20); i++) {
eventData.getProperties().put("key" + i, "value");
}
if (batch.tryAdd(eventData)) {
count++;
} else {
break;
}
}
Assertions.assertEquals(count, batch.getCount());
StepVerifier.create(producer.send(batch.getEvents(), sendOptions))
.verifyComplete();
}
private static EventData createData() {
return new EventData("a".getBytes(StandardCharsets.UTF_8));
}
} | class EventDataBatchIntegrationTest extends IntegrationTestBase {
private static final String PARTITION_KEY = "PartitionIDCopyFromProducerOption";
private EventHubProducerAsyncClient producer;
private EventHubClientBuilder builder;
@Mock
private ErrorContextProvider contextProvider;
public EventDataBatchIntegrationTest() {
super(new ClientLogger(EventDataBatchIntegrationTest.class));
}
@Override
protected void beforeTest() {
MockitoAnnotations.initMocks(this);
builder = createBuilder()
.shareConnection()
.consumerGroup(EventHubClientBuilder.DEFAULT_CONSUMER_GROUP_NAME)
.prefetchCount(EventHubClientBuilder.DEFAULT_PREFETCH_COUNT);
producer = builder.buildAsyncProducerClient();
}
@Override
protected void afterTest() {
dispose(producer);
}
/**
* Test for sending full batch without partition key
*/
@Test
public void sendSmallEventsFullBatch() {
final EventDataBatch batch = new EventDataBatch(ClientConstants.MAX_MESSAGE_LENGTH_BYTES, null, null, contextProvider,
new TracerProvider(Collections.emptyList()), getFullyQualifiedDomainName(), getEventHubName());
int count = 0;
while (batch.tryAdd(createData())) {
if (count % 100 == 0) {
logger.verbose("Batch size: {}", batch.getCount());
}
count++;
}
StepVerifier.create(producer.send(batch.getEvents()))
.verifyComplete();
}
/**
* Test for sending a message batch that is {@link ClientConstants
*/
@Test
/**
* Verifies that when we send 10 messages with the same partition key and some application properties, the received
* EventData also contains the {@link EventData
*/
@Test
public void sendBatchPartitionKeyValidate() throws InterruptedException {
final String messageValue = UUID.randomUUID().toString();
final SendOptions sendOptions = new SendOptions().setPartitionKey(PARTITION_KEY);
final EventDataBatch batch = new EventDataBatch(ClientConstants.MAX_MESSAGE_LENGTH_BYTES, null, PARTITION_KEY, contextProvider,
new TracerProvider(Collections.emptyList()), getFullyQualifiedDomainName(), getEventHubName());
int count = 0;
while (count < 10) {
final EventData data = createData();
data.getProperties().put(MESSAGE_TRACKING_ID, messageValue);
if (!batch.tryAdd(data)) {
break;
}
count++;
}
final CountDownLatch countDownLatch = new CountDownLatch(batch.getCount());
final List<EventHubConsumerAsyncClient> consumers = new ArrayList<>();
try {
final List<String> partitionIds = producer.getPartitionIds().collectList().block(TIMEOUT);
Assertions.assertNotNull(partitionIds);
for (String id : partitionIds) {
final EventHubConsumerAsyncClient consumer = builder.buildAsyncConsumerClient();
consumers.add(consumer);
consumer.receiveFromPartition(id, EventPosition.latest()).subscribe(partitionEvent -> {
EventData event = partitionEvent.getData();
if (event.getPartitionKey() == null || !PARTITION_KEY.equals(event.getPartitionKey())) {
return;
}
if (isMatchingEvent(event, messageValue)) {
logger.info("Event[{}] matched. Countdown: {}", event.getSequenceNumber(), countDownLatch.getCount());
countDownLatch.countDown();
} else {
logger.warning(String.format("Event[%s] matched partition key, but not GUID. Expected: %s. Actual: %s",
event.getSequenceNumber(), messageValue, event.getProperties().get(MESSAGE_TRACKING_ID)));
}
}, error -> {
Assertions.fail("An error should not have occurred:" + error.toString());
}, () -> {
logger.info("Disposing of consumer now that the receive is complete.");
dispose(consumer);
});
}
producer.send(batch.getEvents(), sendOptions).block();
countDownLatch.await(TIMEOUT.getSeconds(), TimeUnit.SECONDS);
} finally {
logger.info("Disposing of subscriptions.");
dispose(consumers.toArray(new EventHubConsumerAsyncClient[0]));
}
Assertions.assertEquals(0, countDownLatch.getCount());
}
/**
* Verify we can send a batch by specifying the {@code maxMessageSize} and partition key.
*/
@Test
public void sendEventsFullBatchWithPartitionKey() {
final int maxMessageSize = 1024;
final EventDataBatch batch = new EventDataBatch(maxMessageSize, null, PARTITION_KEY, contextProvider,
new TracerProvider(Collections.emptyList()), getFullyQualifiedDomainName(), getEventHubName());
final Random random = new Random();
final SendOptions sendOptions = new SendOptions().setPartitionKey(PARTITION_KEY);
int count = 0;
while (true) {
final EventData eventData = new EventData("a".getBytes());
for (int i = 0; i < random.nextInt(20); i++) {
eventData.getProperties().put("key" + i, "value");
}
if (batch.tryAdd(eventData)) {
count++;
} else {
break;
}
}
Assertions.assertEquals(count, batch.getCount());
StepVerifier.create(producer.send(batch.getEvents(), sendOptions))
.verifyComplete();
}
private static EventData createData() {
return new EventData("a".getBytes(StandardCharsets.UTF_8));
}
} |
updated. | public void sendSmallEventsFullBatchPartitionKey() {
final EventDataBatch batch = new EventDataBatch(ClientConstants.MAX_MESSAGE_LENGTH_BYTES, null, PARTITION_KEY, contextProvider,
new TracerProvider(Collections.emptyList()), null, null);
int count = 0;
while (batch.tryAdd(createData())) {
if (count % 100 == 0) {
logger.verbose("Batch size: {}", batch.getCount());
}
count++;
}
StepVerifier.create(producer.send(batch.getEvents()))
.verifyComplete();
} | new TracerProvider(Collections.emptyList()), null, null); | public void sendSmallEventsFullBatchPartitionKey() {
final EventDataBatch batch = new EventDataBatch(ClientConstants.MAX_MESSAGE_LENGTH_BYTES, null, PARTITION_KEY, contextProvider,
new TracerProvider(Collections.emptyList()), getFullyQualifiedDomainName(), getEventHubName());
int count = 0;
while (batch.tryAdd(createData())) {
if (count % 100 == 0) {
logger.verbose("Batch size: {}", batch.getCount());
}
count++;
}
StepVerifier.create(producer.send(batch.getEvents()))
.verifyComplete();
} | class EventDataBatchIntegrationTest extends IntegrationTestBase {
private static final String PARTITION_KEY = "PartitionIDCopyFromProducerOption";
private EventHubAsyncClient client;
private EventHubProducerAsyncClient producer;
@Mock
private ErrorContextProvider contextProvider;
public EventDataBatchIntegrationTest() {
super(new ClientLogger(EventDataBatchIntegrationTest.class));
}
@Override
protected void beforeTest() {
MockitoAnnotations.initMocks(this);
client = createBuilder().shareConnection().buildAsyncClient();
producer = client.createProducer();
}
@Override
protected void afterTest() {
dispose(producer, client);
}
/**
* Test for sending full batch without partition key
*/
@Test
public void sendSmallEventsFullBatch() {
final EventDataBatch batch = new EventDataBatch(ClientConstants.MAX_MESSAGE_LENGTH_BYTES, null, null, contextProvider,
new TracerProvider(Collections.emptyList()), null, null);
int count = 0;
while (batch.tryAdd(createData())) {
if (count % 100 == 0) {
logger.verbose("Batch size: {}", batch.getCount());
}
count++;
}
StepVerifier.create(producer.send(batch.getEvents()))
.verifyComplete();
}
/**
* Test for sending a message batch that is {@link ClientConstants
*/
@Test
/**
* Verifies that when we send 10 messages with the same partition key and some application properties, the received
* EventData also contains the {@link EventData
*/
@Test
public void sendBatchPartitionKeyValidate() throws InterruptedException {
final String messageValue = UUID.randomUUID().toString();
final SendOptions sendOptions = new SendOptions().setPartitionKey(PARTITION_KEY);
final EventDataBatch batch = new EventDataBatch(ClientConstants.MAX_MESSAGE_LENGTH_BYTES, null, PARTITION_KEY, contextProvider,
new TracerProvider(Collections.emptyList()), null, null);
int count = 0;
while (count < 10) {
final EventData data = createData();
data.getProperties().put(MESSAGE_TRACKING_ID, messageValue);
if (!batch.tryAdd(data)) {
break;
}
count++;
}
final CountDownLatch countDownLatch = new CountDownLatch(batch.getCount());
final List<EventHubConsumerAsyncClient> consumers = new ArrayList<>();
try {
final List<String> partitionIds = client.getPartitionIds().collectList().block(TIMEOUT);
Assertions.assertNotNull(partitionIds);
for (String id : partitionIds) {
final EventHubConsumerAsyncClient consumer =
client.createConsumer(EventHubClientBuilder.DEFAULT_CONSUMER_GROUP_NAME, EventHubClientBuilder.DEFAULT_PREFETCH_COUNT);
consumers.add(consumer);
consumer.receiveFromPartition(id, EventPosition.latest()).subscribe(partitionEvent -> {
EventData event = partitionEvent.getData();
if (event.getPartitionKey() == null || !PARTITION_KEY.equals(event.getPartitionKey())) {
return;
}
if (isMatchingEvent(event, messageValue)) {
logger.info("Event[{}] matched. Countdown: {}", event.getSequenceNumber(), countDownLatch.getCount());
countDownLatch.countDown();
} else {
logger.warning(String.format("Event[%s] matched partition key, but not GUID. Expected: %s. Actual: %s",
event.getSequenceNumber(), messageValue, event.getProperties().get(MESSAGE_TRACKING_ID)));
}
}, error -> {
Assertions.fail("An error should not have occurred:" + error.toString());
}, () -> {
logger.info("Disposing of consumer now that the receive is complete.");
dispose(consumer);
});
}
producer.send(batch.getEvents(), sendOptions).block();
countDownLatch.await(TIMEOUT.getSeconds(), TimeUnit.SECONDS);
} finally {
logger.info("Disposing of subscriptions.");
dispose(consumers.toArray(new EventHubConsumerAsyncClient[0]));
}
Assertions.assertEquals(0, countDownLatch.getCount());
}
/**
* Verify we can send a batch by specifying the {@code maxMessageSize} and partition key.
*/
@Test
public void sendEventsFullBatchWithPartitionKey() {
final int maxMessageSize = 1024;
final EventDataBatch batch = new EventDataBatch(maxMessageSize, null, PARTITION_KEY, contextProvider,
new TracerProvider(Collections.emptyList()), null, null);
final Random random = new Random();
final SendOptions sendOptions = new SendOptions().setPartitionKey(PARTITION_KEY);
int count = 0;
while (true) {
final EventData eventData = new EventData("a".getBytes());
for (int i = 0; i < random.nextInt(20); i++) {
eventData.getProperties().put("key" + i, "value");
}
if (batch.tryAdd(eventData)) {
count++;
} else {
break;
}
}
Assertions.assertEquals(count, batch.getCount());
StepVerifier.create(producer.send(batch.getEvents(), sendOptions))
.verifyComplete();
}
private static EventData createData() {
return new EventData("a".getBytes(StandardCharsets.UTF_8));
}
} | class EventDataBatchIntegrationTest extends IntegrationTestBase {
private static final String PARTITION_KEY = "PartitionIDCopyFromProducerOption";
private EventHubProducerAsyncClient producer;
private EventHubClientBuilder builder;
@Mock
private ErrorContextProvider contextProvider;
public EventDataBatchIntegrationTest() {
super(new ClientLogger(EventDataBatchIntegrationTest.class));
}
@Override
protected void beforeTest() {
MockitoAnnotations.initMocks(this);
builder = createBuilder()
.shareConnection()
.consumerGroup(EventHubClientBuilder.DEFAULT_CONSUMER_GROUP_NAME)
.prefetchCount(EventHubClientBuilder.DEFAULT_PREFETCH_COUNT);
producer = builder.buildAsyncProducerClient();
}
@Override
protected void afterTest() {
dispose(producer);
}
/**
* Test for sending full batch without partition key
*/
@Test
public void sendSmallEventsFullBatch() {
final EventDataBatch batch = new EventDataBatch(ClientConstants.MAX_MESSAGE_LENGTH_BYTES, null, null, contextProvider,
new TracerProvider(Collections.emptyList()), getFullyQualifiedDomainName(), getEventHubName());
int count = 0;
while (batch.tryAdd(createData())) {
if (count % 100 == 0) {
logger.verbose("Batch size: {}", batch.getCount());
}
count++;
}
StepVerifier.create(producer.send(batch.getEvents()))
.verifyComplete();
}
/**
* Test for sending a message batch that is {@link ClientConstants
*/
@Test
/**
* Verifies that when we send 10 messages with the same partition key and some application properties, the received
* EventData also contains the {@link EventData
*/
@Test
public void sendBatchPartitionKeyValidate() throws InterruptedException {
final String messageValue = UUID.randomUUID().toString();
final SendOptions sendOptions = new SendOptions().setPartitionKey(PARTITION_KEY);
final EventDataBatch batch = new EventDataBatch(ClientConstants.MAX_MESSAGE_LENGTH_BYTES, null, PARTITION_KEY, contextProvider,
new TracerProvider(Collections.emptyList()), getFullyQualifiedDomainName(), getEventHubName());
int count = 0;
while (count < 10) {
final EventData data = createData();
data.getProperties().put(MESSAGE_TRACKING_ID, messageValue);
if (!batch.tryAdd(data)) {
break;
}
count++;
}
final CountDownLatch countDownLatch = new CountDownLatch(batch.getCount());
final List<EventHubConsumerAsyncClient> consumers = new ArrayList<>();
try {
final List<String> partitionIds = producer.getPartitionIds().collectList().block(TIMEOUT);
Assertions.assertNotNull(partitionIds);
for (String id : partitionIds) {
final EventHubConsumerAsyncClient consumer = builder.buildAsyncConsumerClient();
consumers.add(consumer);
consumer.receiveFromPartition(id, EventPosition.latest()).subscribe(partitionEvent -> {
EventData event = partitionEvent.getData();
if (event.getPartitionKey() == null || !PARTITION_KEY.equals(event.getPartitionKey())) {
return;
}
if (isMatchingEvent(event, messageValue)) {
logger.info("Event[{}] matched. Countdown: {}", event.getSequenceNumber(), countDownLatch.getCount());
countDownLatch.countDown();
} else {
logger.warning(String.format("Event[%s] matched partition key, but not GUID. Expected: %s. Actual: %s",
event.getSequenceNumber(), messageValue, event.getProperties().get(MESSAGE_TRACKING_ID)));
}
}, error -> {
Assertions.fail("An error should not have occurred:" + error.toString());
}, () -> {
logger.info("Disposing of consumer now that the receive is complete.");
dispose(consumer);
});
}
producer.send(batch.getEvents(), sendOptions).block();
countDownLatch.await(TIMEOUT.getSeconds(), TimeUnit.SECONDS);
} finally {
logger.info("Disposing of subscriptions.");
dispose(consumers.toArray(new EventHubConsumerAsyncClient[0]));
}
Assertions.assertEquals(0, countDownLatch.getCount());
}
/**
* Verify we can send a batch by specifying the {@code maxMessageSize} and partition key.
*/
@Test
public void sendEventsFullBatchWithPartitionKey() {
final int maxMessageSize = 1024;
final EventDataBatch batch = new EventDataBatch(maxMessageSize, null, PARTITION_KEY, contextProvider,
new TracerProvider(Collections.emptyList()), getFullyQualifiedDomainName(), getEventHubName());
final Random random = new Random();
final SendOptions sendOptions = new SendOptions().setPartitionKey(PARTITION_KEY);
int count = 0;
while (true) {
final EventData eventData = new EventData("a".getBytes());
for (int i = 0; i < random.nextInt(20); i++) {
eventData.getProperties().put("key" + i, "value");
}
if (batch.tryAdd(eventData)) {
count++;
} else {
break;
}
}
Assertions.assertEquals(count, batch.getCount());
StepVerifier.create(producer.send(batch.getEvents(), sendOptions))
.verifyComplete();
}
private static EventData createData() {
return new EventData("a".getBytes(StandardCharsets.UTF_8));
}
} |
"QUEUE OR TOPIC NAME" | public void sendBatch() {
ServiceBusSenderAsyncClient sender = new ServiceBusClientBuilder()
.connectionString(
"Endpoint={fully-qualified-namespace};SharedAccessKeyName={policy-name};SharedAccessKey={key}")
.buildSenderClientBuilder()
.entityName("<QUEUE-NAME>")
.buildAsyncClient();
sender.createBatch().flatMap(batch -> {
batch.tryAdd(new ServiceBusMessage("test-1".getBytes(UTF_8)));
batch.tryAdd(new ServiceBusMessage("test-2".getBytes(UTF_8)));
return sender.send(batch);
}).subscribe(unused -> {
},
error -> System.err.println("Error occurred while sending batch:" + error),
() -> System.out.println("Send complete."));
sender.close();
} | .entityName("<QUEUE-NAME>") | public void sendBatch() {
ServiceBusSenderAsyncClient sender = new ServiceBusClientBuilder()
.connectionString(
"Endpoint={fully-qualified-namespace};SharedAccessKeyName={policy-name};SharedAccessKey={key}")
.buildSenderClientBuilder()
.entityName("<QUEUE OR TOPIC NAME>")
.buildAsyncClient();
sender.createBatch().flatMap(batch -> {
batch.tryAdd(new ServiceBusMessage("test-1".getBytes(UTF_8)));
batch.tryAdd(new ServiceBusMessage("test-2".getBytes(UTF_8)));
return sender.send(batch);
}).subscribe(unused -> {
},
error -> System.err.println("Error occurred while sending batch:" + error),
() -> System.out.println("Send complete."));
sender.close();
} | class ServiceBusSenderAsyncClientJavaDocCodeSamples {
private final ServiceBusClientBuilder builder = new ServiceBusClientBuilder()
.connectionString("fake-string");
/**
* Code snippet demonstrating how to create an {@link ServiceBusSenderAsyncClient}.
*/
public void instantiate() {
ServiceBusSenderAsyncClient sender = new ServiceBusClientBuilder()
.connectionString(
"Endpoint={fully-qualified-namespace};SharedAccessKeyName={policy-name};SharedAccessKey={key}")
.buildSenderClientBuilder()
.entityName("<QUEUE-NAME>")
.buildAsyncClient();
sender.close();
}
/**
* Code snippet demonstrating how to send a batch to Service Bus queue or topic.
*/
/**
* Code snippet demonstrating how to create a size-limited {@link ServiceBusMessageBatch} and send it.
*/
public void batchSizeLimited() {
final ServiceBusSenderAsyncClient sender = new ServiceBusClientBuilder()
.buildSenderClientBuilder()
.buildAsyncClient();
final ServiceBusMessage firstMessage = new ServiceBusMessage("92".getBytes(UTF_8));
firstMessage.getProperties().put("telemetry", "latency");
final ServiceBusMessage secondMessage = new ServiceBusMessage("98".getBytes(UTF_8));
secondMessage.getProperties().put("telemetry", "cpu-temperature");
final Flux<ServiceBusMessage> telemetryMessages = Flux.just(firstMessage, secondMessage);
final CreateBatchOptions options = new CreateBatchOptions()
.setMaximumSizeInBytes(256);
final AtomicReference<ServiceBusMessageBatch> currentBatch = new AtomicReference<>(
sender.createBatch(options).block());
telemetryMessages.flatMap(message -> {
final ServiceBusMessageBatch batch = currentBatch.get();
if (batch.tryAdd(message)) {
return Mono.empty();
}
return Mono.when(
sender.send(batch),
sender.createBatch(options).map(newBatch -> {
currentBatch.set(newBatch);
if (!newBatch.tryAdd(message)) {
throw Exceptions.propagate(new IllegalArgumentException(
"Message was too large to fit in an empty batch. Max size: " + newBatch.getMaxSizeInBytes()));
}
return newBatch;
}));
}).then()
.doFinally(signal -> {
final ServiceBusMessageBatch batch = currentBatch.getAndSet(null);
if (batch != null && batch.getCount() > 0) {
sender.send(batch).block();
}
});
}
} | class ServiceBusSenderAsyncClientJavaDocCodeSamples {
private final ServiceBusClientBuilder builder = new ServiceBusClientBuilder()
.connectionString("fake-string");
/**
* Code snippet demonstrating how to create an {@link ServiceBusSenderAsyncClient}.
*/
public void instantiate() {
ServiceBusSenderAsyncClient sender = new ServiceBusClientBuilder()
.connectionString(
"Endpoint={fully-qualified-namespace};SharedAccessKeyName={policy-name};SharedAccessKey={key}")
.buildSenderClientBuilder()
.entityName("<QUEUE-NAME>")
.buildAsyncClient();
sender.close();
}
/**
* Code snippet demonstrating how to create an {@link ServiceBusSenderAsyncClient}.
*/
public void instantiateWithDefaultCredential() {
ServiceBusSenderAsyncClient sender = new ServiceBusClientBuilder()
.credential("<<fully-qualified-namespace>>",
new DefaultAzureCredentialBuilder().build())
.buildSenderClientBuilder()
.entityName("<QUEUE-NAME>")
.buildAsyncClient();
sender.close();
}
/**
* Code snippet demonstrating how to send a batch to Service Bus queue or topic.
*/
/**
* Code snippet demonstrating how to create a size-limited {@link ServiceBusMessageBatch} and send it.
*/
public void batchSizeLimited() {
final ServiceBusSenderAsyncClient sender = new ServiceBusClientBuilder()
.buildSenderClientBuilder()
.buildAsyncClient();
final ServiceBusMessage firstMessage = new ServiceBusMessage("92".getBytes(UTF_8));
firstMessage.getProperties().put("telemetry", "latency");
final ServiceBusMessage secondMessage = new ServiceBusMessage("98".getBytes(UTF_8));
secondMessage.getProperties().put("telemetry", "cpu-temperature");
final Flux<ServiceBusMessage> telemetryMessages = Flux.just(firstMessage, secondMessage);
final CreateBatchOptions options = new CreateBatchOptions()
.setMaximumSizeInBytes(256);
final AtomicReference<ServiceBusMessageBatch> currentBatch = new AtomicReference<>(
sender.createBatch(options).block());
telemetryMessages.flatMap(message -> {
final ServiceBusMessageBatch batch = currentBatch.get();
if (batch.tryAdd(message)) {
return Mono.empty();
}
return Mono.when(
sender.send(batch),
sender.createBatch(options).map(newBatch -> {
currentBatch.set(newBatch);
if (!newBatch.tryAdd(message)) {
throw Exceptions.propagate(new IllegalArgumentException(
"Message was too large to fit in an empty batch. Max size: " + newBatch.getMaxSizeInBytes()));
}
return newBatch;
}));
}).then()
.doFinally(signal -> {
final ServiceBusMessageBatch batch = currentBatch.getAndSet(null);
if (batch != null && batch.getCount() > 0) {
sender.send(batch).block();
}
});
}
} |
is there a createBatch that takes an int parameter? I think the snippet text is wrong | public void batchSizeLimited() {
final ServiceBusSenderAsyncClient sender = new ServiceBusClientBuilder()
.buildSenderClientBuilder()
.buildAsyncClient();
final ServiceBusMessage firstMessage = new ServiceBusMessage("92".getBytes(UTF_8));
firstMessage.getProperties().put("telemetry", "latency");
final ServiceBusMessage secondMessage = new ServiceBusMessage("98".getBytes(UTF_8));
secondMessage.getProperties().put("telemetry", "cpu-temperature");
final Flux<ServiceBusMessage> telemetryMessages = Flux.just(firstMessage, secondMessage);
final CreateBatchOptions options = new CreateBatchOptions()
.setMaximumSizeInBytes(256);
final AtomicReference<ServiceBusMessageBatch> currentBatch = new AtomicReference<>(
sender.createBatch(options).block());
telemetryMessages.flatMap(message -> {
final ServiceBusMessageBatch batch = currentBatch.get();
if (batch.tryAdd(message)) {
return Mono.empty();
}
return Mono.when(
sender.send(batch),
sender.createBatch(options).map(newBatch -> {
currentBatch.set(newBatch);
if (!newBatch.tryAdd(message)) {
throw Exceptions.propagate(new IllegalArgumentException(
"Message was too large to fit in an empty batch. Max size: " + newBatch.getMaxSizeInBytes()));
}
return newBatch;
}));
}).then()
.doFinally(signal -> {
final ServiceBusMessageBatch batch = currentBatch.getAndSet(null);
if (batch != null && batch.getCount() > 0) {
sender.send(batch).block();
}
});
} | public void batchSizeLimited() {
final ServiceBusSenderAsyncClient sender = new ServiceBusClientBuilder()
.buildSenderClientBuilder()
.buildAsyncClient();
final ServiceBusMessage firstMessage = new ServiceBusMessage("92".getBytes(UTF_8));
firstMessage.getProperties().put("telemetry", "latency");
final ServiceBusMessage secondMessage = new ServiceBusMessage("98".getBytes(UTF_8));
secondMessage.getProperties().put("telemetry", "cpu-temperature");
final Flux<ServiceBusMessage> telemetryMessages = Flux.just(firstMessage, secondMessage);
final CreateBatchOptions options = new CreateBatchOptions()
.setMaximumSizeInBytes(256);
final AtomicReference<ServiceBusMessageBatch> currentBatch = new AtomicReference<>(
sender.createBatch(options).block());
telemetryMessages.flatMap(message -> {
final ServiceBusMessageBatch batch = currentBatch.get();
if (batch.tryAdd(message)) {
return Mono.empty();
}
return Mono.when(
sender.send(batch),
sender.createBatch(options).map(newBatch -> {
currentBatch.set(newBatch);
if (!newBatch.tryAdd(message)) {
throw Exceptions.propagate(new IllegalArgumentException(
"Message was too large to fit in an empty batch. Max size: " + newBatch.getMaxSizeInBytes()));
}
return newBatch;
}));
}).then()
.doFinally(signal -> {
final ServiceBusMessageBatch batch = currentBatch.getAndSet(null);
if (batch != null && batch.getCount() > 0) {
sender.send(batch).block();
}
});
} | class ServiceBusSenderAsyncClientJavaDocCodeSamples {
private final ServiceBusClientBuilder builder = new ServiceBusClientBuilder()
.connectionString("fake-string");
/**
* Code snippet demonstrating how to create an {@link ServiceBusSenderAsyncClient}.
*/
public void instantiate() {
ServiceBusSenderAsyncClient sender = new ServiceBusClientBuilder()
.connectionString(
"Endpoint={fully-qualified-namespace};SharedAccessKeyName={policy-name};SharedAccessKey={key}")
.buildSenderClientBuilder()
.entityName("<QUEUE-NAME>")
.buildAsyncClient();
sender.close();
}
/**
* Code snippet demonstrating how to send a batch to Service Bus queue or topic.
*/
public void sendBatch() {
ServiceBusSenderAsyncClient sender = new ServiceBusClientBuilder()
.connectionString(
"Endpoint={fully-qualified-namespace};SharedAccessKeyName={policy-name};SharedAccessKey={key}")
.buildSenderClientBuilder()
.entityName("<QUEUE-NAME>")
.buildAsyncClient();
sender.createBatch().flatMap(batch -> {
batch.tryAdd(new ServiceBusMessage("test-1".getBytes(UTF_8)));
batch.tryAdd(new ServiceBusMessage("test-2".getBytes(UTF_8)));
return sender.send(batch);
}).subscribe(unused -> {
},
error -> System.err.println("Error occurred while sending batch:" + error),
() -> System.out.println("Send complete."));
sender.close();
}
/**
* Code snippet demonstrating how to create a size-limited {@link ServiceBusMessageBatch} and send it.
*/
} | class ServiceBusSenderAsyncClientJavaDocCodeSamples {
private final ServiceBusClientBuilder builder = new ServiceBusClientBuilder()
.connectionString("fake-string");
/**
* Code snippet demonstrating how to create an {@link ServiceBusSenderAsyncClient}.
*/
public void instantiate() {
ServiceBusSenderAsyncClient sender = new ServiceBusClientBuilder()
.connectionString(
"Endpoint={fully-qualified-namespace};SharedAccessKeyName={policy-name};SharedAccessKey={key}")
.buildSenderClientBuilder()
.entityName("<QUEUE-NAME>")
.buildAsyncClient();
sender.close();
}
/**
* Code snippet demonstrating how to create an {@link ServiceBusSenderAsyncClient}.
*/
public void instantiateWithDefaultCredential() {
ServiceBusSenderAsyncClient sender = new ServiceBusClientBuilder()
.credential("<<fully-qualified-namespace>>",
new DefaultAzureCredentialBuilder().build())
.buildSenderClientBuilder()
.entityName("<QUEUE-NAME>")
.buildAsyncClient();
sender.close();
}
/**
* Code snippet demonstrating how to send a batch to Service Bus queue or topic.
*/
public void sendBatch() {
ServiceBusSenderAsyncClient sender = new ServiceBusClientBuilder()
.connectionString(
"Endpoint={fully-qualified-namespace};SharedAccessKeyName={policy-name};SharedAccessKey={key}")
.buildSenderClientBuilder()
.entityName("<QUEUE OR TOPIC NAME>")
.buildAsyncClient();
sender.createBatch().flatMap(batch -> {
batch.tryAdd(new ServiceBusMessage("test-1".getBytes(UTF_8)));
batch.tryAdd(new ServiceBusMessage("test-2".getBytes(UTF_8)));
return sender.send(batch);
}).subscribe(unused -> {
},
error -> System.err.println("Error occurred while sending batch:" + error),
() -> System.out.println("Send complete."));
sender.close();
}
/**
* Code snippet demonstrating how to create a size-limited {@link ServiceBusMessageBatch} and send it.
*/
} | |
you need to specify queueName or topicName + subscription. | public void receiveAll() {
ServiceBusReceiverAsyncClient receiver = new ServiceBusClientBuilder()
.connectionString("fake-string")
.buildReceiverClientBuilder()
.buildAsyncClient();
Disposable subscription = receiver.receive().subscribe(receivedMessage -> {
String messageId = receivedMessage.getMessageId();
System.out.printf("Received message messageId %s%n", messageId);
System.out.printf("Contents of message as string: %s%n", new String(receivedMessage.getBody(), UTF_8));
});
subscription.dispose();
} | .buildAsyncClient(); | public void receiveAll() {
ServiceBusReceiverAsyncClient receiver = new ServiceBusClientBuilder()
.connectionString("fake-string")
.buildReceiverClientBuilder()
.queueName("<QUEUE-NAME>")
.buildAsyncClient();
Disposable subscription = receiver.receive().subscribe(receivedMessage -> {
String messageId = receivedMessage.getMessageId();
System.out.printf("Received message messageId %s%n", messageId);
System.out.printf("Contents of message as string: %s%n", new String(receivedMessage.getBody(), UTF_8));
});
receiver.close();
subscription.dispose();
} | class ServiceBusReceiverAsyncClientJavaDocCodeSamples {
private final ServiceBusReceiverAsyncClient consumer = new ServiceBusClientBuilder()
.connectionString("fake-string")
.buildReceiverClientBuilder()
.queueName("<QUEUE-NAME>")
.buildAsyncClient();
public void initialization() {
ServiceBusReceiverAsyncClient consumer = new ServiceBusClientBuilder()
.connectionString("Endpoint={fully-qualified-namespace};SharedAccessKeyName={policy-name};"
+ "SharedAccessKey={key};EntityPath={eh-name}")
.buildReceiverClientBuilder()
.queueName("<QUEUE-NAME>")
.buildAsyncClient();
consumer.close();
}
public void instantiateWithDefaultCredential() {
ServiceBusReceiverAsyncClient consumer = new ServiceBusClientBuilder()
.credential("<<fully-qualified-namespace>>",
new DefaultAzureCredentialBuilder().build())
.buildReceiverClientBuilder()
.queueName("<QUEUE-NAME>")
.buildAsyncClient();
consumer.close();
}
/**
* Receives message from a queue or topic.
*/
public void receive() {
ServiceBusReceiverAsyncClient receiver = new ServiceBusClientBuilder()
.connectionString("fake-string")
.buildReceiverClientBuilder()
.buildAsyncClient();
Disposable subscription = receiver.receive()
.take(1)
.subscribe(receivedMessage -> {
String messageId = receivedMessage.getMessageId();
System.out.printf("Received message messageId %s%n", messageId);
System.out.printf("Contents of message as string: %s%n", new String(receivedMessage.getBody(), UTF_8));
}, error -> System.err.print(error.toString()));
subscription.dispose();
}
/**
* Receives message with back pressure.
*/
public void receiveBackpressure() {
ServiceBusReceiverAsyncClient receiver = new ServiceBusClientBuilder()
.connectionString("fake-string")
.buildReceiverClientBuilder()
.queueName("<QUEUE-NAME>")
.buildAsyncClient();
receiver.receive().subscribe(new BaseSubscriber<ServiceBusReceivedMessage>() {
private static final int NUMBER_OF_MESSAGES = 5;
private final AtomicInteger currentNumberOfMessages = new AtomicInteger();
@Override
protected void hookOnSubscribe(Subscription subscription) {
request(NUMBER_OF_MESSAGES);
}
@Override
protected void hookOnNext(ServiceBusReceivedMessage value) {
if (currentNumberOfMessages.incrementAndGet() % 5 == 0) {
request(NUMBER_OF_MESSAGES);
}
}
});
}
/**
* Receives from all the messages.
*/
} | class ServiceBusReceiverAsyncClientJavaDocCodeSamples {
private final ServiceBusReceiverAsyncClient consumer = new ServiceBusClientBuilder()
.connectionString("fake-string")
.buildReceiverClientBuilder()
.queueName("<QUEUE-NAME>")
.buildAsyncClient();
public void initialization() {
ServiceBusReceiverAsyncClient consumer = new ServiceBusClientBuilder()
.connectionString("Endpoint={fully-qualified-namespace};SharedAccessKeyName={policy-name};"
+ "SharedAccessKey={key};EntityPath={eh-name}")
.buildReceiverClientBuilder()
.queueName("<QUEUE-NAME>")
.buildAsyncClient();
consumer.close();
}
public void instantiateWithDefaultCredential() {
ServiceBusReceiverAsyncClient receiver = new ServiceBusClientBuilder()
.credential("<<fully-qualified-namespace>>",
new DefaultAzureCredentialBuilder().build())
.buildReceiverClientBuilder()
.queueName("<QUEUE-NAME>")
.buildAsyncClient();
receiver.close();
}
/**
* Receives message from a queue or topic using receive and delete mode.
*/
public void receiveWithReceiveAndDeleteMode() {
ServiceBusReceiverAsyncClient receiver = new ServiceBusClientBuilder()
.connectionString("fake-string")
.buildReceiverClientBuilder()
.receiveMode(ReceiveMode.RECEIVE_AND_DELETE)
.queueName("<QUEUE-NAME>")
.buildAsyncClient();
Disposable subscription = receiver.receive()
.subscribe(receivedMessage -> {
String messageId = receivedMessage.getMessageId();
System.out.printf("Received message messageId %s%n", messageId);
System.out.printf("Contents of message as string: %s%n", new String(receivedMessage.getBody(), UTF_8));
}, error -> System.err.print(error.toString()));
receiver.close();
subscription.dispose();
}
/**
* Receives message with back pressure.
*/
public void receiveBackpressure() {
ServiceBusReceiverAsyncClient receiver = new ServiceBusClientBuilder()
.connectionString("fake-string")
.buildReceiverClientBuilder()
.queueName("<QUEUE-NAME>")
.buildAsyncClient();
receiver.receive().subscribe(new BaseSubscriber<ServiceBusReceivedMessage>() {
private static final int NUMBER_OF_MESSAGES = 5;
private final AtomicInteger currentNumberOfMessages = new AtomicInteger();
@Override
protected void hookOnSubscribe(Subscription subscription) {
request(NUMBER_OF_MESSAGES);
}
@Override
protected void hookOnNext(ServiceBusReceivedMessage value) {
if (currentNumberOfMessages.incrementAndGet() % 5 == 0) {
request(NUMBER_OF_MESSAGES);
}
}
});
receiver.close();
}
/**
* Receives from all the messages.
*/
} |
You need to dispose of consumer as well, like you did in other samples. (even if it doesn't appear in the snippet.) | public void receiveAll() {
ServiceBusReceiverAsyncClient receiver = new ServiceBusClientBuilder()
.connectionString("fake-string")
.buildReceiverClientBuilder()
.buildAsyncClient();
Disposable subscription = receiver.receive().subscribe(receivedMessage -> {
String messageId = receivedMessage.getMessageId();
System.out.printf("Received message messageId %s%n", messageId);
System.out.printf("Contents of message as string: %s%n", new String(receivedMessage.getBody(), UTF_8));
});
subscription.dispose();
} | } | public void receiveAll() {
ServiceBusReceiverAsyncClient receiver = new ServiceBusClientBuilder()
.connectionString("fake-string")
.buildReceiverClientBuilder()
.queueName("<QUEUE-NAME>")
.buildAsyncClient();
Disposable subscription = receiver.receive().subscribe(receivedMessage -> {
String messageId = receivedMessage.getMessageId();
System.out.printf("Received message messageId %s%n", messageId);
System.out.printf("Contents of message as string: %s%n", new String(receivedMessage.getBody(), UTF_8));
});
receiver.close();
subscription.dispose();
} | class ServiceBusReceiverAsyncClientJavaDocCodeSamples {
private final ServiceBusReceiverAsyncClient consumer = new ServiceBusClientBuilder()
.connectionString("fake-string")
.buildReceiverClientBuilder()
.queueName("<QUEUE-NAME>")
.buildAsyncClient();
public void initialization() {
ServiceBusReceiverAsyncClient consumer = new ServiceBusClientBuilder()
.connectionString("Endpoint={fully-qualified-namespace};SharedAccessKeyName={policy-name};"
+ "SharedAccessKey={key};EntityPath={eh-name}")
.buildReceiverClientBuilder()
.queueName("<QUEUE-NAME>")
.buildAsyncClient();
consumer.close();
}
public void instantiateWithDefaultCredential() {
ServiceBusReceiverAsyncClient consumer = new ServiceBusClientBuilder()
.credential("<<fully-qualified-namespace>>",
new DefaultAzureCredentialBuilder().build())
.buildReceiverClientBuilder()
.queueName("<QUEUE-NAME>")
.buildAsyncClient();
consumer.close();
}
/**
* Receives message from a queue or topic.
*/
public void receive() {
ServiceBusReceiverAsyncClient receiver = new ServiceBusClientBuilder()
.connectionString("fake-string")
.buildReceiverClientBuilder()
.buildAsyncClient();
Disposable subscription = receiver.receive()
.take(1)
.subscribe(receivedMessage -> {
String messageId = receivedMessage.getMessageId();
System.out.printf("Received message messageId %s%n", messageId);
System.out.printf("Contents of message as string: %s%n", new String(receivedMessage.getBody(), UTF_8));
}, error -> System.err.print(error.toString()));
subscription.dispose();
}
/**
* Receives message with back pressure.
*/
public void receiveBackpressure() {
ServiceBusReceiverAsyncClient receiver = new ServiceBusClientBuilder()
.connectionString("fake-string")
.buildReceiverClientBuilder()
.queueName("<QUEUE-NAME>")
.buildAsyncClient();
receiver.receive().subscribe(new BaseSubscriber<ServiceBusReceivedMessage>() {
private static final int NUMBER_OF_MESSAGES = 5;
private final AtomicInteger currentNumberOfMessages = new AtomicInteger();
@Override
protected void hookOnSubscribe(Subscription subscription) {
request(NUMBER_OF_MESSAGES);
}
@Override
protected void hookOnNext(ServiceBusReceivedMessage value) {
if (currentNumberOfMessages.incrementAndGet() % 5 == 0) {
request(NUMBER_OF_MESSAGES);
}
}
});
}
/**
* Receives from all the messages.
*/
} | class ServiceBusReceiverAsyncClientJavaDocCodeSamples {
private final ServiceBusReceiverAsyncClient consumer = new ServiceBusClientBuilder()
.connectionString("fake-string")
.buildReceiverClientBuilder()
.queueName("<QUEUE-NAME>")
.buildAsyncClient();
public void initialization() {
ServiceBusReceiverAsyncClient consumer = new ServiceBusClientBuilder()
.connectionString("Endpoint={fully-qualified-namespace};SharedAccessKeyName={policy-name};"
+ "SharedAccessKey={key};EntityPath={eh-name}")
.buildReceiverClientBuilder()
.queueName("<QUEUE-NAME>")
.buildAsyncClient();
consumer.close();
}
public void instantiateWithDefaultCredential() {
ServiceBusReceiverAsyncClient receiver = new ServiceBusClientBuilder()
.credential("<<fully-qualified-namespace>>",
new DefaultAzureCredentialBuilder().build())
.buildReceiverClientBuilder()
.queueName("<QUEUE-NAME>")
.buildAsyncClient();
receiver.close();
}
/**
* Receives message from a queue or topic using receive and delete mode.
*/
public void receiveWithReceiveAndDeleteMode() {
ServiceBusReceiverAsyncClient receiver = new ServiceBusClientBuilder()
.connectionString("fake-string")
.buildReceiverClientBuilder()
.receiveMode(ReceiveMode.RECEIVE_AND_DELETE)
.queueName("<QUEUE-NAME>")
.buildAsyncClient();
Disposable subscription = receiver.receive()
.subscribe(receivedMessage -> {
String messageId = receivedMessage.getMessageId();
System.out.printf("Received message messageId %s%n", messageId);
System.out.printf("Contents of message as string: %s%n", new String(receivedMessage.getBody(), UTF_8));
}, error -> System.err.print(error.toString()));
receiver.close();
subscription.dispose();
}
/**
* Receives message with back pressure.
*/
public void receiveBackpressure() {
ServiceBusReceiverAsyncClient receiver = new ServiceBusClientBuilder()
.connectionString("fake-string")
.buildReceiverClientBuilder()
.queueName("<QUEUE-NAME>")
.buildAsyncClient();
receiver.receive().subscribe(new BaseSubscriber<ServiceBusReceivedMessage>() {
private static final int NUMBER_OF_MESSAGES = 5;
private final AtomicInteger currentNumberOfMessages = new AtomicInteger();
@Override
protected void hookOnSubscribe(Subscription subscription) {
request(NUMBER_OF_MESSAGES);
}
@Override
protected void hookOnNext(ServiceBusReceivedMessage value) {
if (currentNumberOfMessages.incrementAndGet() % 5 == 0) {
request(NUMBER_OF_MESSAGES);
}
}
});
receiver.close();
}
/**
* Receives from all the messages.
*/
} |
Removed as this should be caught by the check for `Page.class` and should be deserialized through that code path. | private static boolean isReturnTypeDecodable(Type returnType) {
if (returnType == null) {
return false;
}
if (TypeUtil.isTypeOrSubTypeOf(returnType, Mono.class)) {
returnType = TypeUtil.getTypeArgument(returnType);
}
if (TypeUtil.isTypeOrSubTypeOf(returnType, ResponseBase.class)) {
ParameterizedType parameterizedType =
(ParameterizedType) TypeUtil.getSuperType(returnType, ResponseBase.class);
if (parameterizedType.getActualTypeArguments().length == 2) {
returnType = parameterizedType.getActualTypeArguments()[1];
}
}
return !FluxUtil.isFluxByteBuffer(returnType)
&& !TypeUtil.isTypeOrSubTypeOf(returnType, byte[].class)
&& !TypeUtil.isTypeOrSubTypeOf(returnType, Void.TYPE)
&& !TypeUtil.isTypeOrSubTypeOf(returnType, Void.class);
} | private static boolean isReturnTypeDecodable(Type returnType) {
if (returnType == null) {
return false;
}
if (TypeUtil.isTypeOrSubTypeOf(returnType, Mono.class)) {
returnType = TypeUtil.getTypeArgument(returnType);
}
if (TypeUtil.isTypeOrSubTypeOf(returnType, ResponseBase.class)) {
ParameterizedType parameterizedType =
(ParameterizedType) TypeUtil.getSuperType(returnType, ResponseBase.class);
if (parameterizedType.getActualTypeArguments().length == 2) {
returnType = parameterizedType.getActualTypeArguments()[1];
}
}
return !FluxUtil.isFluxByteBuffer(returnType)
&& !TypeUtil.isTypeOrSubTypeOf(returnType, byte[].class)
&& !TypeUtil.isTypeOrSubTypeOf(returnType, Void.TYPE)
&& !TypeUtil.isTypeOrSubTypeOf(returnType, Void.class);
} | class HttpResponseBodyDecoder {
/**
* Decodes body of a http response.
*
* The content reading and decoding happens when caller subscribe to the returned {@code Mono<Object>}, if the
* response body is not decodable then {@code Mono.empty()} will be returned.
*
* @param body the response body to decode, null for this parameter indicate read body from {@code httpResponse}
* parameter and decode it.
* @param httpResponse the response containing the body to be decoded
* @param serializer the adapter to use for decoding
* @param decodeData the necessary data required to decode a Http response
* @return publisher that emits decoded response body upon subscription if body is decodable, no emission if the
* body is not-decodable
*/
static Mono<Object> decode(String body, HttpResponse httpResponse, SerializerAdapter serializer,
HttpResponseDecodeData decodeData) {
ensureRequestSet(httpResponse);
final ClientLogger logger = new ClientLogger(HttpResponseBodyDecoder.class);
return Mono.defer(() -> {
if (isErrorStatus(httpResponse, decodeData)) {
Mono<String> bodyMono = body == null ? httpResponse.getBodyAsString() : Mono.just(body);
return bodyMono.flatMap(bodyString -> {
try {
final Object decodedErrorEntity = deserializeBody(bodyString,
decodeData.getUnexpectedException(httpResponse.getStatusCode()).getExceptionBodyType(),
null, serializer, SerializerEncoding.fromHeaders(httpResponse.getHeaders()));
return Mono.justOrEmpty(decodedErrorEntity);
} catch (IOException | MalformedValueException ex) {
logger.warning("Failed to deserialize the error entity.", ex);
return Mono.empty();
}
});
} else if (httpResponse.getRequest().getHttpMethod() == HttpMethod.HEAD) {
return Mono.empty();
} else {
if (!isReturnTypeDecodable(decodeData.getReturnType())) {
return Mono.empty();
}
Mono<String> bodyMono = body == null ? httpResponse.getBodyAsString() : Mono.just(body);
return bodyMono.flatMap(bodyString -> {
try {
final Object decodedSuccessEntity = deserializeBody(bodyString,
extractEntityTypeFromReturnType(decodeData), decodeData.getReturnValueWireType(),
serializer, SerializerEncoding.fromHeaders(httpResponse.getHeaders()));
return Mono.justOrEmpty(decodedSuccessEntity);
} catch (MalformedValueException e) {
return Mono.error(new HttpResponseException("HTTP response has a malformed body.",
httpResponse, e));
} catch (IOException e) {
return Mono.error(new HttpResponseException("Deserialization Failed.", httpResponse, e));
}
});
}
});
}
/**
* @return the decoded type used to decode the response body, null if the body is not decodable.
*/
static Type decodedType(HttpResponse httpResponse, HttpResponseDecodeData decodeData) {
ensureRequestSet(httpResponse);
if (isErrorStatus(httpResponse, decodeData)) {
return decodeData.getUnexpectedException(httpResponse.getStatusCode()).getExceptionBodyType();
} else if (httpResponse.getRequest().getHttpMethod() == HttpMethod.HEAD) {
return null;
} else {
return isReturnTypeDecodable(decodeData.getReturnType())
? extractEntityTypeFromReturnType(decodeData)
: null;
}
}
/**
* Checks the response status code is considered as error.
*
* @param httpResponse the response to check
* @param decodeData the response metadata
* @return true if the response status code is considered as error, false otherwise.
*/
static boolean isErrorStatus(HttpResponse httpResponse, HttpResponseDecodeData decodeData) {
final int[] expectedStatuses = decodeData.getExpectedStatusCodes();
int statusCode = httpResponse.getStatusCode();
if (expectedStatuses != null) {
return Arrays.stream(expectedStatuses).noneMatch(expectedCode -> expectedCode == statusCode);
} else {
return statusCode / 100 != 2;
}
}
/**
* Deserialize the given string value representing content of a REST API response.
*
* If the {@link ReturnValueWireType} is of type {@link Page}, then the returned object will be an instance of that
* {@param wireType}. Otherwise, the returned object is converted back to its {@param resultType}.
*
* @param value the string value to deserialize
* @param resultType the return type of the java proxy method
* @param wireType value of optional {@link ReturnValueWireType} annotation present in java proxy method indicating
* 'entity type' (wireType) of REST API wire response body
* @param encoding the encoding format of value
* @return Deserialized object
* @throws IOException When the body cannot be deserialized
*/
private static Object deserializeBody(String value, Type resultType, Type wireType, SerializerAdapter serializer,
SerializerEncoding encoding) throws IOException {
if (wireType == null) {
return serializer.deserialize(value, resultType, encoding);
} else if (TypeUtil.isTypeOrSubTypeOf(wireType, Page.class)) {
return deserializePage(value, resultType, wireType, serializer, encoding);
} else {
final Type wireResponseType = constructWireResponseType(resultType, wireType);
final Object wireResponse = serializer.deserialize(value, wireResponseType, encoding);
return convertToResultType(wireResponse, resultType, wireType);
}
}
/**
* Given: (1). the {@code java.lang.reflect.Type} (resultType) of java proxy method return value (2). and {@link
* ReturnValueWireType} annotation value indicating 'entity type' (wireType) of same REST API's wire response body
* this method construct 'response body Type'.
*
* Note: When {@link ReturnValueWireType} annotation is applied to a proxy method, then the raw HTTP response
* content will need to parsed using the derived 'response body Type' then converted to actual {@code returnType}.
*
* @param resultType the {@code java.lang.reflect.Type} of java proxy method return value
* @param wireType the {@code java.lang.reflect.Type} of entity in REST API response body
* @return the {@code java.lang.reflect.Type} of REST API response body
*/
private static Type constructWireResponseType(Type resultType, Type wireType) {
Objects.requireNonNull(wireType);
Type wireResponseType = resultType;
if (resultType == OffsetDateTime.class) {
if (wireType == DateTimeRfc1123.class) {
wireResponseType = DateTimeRfc1123.class;
} else if (wireType == UnixTime.class) {
wireResponseType = UnixTime.class;
}
} else if (TypeUtil.isTypeOrSubTypeOf(resultType, List.class)) {
final Type resultElementType = TypeUtil.getTypeArgument(resultType);
final Type wireResponseElementType = constructWireResponseType(resultElementType, wireType);
wireResponseType = TypeUtil.createParameterizedType(
(Class<?>) ((ParameterizedType) resultType).getRawType(), wireResponseElementType);
} else if (TypeUtil.isTypeOrSubTypeOf(resultType, Map.class)) {
Type[] typeArguments = TypeUtil.getTypeArguments(resultType);
final Type resultValueType = typeArguments[1];
final Type wireResponseValueType = constructWireResponseType(resultValueType, wireType);
wireResponseType = TypeUtil.createParameterizedType(
(Class<?>) ((ParameterizedType) resultType).getRawType(), typeArguments[0], wireResponseValueType);
}
return wireResponseType;
}
/**
* Deserializes a response body as a Page<T> given that {@param wireType} is either: 1. A type that implements
* the interface 2. Is of {@link Page}
*
* @param value The string to deserialize
* @param resultType The type T, of the page contents.
* @param wireType The {@link Type} that either is, or implements {@link Page}
* @param serializer The serializer used to deserialize the value.
* @param encoding Encoding used to deserialize string
* @return An object representing an instance of {@param wireType}
* @throws IOException if the serializer is unable to deserialize the value.
*/
private static Object deserializePage(String value, Type resultType, Type wireType, SerializerAdapter serializer,
SerializerEncoding encoding) throws IOException {
final Type wireResponseType = (wireType == Page.class)
? TypeUtil.createParameterizedType(ItemPage.class, resultType)
: wireType;
return serializer.deserialize(value, wireResponseType, encoding);
}
/**
* Converts the object {@code wireResponse} that was deserialized using 'response body Type' (produced by {@code
* constructWireResponseType(args)} method) to resultType.
*
* @param wireResponse the object to convert
* @param resultType the {@code java.lang.reflect.Type} to convert wireResponse to
* @param wireType the {@code java.lang.reflect.Type} of the wireResponse
* @return converted object
*/
private static Object convertToResultType(Object wireResponse, Type resultType, Type wireType) {
Object result = wireResponse;
if (resultType == OffsetDateTime.class) {
if (wireType == DateTimeRfc1123.class) {
result = ((DateTimeRfc1123) wireResponse).getDateTime();
} else if (wireType == UnixTime.class) {
result = ((UnixTime) wireResponse).getDateTime();
}
} else if (TypeUtil.isTypeOrSubTypeOf(resultType, List.class)) {
final Type resultElementType = TypeUtil.getTypeArgument(resultType);
@SuppressWarnings("unchecked") final List<Object> wireResponseList = (List<Object>) wireResponse;
final int wireResponseListSize = wireResponseList.size();
for (int i = 0; i < wireResponseListSize; ++i) {
final Object wireResponseElement = wireResponseList.get(i);
final Object resultElement =
convertToResultType(wireResponseElement, resultElementType, wireType);
if (wireResponseElement != resultElement) {
wireResponseList.set(i, resultElement);
}
}
result = wireResponseList;
} else if (TypeUtil.isTypeOrSubTypeOf(resultType, Map.class)) {
final Type resultValueType = TypeUtil.getTypeArguments(resultType)[1];
@SuppressWarnings("unchecked") final Map<String, Object> wireResponseMap =
(Map<String, Object>) wireResponse;
final Set<Map.Entry<String, Object>> wireResponseEntries = wireResponseMap.entrySet();
for (Map.Entry<String, Object> wireResponseEntry : wireResponseEntries) {
final Object wireResponseValue = wireResponseEntry.getValue();
final Object resultValue = convertToResultType(wireResponseValue, resultValueType, wireType);
if (wireResponseValue != resultValue) {
wireResponseMap.put(wireResponseEntry.getKey(), resultValue);
}
}
result = wireResponseMap;
}
return result;
}
/**
* Get the {@link Type} of the REST API 'returned entity'.
*
* In the declaration of a java proxy method corresponding to the REST API, the 'returned entity' can be:
*
* 1. emission value of the reactor publisher returned by proxy method
*
* e.g. {@code Mono<Foo> getFoo(args);} {@code Flux<Foo> getFoos(args);} where Foo is the REST API 'returned
* entity'.
*
* 2. OR content (value) of {@link ResponseBase} emitted by the reactor publisher returned from proxy method
*
* e.g. {@code Mono<RestResponseBase<headers, Foo>> getFoo(args);} {@code Flux<RestResponseBase<headers, Foo>>
* getFoos(args);} where Foo is the REST API return entity.
*
* @return the entity type.
*/
private static Type extractEntityTypeFromReturnType(HttpResponseDecodeData decodeData) {
Type token = decodeData.getReturnType();
if (TypeUtil.isTypeOrSubTypeOf(token, Mono.class)) {
token = TypeUtil.getTypeArgument(token);
}
if (TypeUtil.isTypeOrSubTypeOf(token, Response.class)) {
token = TypeUtil.getRestResponseBodyType(token);
}
return token;
}
/**
* Checks if the {@code returnType} is a decodable type.
*
* @param returnType The return type of the method.
* @return True if the return type is decodable, false otherwise.
*/
/**
* Ensure that request property and method is set in the response.
*
* @param httpResponse the response to validate
*/
private static void ensureRequestSet(HttpResponse httpResponse) {
Objects.requireNonNull(httpResponse.getRequest());
Objects.requireNonNull(httpResponse.getRequest().getHttpMethod());
}
} | class HttpResponseBodyDecoder {
/**
* Decodes body of a http response.
*
* The content reading and decoding happens when caller subscribe to the returned {@code Mono<Object>}, if the
* response body is not decodable then {@code Mono.empty()} will be returned.
*
* @param body the response body to decode, null for this parameter indicate read body from {@code httpResponse}
* parameter and decode it.
* @param httpResponse the response containing the body to be decoded
* @param serializer the adapter to use for decoding
* @param decodeData the necessary data required to decode a Http response
* @return publisher that emits decoded response body upon subscription if body is decodable, no emission if the
* body is not-decodable
*/
static Mono<Object> decode(String body, HttpResponse httpResponse, SerializerAdapter serializer,
HttpResponseDecodeData decodeData) {
ensureRequestSet(httpResponse);
final ClientLogger logger = new ClientLogger(HttpResponseBodyDecoder.class);
return Mono.defer(() -> {
if (isErrorStatus(httpResponse, decodeData)) {
Mono<String> bodyMono = body == null ? httpResponse.getBodyAsString() : Mono.just(body);
return bodyMono.flatMap(bodyString -> {
try {
final Object decodedErrorEntity = deserializeBody(bodyString,
decodeData.getUnexpectedException(httpResponse.getStatusCode()).getExceptionBodyType(),
null, serializer, SerializerEncoding.fromHeaders(httpResponse.getHeaders()));
return Mono.justOrEmpty(decodedErrorEntity);
} catch (IOException | MalformedValueException ex) {
logger.warning("Failed to deserialize the error entity.", ex);
return Mono.empty();
}
});
} else if (httpResponse.getRequest().getHttpMethod() == HttpMethod.HEAD) {
return Mono.empty();
} else {
if (!isReturnTypeDecodable(decodeData.getReturnType())) {
return Mono.empty();
}
Mono<String> bodyMono = body == null ? httpResponse.getBodyAsString() : Mono.just(body);
return bodyMono.flatMap(bodyString -> {
try {
final Object decodedSuccessEntity = deserializeBody(bodyString,
extractEntityTypeFromReturnType(decodeData), decodeData.getReturnValueWireType(),
serializer, SerializerEncoding.fromHeaders(httpResponse.getHeaders()));
return Mono.justOrEmpty(decodedSuccessEntity);
} catch (MalformedValueException e) {
return Mono.error(new HttpResponseException("HTTP response has a malformed body.",
httpResponse, e));
} catch (IOException e) {
return Mono.error(new HttpResponseException("Deserialization Failed.", httpResponse, e));
}
});
}
});
}
/**
* @return the decoded type used to decode the response body, null if the body is not decodable.
*/
static Type decodedType(HttpResponse httpResponse, HttpResponseDecodeData decodeData) {
ensureRequestSet(httpResponse);
if (isErrorStatus(httpResponse, decodeData)) {
return decodeData.getUnexpectedException(httpResponse.getStatusCode()).getExceptionBodyType();
} else if (httpResponse.getRequest().getHttpMethod() == HttpMethod.HEAD) {
return null;
} else {
return isReturnTypeDecodable(decodeData.getReturnType())
? extractEntityTypeFromReturnType(decodeData)
: null;
}
}
/**
* Checks the response status code is considered as error.
*
* @param httpResponse the response to check
* @param decodeData the response metadata
* @return true if the response status code is considered as error, false otherwise.
*/
static boolean isErrorStatus(HttpResponse httpResponse, HttpResponseDecodeData decodeData) {
final int[] expectedStatuses = decodeData.getExpectedStatusCodes();
int statusCode = httpResponse.getStatusCode();
if (expectedStatuses != null) {
return Arrays.stream(expectedStatuses).noneMatch(expectedCode -> expectedCode == statusCode);
} else {
return statusCode / 100 != 2;
}
}
/**
* Deserialize the given string value representing content of a REST API response.
*
* If the {@link ReturnValueWireType} is of type {@link Page}, then the returned object will be an instance of that
* {@param wireType}. Otherwise, the returned object is converted back to its {@param resultType}.
*
* @param value the string value to deserialize
* @param resultType the return type of the java proxy method
* @param wireType value of optional {@link ReturnValueWireType} annotation present in java proxy method indicating
* 'entity type' (wireType) of REST API wire response body
* @param encoding the encoding format of value
* @return Deserialized object
* @throws IOException When the body cannot be deserialized
*/
private static Object deserializeBody(String value, Type resultType, Type wireType, SerializerAdapter serializer,
SerializerEncoding encoding) throws IOException {
if (wireType == null) {
return serializer.deserialize(value, resultType, encoding);
} else if (TypeUtil.isTypeOrSubTypeOf(wireType, Page.class)) {
return deserializePage(value, resultType, wireType, serializer, encoding);
} else {
final Type wireResponseType = constructWireResponseType(resultType, wireType);
final Object wireResponse = serializer.deserialize(value, wireResponseType, encoding);
return convertToResultType(wireResponse, resultType, wireType);
}
}
/**
* Given: (1). the {@code java.lang.reflect.Type} (resultType) of java proxy method return value (2). and {@link
* ReturnValueWireType} annotation value indicating 'entity type' (wireType) of same REST API's wire response body
* this method construct 'response body Type'.
*
* Note: When {@link ReturnValueWireType} annotation is applied to a proxy method, then the raw HTTP response
* content will need to parsed using the derived 'response body Type' then converted to actual {@code returnType}.
*
* @param resultType the {@code java.lang.reflect.Type} of java proxy method return value
* @param wireType the {@code java.lang.reflect.Type} of entity in REST API response body
* @return the {@code java.lang.reflect.Type} of REST API response body
*/
private static Type constructWireResponseType(Type resultType, Type wireType) {
Objects.requireNonNull(wireType);
Type wireResponseType = resultType;
if (resultType == byte[].class) {
if (wireType == Base64Url.class) {
wireResponseType = Base64Url.class;
}
} else if (resultType == OffsetDateTime.class) {
if (wireType == DateTimeRfc1123.class) {
wireResponseType = DateTimeRfc1123.class;
} else if (wireType == UnixTime.class) {
wireResponseType = UnixTime.class;
}
} else if (TypeUtil.isTypeOrSubTypeOf(resultType, List.class)) {
final Type resultElementType = TypeUtil.getTypeArgument(resultType);
final Type wireResponseElementType = constructWireResponseType(resultElementType, wireType);
wireResponseType = TypeUtil.createParameterizedType(
(Class<?>) ((ParameterizedType) resultType).getRawType(), wireResponseElementType);
} else if (TypeUtil.isTypeOrSubTypeOf(resultType, Map.class)) {
Type[] typeArguments = TypeUtil.getTypeArguments(resultType);
final Type resultValueType = typeArguments[1];
final Type wireResponseValueType = constructWireResponseType(resultValueType, wireType);
wireResponseType = TypeUtil.createParameterizedType(
(Class<?>) ((ParameterizedType) resultType).getRawType(), typeArguments[0], wireResponseValueType);
}
return wireResponseType;
}
/**
* Deserializes a response body as a Page<T> given that {@param wireType} is either: 1. A type that implements
* the interface 2. Is of {@link Page}
*
* @param value The string to deserialize
* @param resultType The type T, of the page contents.
* @param wireType The {@link Type} that either is, or implements {@link Page}
* @param serializer The serializer used to deserialize the value.
* @param encoding Encoding used to deserialize string
* @return An object representing an instance of {@param wireType}
* @throws IOException if the serializer is unable to deserialize the value.
*/
private static Object deserializePage(String value, Type resultType, Type wireType, SerializerAdapter serializer,
SerializerEncoding encoding) throws IOException {
final Type wireResponseType = (wireType == Page.class)
? TypeUtil.createParameterizedType(ItemPage.class, resultType)
: wireType;
return serializer.deserialize(value, wireResponseType, encoding);
}
/**
* Converts the object {@code wireResponse} that was deserialized using 'response body Type' (produced by {@code
* constructWireResponseType(args)} method) to resultType.
*
* @param wireResponse the object to convert
* @param resultType the {@code java.lang.reflect.Type} to convert wireResponse to
* @param wireType the {@code java.lang.reflect.Type} of the wireResponse
* @return converted object
*/
private static Object convertToResultType(Object wireResponse, Type resultType, Type wireType) {
Object result = wireResponse;
if (resultType == byte[].class) {
if (wireType == Base64Url.class) {
result = ((Base64Url) wireResponse).decodedBytes();
}
} else if (resultType == OffsetDateTime.class) {
if (wireType == DateTimeRfc1123.class) {
result = ((DateTimeRfc1123) wireResponse).getDateTime();
} else if (wireType == UnixTime.class) {
result = ((UnixTime) wireResponse).getDateTime();
}
} else if (TypeUtil.isTypeOrSubTypeOf(resultType, List.class)) {
final Type resultElementType = TypeUtil.getTypeArgument(resultType);
@SuppressWarnings("unchecked") final List<Object> wireResponseList = (List<Object>) wireResponse;
final int wireResponseListSize = wireResponseList.size();
for (int i = 0; i < wireResponseListSize; ++i) {
final Object wireResponseElement = wireResponseList.get(i);
final Object resultElement =
convertToResultType(wireResponseElement, resultElementType, wireType);
if (wireResponseElement != resultElement) {
wireResponseList.set(i, resultElement);
}
}
result = wireResponseList;
} else if (TypeUtil.isTypeOrSubTypeOf(resultType, Map.class)) {
final Type resultValueType = TypeUtil.getTypeArguments(resultType)[1];
@SuppressWarnings("unchecked") final Map<String, Object> wireResponseMap =
(Map<String, Object>) wireResponse;
final Set<Map.Entry<String, Object>> wireResponseEntries = wireResponseMap.entrySet();
for (Map.Entry<String, Object> wireResponseEntry : wireResponseEntries) {
final Object wireResponseValue = wireResponseEntry.getValue();
final Object resultValue = convertToResultType(wireResponseValue, resultValueType, wireType);
if (wireResponseValue != resultValue) {
wireResponseMap.put(wireResponseEntry.getKey(), resultValue);
}
}
result = wireResponseMap;
}
return result;
}
/**
* Get the {@link Type} of the REST API 'returned entity'.
*
* In the declaration of a java proxy method corresponding to the REST API, the 'returned entity' can be:
*
* 1. emission value of the reactor publisher returned by proxy method
*
* e.g. {@code Mono<Foo> getFoo(args);} {@code Flux<Foo> getFoos(args);} where Foo is the REST API 'returned
* entity'.
*
* 2. OR content (value) of {@link ResponseBase} emitted by the reactor publisher returned from proxy method
*
* e.g. {@code Mono<RestResponseBase<headers, Foo>> getFoo(args);} {@code Flux<RestResponseBase<headers, Foo>>
* getFoos(args);} where Foo is the REST API return entity.
*
* @return the entity type.
*/
private static Type extractEntityTypeFromReturnType(HttpResponseDecodeData decodeData) {
Type token = decodeData.getReturnType();
if (TypeUtil.isTypeOrSubTypeOf(token, Mono.class)) {
token = TypeUtil.getTypeArgument(token);
}
if (TypeUtil.isTypeOrSubTypeOf(token, Response.class)) {
token = TypeUtil.getRestResponseBodyType(token);
}
return token;
}
/**
* Checks if the {@code returnType} is a decodable type.
*
* @param returnType The return type of the method.
* @return True if the return type is decodable, false otherwise.
*/
/**
* Ensure that request property and method is set in the response.
*
* @param httpResponse the response to validate
*/
private static void ensureRequestSet(HttpResponse httpResponse) {
Objects.requireNonNull(httpResponse.getRequest());
Objects.requireNonNull(httpResponse.getRequest().getHttpMethod());
}
} | |
Should this just be `Objects.requireNonNull`, if `null` is returned here it will eventually lead to a `NullPointerException` in another, less obvious, location. | private static List<String> toLonLatStrings(GeoPoint point) {
if (point == null) {
return null;
}
return Arrays.asList(String.valueOf(point.getLongitude()), String.valueOf(point.getLatitude()));
} | } | private static List<String> toLonLatStrings(GeoPoint point) {
Objects.requireNonNull(point);
return Arrays.asList(String.valueOf(point.getLongitude()), String.valueOf(point.getLatitude()));
} | class with the given name and GeographyPoint value.
*
* @param name Name of the scoring parameter.
* @param value Value of the scoring parameter.
*/
public ScoringParameter(String name, GeoPoint value) {
this(name, toLonLatStrings(value));
} | class with the given name and GeographyPoint value.
*
* @param name Name of the scoring parameter.
* @param value Value of the scoring parameter.
*/
public ScoringParameter(String name, GeoPoint value) {
this(name, toLonLatStrings(value));
} |
Should we describe that these messages are also autocompleted? | public static void main(String[] args) {
String connectionString = System.getenv("AZURE_SERVICEBUS_CONNECTION_STRING");
ServiceBusReceiverAsyncClient receiverAsyncClient = new ServiceBusClientBuilder()
.connectionString(connectionString)
.buildReceiverClientBuilder()
.queueName("<<queue-name>>")
.buildAsyncClient();
Disposable subscription = receiverAsyncClient.receive()
.subscribe(message -> {
System.out.println("Received Message Id:" + message.getMessageId());
System.out.println("Received Message:" + new String(message.getBody()));
}, error -> System.err.println("Error occurred while receiving message: " + error),
() -> System.out.println("Receiving complete."));
try {
Thread.sleep(Duration.ofSeconds(20).toMillis());
} catch (InterruptedException ignored) {
}
subscription.dispose();
receiverAsyncClient.close();
} | System.out.println("Received Message Id:" + message.getMessageId()); | public static void main(String[] args) {
String connectionString = "Endpoint={fully-qualified-namespace};SharedAccessKeyName={policy-name};"
+ "SharedAccessKey={key}";
ServiceBusReceiverAsyncClient receiverAsyncClient = new ServiceBusClientBuilder()
.connectionString(connectionString)
.receiver()
.queueName("<<queue-name>>")
.buildAsyncClient();
Disposable subscription = receiverAsyncClient.receive()
.subscribe(message -> {
System.out.println("Received Message Id:" + message.getMessageId());
System.out.println("Received Message:" + new String(message.getBody()));
}, error -> System.err.println("Error occurred while receiving message: " + error),
() -> System.out.println("Receiving complete."));
try {
Thread.sleep(Duration.ofSeconds(20).toMillis());
} catch (InterruptedException ignored) {
}
subscription.dispose();
receiverAsyncClient.close();
} | class ReceiveMessageAsyncSample {
/**
* Main method to invoke this demo on how to receive an {@link ServiceBusMessage} from an Azure Service Bus
* Queue
*
* @param args Unused arguments to the program.
*/
} | class ReceiveMessageAsyncSample {
/**
* Main method to invoke this demo on how to receive an {@link ServiceBusMessage} from an Azure Service Bus
* Queue
*
* @param args Unused arguments to the program.
*/
} |
Could this or `toLonLatStrings` be changed so the `NullPointerException` thrown from either of these methods instead of the other constructor when `value` is null. | public ScoringParameter(String name, GeoPoint value) {
this(name, toLonLatStrings(value));
} | this(name, toLonLatStrings(value)); | public ScoringParameter(String name, GeoPoint value) {
this(name, toLonLatStrings(value));
} | class with the given name and GeographyPoint value.
*
* @param name Name of the scoring parameter.
* @param value Value of the scoring parameter.
*/ | class with the given name and GeographyPoint value.
*
* @param name Name of the scoring parameter.
* @param value Value of the scoring parameter.
*/ |
Should make sure the constructor and this method follow the same pattern around ensuring the internal values list isn't mutable. Right now the constructor will clone the list, effectively a deep clone based on how Strings work, ensuring it is immutable if the original list changes but this will return a reference to the internal list that could be mutated. | public List<String> getValues() {
return values;
} | return values; | public List<String> getValues() {
return new ArrayList<>(values);
} | class with the given name and GeographyPoint value.
*
* @param name Name of the scoring parameter.
* @param value Value of the scoring parameter.
*/
public ScoringParameter(String name, GeoPoint value) {
this(name, toLonLatStrings(value));
} | class with the given name and GeographyPoint value.
*
* @param name Name of the scoring parameter.
* @param value Value of the scoring parameter.
*/
public ScoringParameter(String name, GeoPoint value) {
this(name, toLonLatStrings(value));
} |
switch to null pointer exception check. | private static List<String> toLonLatStrings(GeoPoint point) {
if (point == null) {
return null;
}
return Arrays.asList(String.valueOf(point.getLongitude()), String.valueOf(point.getLatitude()));
} | } | private static List<String> toLonLatStrings(GeoPoint point) {
Objects.requireNonNull(point);
return Arrays.asList(String.valueOf(point.getLongitude()), String.valueOf(point.getLatitude()));
} | class with the given name and GeographyPoint value.
*
* @param name Name of the scoring parameter.
* @param value Value of the scoring parameter.
*/
public ScoringParameter(String name, GeoPoint value) {
this(name, toLonLatStrings(value));
} | class with the given name and GeographyPoint value.
*
* @param name Name of the scoring parameter.
* @param value Value of the scoring parameter.
*/
public ScoringParameter(String name, GeoPoint value) {
this(name, toLonLatStrings(value));
} |
Why do we need to repeat creating `flattenValue`? This could be simplified to the following: ```java return name + SEPARATOR + flattenValue; ``` | public String toString() {
String flattenValue = values.stream().filter(value -> !CoreUtils.isNullOrEmpty(value))
.map(this::escapeValue).collect(Collectors.joining(COMMA));
if (CoreUtils.isNullOrEmpty(flattenValue)) {
throw logger.logExceptionAsError(
new IllegalArgumentException("There must be at least one valid value for scoring parameter values."));
}
return name + SEPARATOR + values.stream().filter(value -> !CoreUtils.isNullOrEmpty(value))
.map(this::escapeValue).collect(Collectors.joining(COMMA));
} | .map(this::escapeValue).collect(Collectors.joining(COMMA)); | public String toString() {
String flattenValue = values.stream().filter(value -> !CoreUtils.isNullOrEmpty(value))
.map(ScoringParameter::escapeValue).collect(Collectors.joining(COMMA));
if (CoreUtils.isNullOrEmpty(flattenValue)) {
throw logger.logExceptionAsError(
new IllegalArgumentException("There must be at least one valid value for scoring parameter values."));
}
return name + DASH + flattenValue;
} | class with the given name and GeographyPoint value.
*
* @param name Name of the scoring parameter.
* @param value Value of the scoring parameter.
*/
public ScoringParameter(String name, GeoPoint value) {
this(name, toLonLatStrings(value));
} | class with the given name and GeographyPoint value.
*
* @param name Name of the scoring parameter.
* @param value Value of the scoring parameter.
*/
public ScoringParameter(String name, GeoPoint value) {
this(name, toLonLatStrings(value));
} |
Put the null checking here. | public ScoringParameter(String name, GeoPoint value) {
this(name, toLonLatStrings(value));
} | this(name, toLonLatStrings(value)); | public ScoringParameter(String name, GeoPoint value) {
this(name, toLonLatStrings(value));
} | class with the given name and GeographyPoint value.
*
* @param name Name of the scoring parameter.
* @param value Value of the scoring parameter.
*/ | class with the given name and GeographyPoint value.
*
* @param name Name of the scoring parameter.
* @param value Value of the scoring parameter.
*/ |
Good point. Will deep clone the list. | public List<String> getValues() {
return values;
} | return values; | public List<String> getValues() {
return new ArrayList<>(values);
} | class with the given name and GeographyPoint value.
*
* @param name Name of the scoring parameter.
* @param value Value of the scoring parameter.
*/
public ScoringParameter(String name, GeoPoint value) {
this(name, toLonLatStrings(value));
} | class with the given name and GeographyPoint value.
*
* @param name Name of the scoring parameter.
* @param value Value of the scoring parameter.
*/
public ScoringParameter(String name, GeoPoint value) {
this(name, toLonLatStrings(value));
} |
In our testing subscriptions we can't create a collection with 1M throughput upfront. The workaround we have is to create a collection with 100K throughput and then scale up. Now with this auto deleting collection it means we have to do this manual process everytime. we don't want to always delete the collection. We should have a config option called `autoCollectionCreateEnabled` when present we should attempt to 1. create collection if not exist 2. delete the collection when `autoCollectionCreateEnabled=false` we shouldn't attempt to create or delete the collection, and if the collection doesn't exist we should fail. | void shutdown() {
cosmosAsyncContainer.delete().block();
logger.info("Deleted test container {}" , this.configuration.getCollectionId());
cosmosClient.close();
} | cosmosAsyncContainer.delete().block(); | void shutdown() {
if (this.databaseCreated) {
cosmosAsyncDatabase.delete().block();
logger.info("Deleted temporary database {} created for this test", this.configuration.getDatabaseId());
} else if (this.collectionCreated) {
cosmosAsyncContainer.delete().block();
logger.info("Deleted temporary collection {} created for this test", this.configuration.getCollectionId());
}
cosmosClient.close();
} | class AsyncBenchmark<T> {
private final MetricRegistry metricsRegistry = new MetricRegistry();
private final ScheduledReporter reporter;
private Meter successMeter;
private Meter failureMeter;
final Logger logger;
final CosmosAsyncClient cosmosClient;
final CosmosAsyncContainer cosmosAsyncContainer;
final CosmosAsyncDatabase cosmosAsyncDatabase;
final String partitionKey;
final Configuration configuration;
final List<PojoizedJson> docsToRead;
final Semaphore concurrencyControlSemaphore;
Timer latency;
/**
 * Wires up the benchmark run: builds the async Cosmos client, provisions the
 * database and collection (create-if-not-exists), pre-creates the documents
 * that read/query workloads consume, and configures metrics reporting
 * (Graphite when an endpoint is set, console otherwise, plus optional JVM
 * stats and Micrometer bridges).
 *
 * NOTE(review): this version always provisions the collection and the matching
 * shutdown() deletes it unconditionally; see the review discussion about only
 * deleting resources that the test itself created.
 */
AsyncBenchmark(Configuration cfg) {
cosmosClient = new CosmosClientBuilder()
.endpoint(cfg.getServiceEndpoint())
.key(cfg.getMasterKey())
.connectionPolicy(cfg.getConnectionPolicy())
.consistencyLevel(cfg.getConsistencyLevel())
.buildAsyncClient();
configuration = cfg;
cosmosAsyncDatabase = cosmosClient.createDatabaseIfNotExists(this.configuration.getDatabaseId()).block().getDatabase();
cosmosAsyncContainer =
cosmosAsyncDatabase.createContainerIfNotExists(configuration.getCollectionId(), Configuration.PARTITION_KEY, configuration.getThroughput()).block().getContainer();
logger = LoggerFactory.getLogger(this.getClass());
// Partition key property name derived from the container definition, e.g. "/pk" -> "pk".
partitionKey = cosmosAsyncContainer.read().block().getProperties().getPartitionKeyDefinition()
.getPaths().iterator().next().split("/")[1];
concurrencyControlSemaphore = new Semaphore(cfg.getConcurrency());
ArrayList<Flux<PojoizedJson>> createDocumentObservables = new ArrayList<>();
// Write workloads create their own documents; every other workload reads from
// a pre-created set built here.
if (configuration.getOperationType() != Configuration.Operation.WriteLatency
&& configuration.getOperationType() != Configuration.Operation.WriteThroughput
&& configuration.getOperationType() != Configuration.Operation.ReadMyWrites) {
String dataFieldValue = RandomStringUtils.randomAlphabetic(cfg.getDocumentDataFieldSize());
for (int i = 0; i < cfg.getNumberOfPreCreatedDocuments(); i++) {
String uuid = UUID.randomUUID().toString();
PojoizedJson newDoc = generateDocument(uuid, dataFieldValue);
Flux<PojoizedJson> obs = cosmosAsyncContainer.createItem(newDoc).map(resp -> {
PojoizedJson x =
resp.getItem();
return x;
}
).flux();
createDocumentObservables.add(obs);
}
}
// Execute the pending creates with a merge concurrency of 100 and keep the results.
docsToRead = Flux.merge(Flux.fromIterable(createDocumentObservables), 100).collectList().block();
init();
if (configuration.isEnableJvmStats()) {
metricsRegistry.register("gc", new GarbageCollectorMetricSet());
metricsRegistry.register("threads", new CachedThreadStatesGaugeSet(10, TimeUnit.SECONDS));
metricsRegistry.register("memory", new MemoryUsageGaugeSet());
}
// Prefer Graphite reporting when an endpoint is configured; otherwise report to console.
if (configuration.getGraphiteEndpoint() != null) {
final Graphite graphite = new Graphite(new InetSocketAddress(configuration.getGraphiteEndpoint(), configuration.getGraphiteEndpointPort()));
reporter = GraphiteReporter.forRegistry(metricsRegistry)
.prefixedWith(configuration.getOperationType().name())
.convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS)
.filter(MetricFilter.ALL)
.build(graphite);
} else {
reporter = ConsoleReporter.forRegistry(metricsRegistry).convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS).build();
}
// Bridge SDK telemetry into any configured Micrometer registries.
MeterRegistry registry = configuration.getAzureMonitorMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
registry = configuration.getGraphiteMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
}
// Hook for subclasses to run extra setup after the container and pre-created
// documents are ready; default is a no-op.
protected void init() {
}
// Hook invoked after every successfully completed operation; default is a no-op.
protected void onSuccess() {
}
// Hook invoked after every failed operation; default is a no-op.
protected void onError(Throwable throwable) {
}
// Issues the i-th benchmark operation and subscribes the given subscriber to it.
protected abstract void performWorkload(BaseSubscriber<T> baseSubscriber, long i) throws Exception;
/**
 * Gate for the benchmark loop: decides whether one more operation should be issued.
 *
 * Semantics (unchanged from the original):
 *  - no max duration configured -> bounded by the operation count only
 *  - max duration elapsed       -> stop
 *  - negative operation count   -> unbounded until the duration elapses
 *  - otherwise                  -> bounded by the operation count
 *
 * @param startTimeMillis epoch millis at which the run started
 * @param iterationCount  number of operations issued so far
 * @return true if another iteration should run
 */
private boolean shouldContinue(long startTimeMillis, long iterationCount) {
    final Duration maxRuntime = configuration.getMaxRunningTimeDuration();
    final int operationLimit = configuration.getNumberOfOperations();

    if (maxRuntime == null) {
        // Only the operation count bounds the run.
        return iterationCount < operationLimit;
    }

    final long deadlineMillis = startTimeMillis + maxRuntime.toMillis();
    if (System.currentTimeMillis() > deadlineMillis) {
        // Wall-clock budget exhausted.
        return false;
    }

    // A negative limit means "no operation bound"; keep going until the deadline.
    return operationLimit < 0 || iterationCount < operationLimit;
}
/**
 * Drives the benchmark: registers success/failure meters, issues operations
 * while shouldContinue(...) allows, blocks until every in-flight operation has
 * completed, then emits a final metrics report and closes the reporter.
 *
 * NOTE(review): the meter-name string literals below appear truncated in this
 * copy of the file (no closing quote) — verify against the original source.
 */
void run() throws Exception {
successMeter = metricsRegistry.meter("
failureMeter = metricsRegistry.meter("
// Per-operation latency is only tracked for latency-style and query workloads.
switch (configuration.getOperationType()) {
case ReadLatency:
case WriteLatency:
case QueryInClauseParallel:
case QueryCross:
case QuerySingle:
case QuerySingleMany:
case QueryParallel:
case QueryOrderby:
case QueryAggregate:
case QueryAggregateTopOrderby:
case QueryTopOrderby:
case Mixed:
latency = metricsRegistry.timer("Latency");
break;
default:
break;
}
reporter.start(configuration.getPrintingInterval(), TimeUnit.SECONDS);
long startTime = System.currentTimeMillis();
// Number of finished operations (success or failure); also serves as the
// monitor that the main thread waits on below.
AtomicLong count = new AtomicLong(0);
long i;
for ( i = 0; shouldContinue(startTime, i); i++) {
BaseSubscriber<T> baseSubscriber = new BaseSubscriber<T>() {
@Override
protected void hookOnSubscribe(Subscription subscription) {
super.hookOnSubscribe(subscription);
}
@Override
protected void hookOnNext(T value) {
logger.debug("hookOnNext: {}, count:{}", value, count.get());
}
@Override
protected void hookOnCancel() {
// Treat cancellation as a failure so the semaphore permit is released.
this.hookOnError(new CancellationException());
}
@Override
protected void hookOnComplete() {
successMeter.mark();
concurrencyControlSemaphore.release();
AsyncBenchmark.this.onSuccess();
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
@Override
protected void hookOnError(Throwable throwable) {
failureMeter.mark();
logger.error("Encountered failure {} on thread {}" ,
throwable.getMessage(), Thread.currentThread().getName(), throwable);
concurrencyControlSemaphore.release();
AsyncBenchmark.this.onError(throwable);
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
};
performWorkload(baseSubscriber, i);
}
// Wait until every issued operation has signalled completion or failure.
synchronized (count) {
while (count.get() < i) {
count.wait();
}
}
long endTime = System.currentTimeMillis();
logger.info("[{}] operations performed in [{}] seconds.",
configuration.getNumberOfOperations(), (int) ((endTime - startTime) / 1000));
reporter.report();
reporter.close();
}
/**
 * Builds a synthetic benchmark document.
 *
 * The document id doubles as the partition key value, and
 * configuration.getDocumentDataFieldCount() filler fields named
 * "dataField0".."dataFieldN-1" carry the given payload.
 *
 * @param idString       id (and partition key value) of the document
 * @param dataFieldValue payload written into every filler field
 * @return the populated document
 */
public PojoizedJson generateDocument(String idString, String dataFieldValue) {
    PojoizedJson document = new PojoizedJson();
    Map<String, String> fields = document.getInstance();
    fields.put("id", idString);
    // The partition key value mirrors the id so point reads can reuse the id.
    fields.put(partitionKey, idString);
    int fillerFieldCount = configuration.getDocumentDataFieldCount();
    for (int index = 0; index < fillerFieldCount; index++) {
        fields.put("dataField" + index, dataFieldValue);
    }
    return document;
}
} | class AsyncBenchmark<T> {
private final MetricRegistry metricsRegistry = new MetricRegistry();
private final ScheduledReporter reporter;
private Meter successMeter;
private Meter failureMeter;
private boolean databaseCreated;
private boolean collectionCreated;
final Logger logger;
final CosmosAsyncClient cosmosClient;
CosmosAsyncContainer cosmosAsyncContainer;
CosmosAsyncDatabase cosmosAsyncDatabase;
final String partitionKey;
final Configuration configuration;
final List<PojoizedJson> docsToRead;
final Semaphore concurrencyControlSemaphore;
Timer latency;
/**
 * Wires up the benchmark run (revised variant): reuses an existing database and
 * collection when present, and only creates them — recording the fact in
 * databaseCreated/collectionCreated — when the read returns 404. shutdown()
 * then deletes only what this constructor created.
 */
AsyncBenchmark(Configuration cfg) {
cosmosClient = new CosmosClientBuilder()
.endpoint(cfg.getServiceEndpoint())
.key(cfg.getMasterKey())
.connectionPolicy(cfg.getConnectionPolicy())
.consistencyLevel(cfg.getConsistencyLevel())
.buildAsyncClient();
configuration = cfg;
logger = LoggerFactory.getLogger(this.getClass());
// Reuse the database when it exists; create it (and remember we did) on 404.
try {
cosmosAsyncDatabase = cosmosClient.getDatabase(this.configuration.getDatabaseId()).read().block().getDatabase();
} catch (CosmosClientException e) {
if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) {
cosmosAsyncDatabase = cosmosClient.createDatabase(cfg.getDatabaseId()).block().getDatabase();
logger.info("Database {} is created for this test", this.configuration.getDatabaseId());
databaseCreated = true;
} else {
throw e;
}
}
// Same pattern for the collection inside the (possibly pre-existing) database.
try {
cosmosAsyncContainer = cosmosAsyncDatabase.getContainer(this.configuration.getCollectionId()).read().block().getContainer();
} catch (CosmosClientException e) {
if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) {
cosmosAsyncContainer =
cosmosAsyncDatabase.createContainer(this.configuration.getCollectionId(), Configuration.DEFAULT_PARTITION_KEY_PATH, this.configuration.getThroughput()).block().getContainer();
logger.info("Collection {} is created for this test", this.configuration.getCollectionId());
collectionCreated = true;
} else {
throw e;
}
}
// Partition key property name derived from the container definition, e.g. "/pk" -> "pk".
partitionKey = cosmosAsyncContainer.read().block().getProperties().getPartitionKeyDefinition()
.getPaths().iterator().next().split("/")[1];
concurrencyControlSemaphore = new Semaphore(cfg.getConcurrency());
ArrayList<Flux<PojoizedJson>> createDocumentObservables = new ArrayList<>();
// Write workloads create their own documents; other workloads read a pre-created set.
if (configuration.getOperationType() != Configuration.Operation.WriteLatency
&& configuration.getOperationType() != Configuration.Operation.WriteThroughput
&& configuration.getOperationType() != Configuration.Operation.ReadMyWrites) {
String dataFieldValue = RandomStringUtils.randomAlphabetic(cfg.getDocumentDataFieldSize());
for (int i = 0; i < cfg.getNumberOfPreCreatedDocuments(); i++) {
String uuid = UUID.randomUUID().toString();
PojoizedJson newDoc = generateDocument(uuid, dataFieldValue);
Flux<PojoizedJson> obs = cosmosAsyncContainer.createItem(newDoc).map(resp -> {
PojoizedJson x =
resp.getItem();
return x;
}
).flux();
createDocumentObservables.add(obs);
}
}
// Execute the pending creates with a merge concurrency of 100 and keep the results.
docsToRead = Flux.merge(Flux.fromIterable(createDocumentObservables), 100).collectList().block();
init();
if (configuration.isEnableJvmStats()) {
metricsRegistry.register("gc", new GarbageCollectorMetricSet());
metricsRegistry.register("threads", new CachedThreadStatesGaugeSet(10, TimeUnit.SECONDS));
metricsRegistry.register("memory", new MemoryUsageGaugeSet());
}
// Prefer Graphite reporting when an endpoint is configured; otherwise report to console.
if (configuration.getGraphiteEndpoint() != null) {
final Graphite graphite = new Graphite(new InetSocketAddress(configuration.getGraphiteEndpoint(), configuration.getGraphiteEndpointPort()));
reporter = GraphiteReporter.forRegistry(metricsRegistry)
.prefixedWith(configuration.getOperationType().name())
.convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS)
.filter(MetricFilter.ALL)
.build(graphite);
} else {
reporter = ConsoleReporter.forRegistry(metricsRegistry).convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS).build();
}
// Bridge SDK telemetry into any configured Micrometer registries.
MeterRegistry registry = configuration.getAzureMonitorMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
registry = configuration.getGraphiteMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
}
// Hook for subclasses to run extra setup once resources are ready; default no-op.
protected void init() {
}
// Hook invoked after every successfully completed operation; default no-op.
protected void onSuccess() {
}
// Hook invoked after every failed operation; default no-op.
protected void onError(Throwable throwable) {
}
// Issues the i-th benchmark operation and subscribes the given subscriber to it.
protected abstract void performWorkload(BaseSubscriber<T> baseSubscriber, long i) throws Exception;
/**
 * Gate for the benchmark loop: decides whether one more operation should be issued.
 *
 * Semantics (unchanged from the original):
 *  - no max duration configured -> bounded by the operation count only
 *  - max duration elapsed       -> stop
 *  - negative operation count   -> unbounded until the duration elapses
 *  - otherwise                  -> bounded by the operation count
 *
 * @param startTimeMillis epoch millis at which the run started
 * @param iterationCount  number of operations issued so far
 * @return true if another iteration should run
 */
private boolean shouldContinue(long startTimeMillis, long iterationCount) {
    final Duration maxRuntime = configuration.getMaxRunningTimeDuration();
    final int operationLimit = configuration.getNumberOfOperations();

    if (maxRuntime == null) {
        // Only the operation count bounds the run.
        return iterationCount < operationLimit;
    }

    final long deadlineMillis = startTimeMillis + maxRuntime.toMillis();
    if (System.currentTimeMillis() > deadlineMillis) {
        // Wall-clock budget exhausted.
        return false;
    }

    // A negative limit means "no operation bound"; keep going until the deadline.
    return operationLimit < 0 || iterationCount < operationLimit;
}
/**
 * Drives the benchmark: registers success/failure meters, issues operations
 * while shouldContinue(...) allows, blocks until every in-flight operation has
 * completed, then emits a final metrics report and closes the reporter.
 *
 * NOTE(review): the meter-name string literals below appear truncated in this
 * copy of the file (no closing quote) — verify against the original source.
 */
void run() throws Exception {
successMeter = metricsRegistry.meter("
failureMeter = metricsRegistry.meter("
// Per-operation latency is only tracked for latency-style and query workloads.
switch (configuration.getOperationType()) {
case ReadLatency:
case WriteLatency:
case QueryInClauseParallel:
case QueryCross:
case QuerySingle:
case QuerySingleMany:
case QueryParallel:
case QueryOrderby:
case QueryAggregate:
case QueryAggregateTopOrderby:
case QueryTopOrderby:
case Mixed:
latency = metricsRegistry.timer("Latency");
break;
default:
break;
}
reporter.start(configuration.getPrintingInterval(), TimeUnit.SECONDS);
long startTime = System.currentTimeMillis();
// Number of finished operations (success or failure); also serves as the
// monitor that the main thread waits on below.
AtomicLong count = new AtomicLong(0);
long i;
for ( i = 0; shouldContinue(startTime, i); i++) {
BaseSubscriber<T> baseSubscriber = new BaseSubscriber<T>() {
@Override
protected void hookOnSubscribe(Subscription subscription) {
super.hookOnSubscribe(subscription);
}
@Override
protected void hookOnNext(T value) {
logger.debug("hookOnNext: {}, count:{}", value, count.get());
}
@Override
protected void hookOnCancel() {
// Treat cancellation as a failure so the semaphore permit is released.
this.hookOnError(new CancellationException());
}
@Override
protected void hookOnComplete() {
successMeter.mark();
concurrencyControlSemaphore.release();
AsyncBenchmark.this.onSuccess();
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
@Override
protected void hookOnError(Throwable throwable) {
failureMeter.mark();
logger.error("Encountered failure {} on thread {}" ,
throwable.getMessage(), Thread.currentThread().getName(), throwable);
concurrencyControlSemaphore.release();
AsyncBenchmark.this.onError(throwable);
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
};
performWorkload(baseSubscriber, i);
}
// Wait until every issued operation has signalled completion or failure.
synchronized (count) {
while (count.get() < i) {
count.wait();
}
}
long endTime = System.currentTimeMillis();
logger.info("[{}] operations performed in [{}] seconds.",
configuration.getNumberOfOperations(), (int) ((endTime - startTime) / 1000));
reporter.report();
reporter.close();
}
/**
 * Builds a synthetic benchmark document.
 *
 * The document id doubles as the partition key value, and
 * configuration.getDocumentDataFieldCount() filler fields named
 * "dataField0".."dataFieldN-1" carry the given payload.
 *
 * @param idString       id (and partition key value) of the document
 * @param dataFieldValue payload written into every filler field
 * @return the populated document
 */
public PojoizedJson generateDocument(String idString, String dataFieldValue) {
    PojoizedJson document = new PojoizedJson();
    Map<String, String> fields = document.getInstance();
    fields.put("id", idString);
    // The partition key value mirrors the id so point reads can reuse the id.
    fields.put(partitionKey, idString);
    int fillerFieldCount = configuration.getDocumentDataFieldCount();
    for (int index = 0; index < fillerFieldCount; index++) {
        fields.put("dataField" + index, dataFieldValue);
    }
    return document;
}
} |
if the application deletes it should create too. when a user runs an application, the user expects to either 1. application handles the collection creation/deletion 2. the user provides the collection and the app doesn't create nor deletes. | private void createClients() {
String csvFile = "clientHostAndKey.txt";
String line = "";
String splitBy = ";";
try (BufferedReader br = new BufferedReader(new FileReader(csvFile))) {
while ((line = br.readLine()) != null) {
String[] hostAndKey = line.split(splitBy);
if (hostAndKey.length >= 2) {
String endpoint = hostAndKey[0].substring(hostAndKey[0].indexOf(ACCOUNT_ENDPOINT_TAG) + ACCOUNT_ENDPOINT_TAG.length());
String key = hostAndKey[1].substring(hostAndKey[1].indexOf(ACCOUNT_KEY_TAG) + ACCOUNT_KEY_TAG.length());
CosmosAsyncClient asyncClient = new CosmosClientBuilder()
.endpoint(endpoint)
.key(key)
.connectionPolicy(configuration.getConnectionPolicy())
.consistencyLevel(configuration.getConsistencyLevel())
.connectionReuseAcrossClientsEnabled(true)
.buildAsyncClient();
List<PojoizedJson> docsToRead = new ArrayList<>();
CosmosAsyncDatabase cosmosAsyncDatabase = asyncClient.createDatabaseIfNotExists(this.configuration.getDatabaseId()).block().getDatabase();
CosmosAsyncContainer cosmosAsyncContainer =
cosmosAsyncDatabase.createContainerIfNotExists(configuration.getCollectionId(), Configuration.PARTITION_KEY, configuration.getThroughput()).block().getContainer();
String partitionKey = cosmosAsyncContainer.read().block().getProperties().getPartitionKeyDefinition()
.getPaths().iterator().next().split("/")[1];
String dataFieldValue = RandomStringUtils.randomAlphabetic(this.configuration.getDocumentDataFieldSize());
ArrayList<Flux<PojoizedJson>> createDocumentObservables = new ArrayList<>();
for (int i = 0; i < this.configuration.getNumberOfPreCreatedDocuments(); i++) {
String uuid = UUID.randomUUID().toString();
com.azure.cosmos.benchmark.PojoizedJson newDoc = generateDocument(uuid, dataFieldValue, partitionKey);
Flux<PojoizedJson> obs = cosmosAsyncContainer.createItem(newDoc).map(resp -> {
com.azure.cosmos.benchmark.PojoizedJson x =
resp.getItem();
return x;
}
).flux();
createDocumentObservables.add(obs);
}
docsToRead = Flux.merge(Flux.fromIterable(createDocumentObservables), 100).collectList().block();
logger.info("Client have been initialized with data created for host {}", hostAndKey[0]);
clientDocsMap.put(asyncClient, docsToRead);
}
}
logger.info("Total number of client created for ReadThroughputWithMultipleClient {}", clientDocsMap.size());
} catch (FileNotFoundException e) {
logger.error(e.getMessage());
} catch (IOException e) {
logger.error(e.getMessage());
}
MeterRegistry registry = configuration.getAzureMonitorMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
registry = configuration.getGraphiteMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
} | cosmosAsyncDatabase.createContainerIfNotExists(configuration.getCollectionId(), Configuration.PARTITION_KEY, configuration.getThroughput()).block().getContainer(); | private void createClients() {
String csvFile = "clientHostAndKey.txt";
String line = "";
String splitBy = ";";
try (BufferedReader br = new BufferedReader(new FileReader(csvFile))) {
while ((line = br.readLine()) != null) {
String[] hostAndKey = line.split(splitBy);
if (hostAndKey.length >= 2) {
String endpoint = hostAndKey[0].substring(hostAndKey[0].indexOf(ACCOUNT_ENDPOINT_TAG) + ACCOUNT_ENDPOINT_TAG.length());
String key = hostAndKey[1].substring(hostAndKey[1].indexOf(ACCOUNT_KEY_TAG) + ACCOUNT_KEY_TAG.length());
CosmosAsyncClient asyncClient = new CosmosClientBuilder()
.endpoint(endpoint)
.key(key)
.connectionPolicy(configuration.getConnectionPolicy())
.consistencyLevel(configuration.getConsistencyLevel())
.connectionReuseAcrossClientsEnabled(true)
.buildAsyncClient();
List<PojoizedJson> docsToRead = new ArrayList<>();
CosmosAsyncDatabase cosmosAsyncDatabase = null;
CosmosAsyncContainer cosmosAsyncContainer = null;
boolean databaseCreated = false;
try {
cosmosAsyncDatabase = asyncClient.getDatabase(this.configuration.getDatabaseId()).read().block().getDatabase();
} catch (CosmosClientException e) {
if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) {
cosmosAsyncDatabase = asyncClient.createDatabase(this.configuration.getDatabaseId()).block().getDatabase();
logger.info("Database {} is created for this test on host {}", this.configuration.getDatabaseId(), endpoint);
databaseCreated = true;
databaseListToClear.add(cosmosAsyncDatabase);
} else {
throw e;
}
}
try {
cosmosAsyncContainer = cosmosAsyncDatabase.getContainer(this.configuration.getCollectionId()).read().block().getContainer();
} catch (CosmosClientException e) {
if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) {
cosmosAsyncContainer =
cosmosAsyncDatabase.createContainer(this.configuration.getCollectionId(), Configuration.DEFAULT_PARTITION_KEY_PATH, this.configuration.getThroughput()).block().getContainer();
logger.info("Collection {} is created for this test on host {}", this.configuration.getCollectionId(), endpoint);
if(!databaseCreated) {
collectionListToClear.add(cosmosAsyncContainer);
}
} else {
throw e;
}
}
String partitionKey = cosmosAsyncContainer.read().block().getProperties().getPartitionKeyDefinition()
.getPaths().iterator().next().split("/")[1];
String dataFieldValue = RandomStringUtils.randomAlphabetic(this.configuration.getDocumentDataFieldSize());
ArrayList<Flux<PojoizedJson>> createDocumentObservables = new ArrayList<>();
for (int i = 0; i < this.configuration.getNumberOfPreCreatedDocuments(); i++) {
String uuid = UUID.randomUUID().toString();
com.azure.cosmos.benchmark.PojoizedJson newDoc = generateDocument(uuid, dataFieldValue, partitionKey);
Flux<PojoizedJson> obs = cosmosAsyncContainer.createItem(newDoc).map(resp -> {
com.azure.cosmos.benchmark.PojoizedJson x =
resp.getItem();
return x;
}
).flux();
createDocumentObservables.add(obs);
}
docsToRead = Flux.merge(Flux.fromIterable(createDocumentObservables), 100).collectList().block();
logger.info("Client have been initialized with data created for host {}", hostAndKey[0]);
clientDocsMap.put(asyncClient, docsToRead);
}
}
logger.info("Total number of client created for ReadThroughputWithMultipleClient {}", clientDocsMap.size());
} catch (FileNotFoundException e) {
logger.error(e.getMessage());
} catch (IOException e) {
logger.error(e.getMessage());
}
MeterRegistry registry = configuration.getAzureMonitorMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
registry = configuration.getGraphiteMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
} | class AsynReadWithMultipleClients<T> {
private final static String ACCOUNT_ENDPOINT_TAG = "AccountEndpoint=";
private final static String ACCOUNT_KEY_TAG = "AccountKey=";
private final Semaphore concurrencyControlSemaphore;
private final Logger logger;
private final Configuration configuration;
private MetricRegistry metricsRegistry = new MetricRegistry();
private ScheduledReporter reporter;
private Meter successMeter;
private Meter failureMeter;
private Timer latency;
private Map<CosmosAsyncClient, List<PojoizedJson>> clientDocsMap = new HashMap<>();
/**
 * Sets up the multi-client read benchmark: builds one client per configured
 * account and pre-creates documents via createClients(), then configures
 * Graphite or console metrics reporting and the concurrency-limiting semaphore.
 */
AsynReadWithMultipleClients(Configuration cfg) {
logger = LoggerFactory.getLogger(this.getClass());
this.configuration = cfg;
// Reads per-account credentials from clientHostAndKey.txt and seeds documents.
createClients();
// Prefer Graphite reporting when an endpoint is configured; otherwise report to console.
if (configuration.getGraphiteEndpoint() != null) {
final Graphite graphite = new Graphite(new InetSocketAddress(configuration.getGraphiteEndpoint(), configuration.getGraphiteEndpointPort()));
reporter = GraphiteReporter.forRegistry(metricsRegistry)
.prefixedWith(configuration.getOperationType().name())
.convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS)
.filter(MetricFilter.ALL)
.build(graphite);
} else {
reporter = ConsoleReporter.forRegistry(metricsRegistry).convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS).build();
}
concurrencyControlSemaphore = new Semaphore(cfg.getConcurrency());
}
/**
 * Drives the multi-client read workload: registers meters, issues reads via
 * performWorkload(...) while shouldContinue(...) allows, blocks until every
 * in-flight read has completed, then reports and closes the metrics reporter.
 *
 * NOTE(review): the meter-name string literals below appear truncated in this
 * copy of the file (no closing quote) — verify against the original source.
 */
void run() throws Exception {
successMeter = metricsRegistry.meter("
failureMeter = metricsRegistry.meter("
latency = metricsRegistry.timer("Latency");
reporter.start(configuration.getPrintingInterval(), TimeUnit.SECONDS);
// Number of finished operations (success or failure); also the monitor that
// the main thread waits on below.
AtomicLong count = new AtomicLong(0);
long i;
long startTime = System.currentTimeMillis();
for (i = 0; shouldContinue(startTime, i); i++) {
BaseSubscriber<PojoizedJson> baseSubscriber = new BaseSubscriber<PojoizedJson>() {
@Override
protected void hookOnSubscribe(Subscription subscription) {
super.hookOnSubscribe(subscription);
}
@Override
protected void hookOnNext(PojoizedJson value) {
logger.debug("hookOnNext: {}, count:{}", value, count.get());
}
@Override
protected void hookOnCancel() {
// Treat cancellation as a failure so the semaphore permit is released.
this.hookOnError(new CancellationException());
}
@Override
protected void hookOnComplete() {
successMeter.mark();
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onSuccess();
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
@Override
protected void hookOnError(Throwable throwable) {
failureMeter.mark();
logger.error("Encountered failure {} on thread {}",
throwable.getMessage(), Thread.currentThread().getName(), throwable);
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onError(throwable);
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
};
performWorkload(baseSubscriber, i);
}
// Wait until every issued operation has signalled completion or failure.
synchronized (count) {
while (count.get() < i) {
count.wait();
}
}
long endTime = System.currentTimeMillis();
logger.info("[{}] operations performed in [{}] seconds.",
configuration.getNumberOfOperations(), (int) ((endTime - startTime) / 1000));
reporter.report();
reporter.close();
}
/**
 * Deletes the test container on every configured account and closes the clients.
 *
 * NOTE(review): this unconditionally deletes the collection even when it
 * existed before the test ran; per the review discussion, deletion should be
 * limited to resources this test itself created.
 */
void shutdown() {
for (CosmosAsyncClient asyncClient : clientDocsMap.keySet()) {
asyncClient.getDatabase(configuration.getDatabaseId()).getContainer(this.configuration.getCollectionId()).delete().block();
asyncClient.close();
}
logger.info("Deleted test containers {} for all the hosts" , this.configuration.getCollectionId());
}
// Hook invoked after every successfully completed read; default no-op.
protected void onSuccess() {
}
// Hook invoked after every failed read; default no-op.
protected void onError(Throwable throwable) {
}
/**
 * Builds a synthetic benchmark document for the multi-client read workload.
 *
 * @param idString       id (and partition key value) of the document
 * @param dataFieldValue payload written into every filler field
 * @param partitionKey   name of the partition key property
 * @return the populated document
 */
private PojoizedJson generateDocument(String idString, String dataFieldValue, String partitionKey) {
    com.azure.cosmos.benchmark.PojoizedJson document = new com.azure.cosmos.benchmark.PojoizedJson();
    Map<String, String> fields = document.getInstance();
    fields.put("id", idString);
    // The id doubles as the partition key value.
    fields.put(partitionKey, idString);
    int fillerFieldCount = configuration.getDocumentDataFieldCount();
    for (int index = 0; index < fillerFieldCount; index++) {
        fields.put("dataField" + index, dataFieldValue);
    }
    return document;
}
/**
 * Gate for the benchmark loop: decides whether one more operation should be issued.
 *
 * Semantics (unchanged from the original):
 *  - no max duration configured -> bounded by the operation count only
 *  - max duration elapsed       -> stop
 *  - negative operation count   -> unbounded until the duration elapses
 *  - otherwise                  -> bounded by the operation count
 *
 * @param startTimeMillis epoch millis at which the run started
 * @param iterationCount  number of operations issued so far
 * @return true if another iteration should run
 */
private boolean shouldContinue(long startTimeMillis, long iterationCount) {
    final Duration maxRuntime = configuration.getMaxRunningTimeDuration();
    final int operationLimit = configuration.getNumberOfOperations();

    if (maxRuntime == null) {
        // Only the operation count bounds the run.
        return iterationCount < operationLimit;
    }

    final long deadlineMillis = startTimeMillis + maxRuntime.toMillis();
    if (System.currentTimeMillis() > deadlineMillis) {
        // Wall-clock budget exhausted.
        return false;
    }

    // A negative limit means "no operation bound"; keep going until the deadline.
    return operationLimit < 0 || iterationCount < operationLimit;
}
/**
 * Issues one point-read operation, round-robining across the configured clients
 * and each client's pre-created documents.
 *
 * @param baseSubscriber downstream subscriber receiving the read result
 * @param i              iteration index used to pick the client and document
 * @throws InterruptedException if interrupted while acquiring the concurrency permit
 */
private void performWorkload(BaseSubscriber<PojoizedJson> baseSubscriber, long i) throws InterruptedException {
    Mono<PojoizedJson> result;
    int clientIndex = (int) (i % clientDocsMap.size());
    CosmosAsyncClient client = (CosmosAsyncClient) clientDocsMap.keySet().toArray()[clientIndex];
    // Fix: the original computed "(int) i % size", casting BEFORE the modulo;
    // once i exceeds Integer.MAX_VALUE the narrowed value can be negative and
    // produce a negative index. Take the remainder in long arithmetic first.
    int docIndex = (int) (i % clientDocsMap.get(client).size());
    PojoizedJson doc = clientDocsMap.get(client).get(docIndex);
    String partitionKeyValue = doc.getId();
    result = client.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).readItem(doc.getId(),
        new PartitionKey(partitionKeyValue),
        PojoizedJson.class).map(CosmosAsyncItemResponse::getItem);
    // Bound the number of in-flight reads; the permit is released in the
    // subscriber's completion/error hooks.
    concurrencyControlSemaphore.acquire();
    AsyncReadBenchmark.LatencySubscriber<PojoizedJson> latencySubscriber = new AsyncReadBenchmark.LatencySubscriber<>(baseSubscriber);
    latencySubscriber.context = latency.time();
    result.subscribeOn(Schedulers.parallel()).subscribe(latencySubscriber);
}
} | class AsynReadWithMultipleClients<T> {
private final static String ACCOUNT_ENDPOINT_TAG = "AccountEndpoint=";
private final static String ACCOUNT_KEY_TAG = "AccountKey=";
private final Semaphore concurrencyControlSemaphore;
private final Logger logger;
private final Configuration configuration;
private MetricRegistry metricsRegistry = new MetricRegistry();
private ScheduledReporter reporter;
private Meter successMeter;
private Meter failureMeter;
private Timer latency;
private Map<CosmosAsyncClient, List<PojoizedJson>> clientDocsMap = new HashMap<>();
private List<CosmosAsyncDatabase> databaseListToClear = new ArrayList<>();
private List<CosmosAsyncContainer> collectionListToClear = new ArrayList<>();
/**
 * Sets up the multi-client read benchmark: builds one client per configured
 * account and pre-creates documents via createClients(), then configures
 * Graphite or console metrics reporting and the concurrency-limiting semaphore.
 */
AsynReadWithMultipleClients(Configuration cfg) {
logger = LoggerFactory.getLogger(this.getClass());
this.configuration = cfg;
// Reads per-account credentials from clientHostAndKey.txt and seeds documents.
createClients();
// Prefer Graphite reporting when an endpoint is configured; otherwise report to console.
if (configuration.getGraphiteEndpoint() != null) {
final Graphite graphite = new Graphite(new InetSocketAddress(configuration.getGraphiteEndpoint(), configuration.getGraphiteEndpointPort()));
reporter = GraphiteReporter.forRegistry(metricsRegistry)
.prefixedWith(configuration.getOperationType().name())
.convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS)
.filter(MetricFilter.ALL)
.build(graphite);
} else {
reporter = ConsoleReporter.forRegistry(metricsRegistry).convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS).build();
}
concurrencyControlSemaphore = new Semaphore(cfg.getConcurrency());
}
/**
 * Drives the multi-client read workload: registers meters, issues reads via
 * performWorkload(...) while shouldContinue(...) allows, blocks until every
 * in-flight read has completed, then reports and closes the metrics reporter.
 *
 * NOTE(review): the meter-name string literals below appear truncated in this
 * copy of the file (no closing quote) — verify against the original source.
 */
void run() throws Exception {
successMeter = metricsRegistry.meter("
failureMeter = metricsRegistry.meter("
latency = metricsRegistry.timer("Latency");
reporter.start(configuration.getPrintingInterval(), TimeUnit.SECONDS);
// Number of finished operations (success or failure); also the monitor that
// the main thread waits on below.
AtomicLong count = new AtomicLong(0);
long i;
long startTime = System.currentTimeMillis();
for (i = 0; shouldContinue(startTime, i); i++) {
BaseSubscriber<PojoizedJson> baseSubscriber = new BaseSubscriber<PojoizedJson>() {
@Override
protected void hookOnSubscribe(Subscription subscription) {
super.hookOnSubscribe(subscription);
}
@Override
protected void hookOnNext(PojoizedJson value) {
logger.debug("hookOnNext: {}, count:{}", value, count.get());
}
@Override
protected void hookOnCancel() {
// Treat cancellation as a failure so the semaphore permit is released.
this.hookOnError(new CancellationException());
}
@Override
protected void hookOnComplete() {
successMeter.mark();
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onSuccess();
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
@Override
protected void hookOnError(Throwable throwable) {
failureMeter.mark();
logger.error("Encountered failure {} on thread {}",
throwable.getMessage(), Thread.currentThread().getName(), throwable);
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onError(throwable);
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
};
performWorkload(baseSubscriber, i);
}
// Wait until every issued operation has signalled completion or failure.
synchronized (count) {
while (count.get() < i) {
count.wait();
}
}
long endTime = System.currentTimeMillis();
logger.info("[{}] operations performed in [{}] seconds.",
configuration.getNumberOfOperations(), (int) ((endTime - startTime) / 1000));
reporter.report();
reporter.close();
}
/**
 * Cleans up only the resources this test created, then closes every client.
 * Databases/collections that pre-existed the test are left untouched.
 */
void shutdown() {
    // Databases created by this run (deleting a database removes its collections too).
    for (CosmosAsyncDatabase database : databaseListToClear) {
        database.delete().block();
    }
    if (!databaseListToClear.isEmpty()) {
        logger.info("Deleted database {} created on accounts for this test", this.configuration.getDatabaseId());
    }

    // Collections created by this run inside pre-existing databases.
    for (CosmosAsyncContainer container : collectionListToClear) {
        container.delete().block();
    }
    if (!collectionListToClear.isEmpty()) {
        logger.info("Deleted collection {} created on accounts for this test", this.configuration.getCollectionId());
    }

    for (CosmosAsyncClient asyncClient : clientDocsMap.keySet()) {
        asyncClient.close();
    }
}
protected void onSuccess() {
}
protected void onError(Throwable throwable) {
}
private PojoizedJson generateDocument(String idString, String dataFieldValue, String partitionKey) {
com.azure.cosmos.benchmark.PojoizedJson instance = new com.azure.cosmos.benchmark.PojoizedJson();
Map<String, String> properties = instance.getInstance();
properties.put("id", idString);
properties.put(partitionKey, idString);
for (int i = 0; i < configuration.getDocumentDataFieldCount(); i++) {
properties.put("dataField" + i, dataFieldValue);
}
return instance;
}
private boolean shouldContinue(long startTimeMillis, long iterationCount) {
Duration maxDurationTime = configuration.getMaxRunningTimeDuration();
int maxNumberOfOperations = configuration.getNumberOfOperations();
if (maxDurationTime == null) {
return iterationCount < maxNumberOfOperations;
}
if (startTimeMillis + maxDurationTime.toMillis() < System.currentTimeMillis()) {
return false;
}
if (maxNumberOfOperations < 0) {
return true;
}
return iterationCount < maxNumberOfOperations;
}
private void performWorkload(BaseSubscriber<PojoizedJson> baseSubscriber, long i) throws InterruptedException {
Mono<PojoizedJson> result;
int clientIndex = (int) (i % clientDocsMap.size());
CosmosAsyncClient client = (CosmosAsyncClient) clientDocsMap.keySet().toArray()[clientIndex];
int docIndex = (int) i % clientDocsMap.get(client).size();
PojoizedJson doc = clientDocsMap.get(client).get(docIndex);
String partitionKeyValue = doc.getId();
result = client.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).readItem(doc.getId(),
new PartitionKey(partitionKeyValue),
PojoizedJson.class).map(CosmosAsyncItemResponse::getItem);
concurrencyControlSemaphore.acquire();
AsyncReadBenchmark.LatencySubscriber<PojoizedJson> latencySubscriber = new AsyncReadBenchmark.LatencySubscriber<>(baseSubscriber);
latencySubscriber.context = latency.time();
result.subscribeOn(Schedulers.parallel()).subscribe(latencySubscriber);
}
} |
Please check current design , it is more seem less in respect to customer expectation , code will only create resource if it is not present , and only delete newly created resource and not the existing | private void createClients() {
String csvFile = "clientHostAndKey.txt";
String line = "";
String splitBy = ";";
try (BufferedReader br = new BufferedReader(new FileReader(csvFile))) {
while ((line = br.readLine()) != null) {
String[] hostAndKey = line.split(splitBy);
if (hostAndKey.length >= 2) {
String endpoint = hostAndKey[0].substring(hostAndKey[0].indexOf(ACCOUNT_ENDPOINT_TAG) + ACCOUNT_ENDPOINT_TAG.length());
String key = hostAndKey[1].substring(hostAndKey[1].indexOf(ACCOUNT_KEY_TAG) + ACCOUNT_KEY_TAG.length());
CosmosAsyncClient asyncClient = new CosmosClientBuilder()
.endpoint(endpoint)
.key(key)
.connectionPolicy(configuration.getConnectionPolicy())
.consistencyLevel(configuration.getConsistencyLevel())
.connectionReuseAcrossClientsEnabled(true)
.buildAsyncClient();
List<PojoizedJson> docsToRead = new ArrayList<>();
CosmosAsyncDatabase cosmosAsyncDatabase = asyncClient.createDatabaseIfNotExists(this.configuration.getDatabaseId()).block().getDatabase();
CosmosAsyncContainer cosmosAsyncContainer =
cosmosAsyncDatabase.createContainerIfNotExists(configuration.getCollectionId(), Configuration.PARTITION_KEY, configuration.getThroughput()).block().getContainer();
String partitionKey = cosmosAsyncContainer.read().block().getProperties().getPartitionKeyDefinition()
.getPaths().iterator().next().split("/")[1];
String dataFieldValue = RandomStringUtils.randomAlphabetic(this.configuration.getDocumentDataFieldSize());
ArrayList<Flux<PojoizedJson>> createDocumentObservables = new ArrayList<>();
for (int i = 0; i < this.configuration.getNumberOfPreCreatedDocuments(); i++) {
String uuid = UUID.randomUUID().toString();
com.azure.cosmos.benchmark.PojoizedJson newDoc = generateDocument(uuid, dataFieldValue, partitionKey);
Flux<PojoizedJson> obs = cosmosAsyncContainer.createItem(newDoc).map(resp -> {
com.azure.cosmos.benchmark.PojoizedJson x =
resp.getItem();
return x;
}
).flux();
createDocumentObservables.add(obs);
}
docsToRead = Flux.merge(Flux.fromIterable(createDocumentObservables), 100).collectList().block();
logger.info("Client have been initialized with data created for host {}", hostAndKey[0]);
clientDocsMap.put(asyncClient, docsToRead);
}
}
logger.info("Total number of client created for ReadThroughputWithMultipleClient {}", clientDocsMap.size());
} catch (FileNotFoundException e) {
logger.error(e.getMessage());
} catch (IOException e) {
logger.error(e.getMessage());
}
MeterRegistry registry = configuration.getAzureMonitorMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
registry = configuration.getGraphiteMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
} | cosmosAsyncDatabase.createContainerIfNotExists(configuration.getCollectionId(), Configuration.PARTITION_KEY, configuration.getThroughput()).block().getContainer(); | private void createClients() {
String csvFile = "clientHostAndKey.txt";
String line = "";
String splitBy = ";";
try (BufferedReader br = new BufferedReader(new FileReader(csvFile))) {
while ((line = br.readLine()) != null) {
String[] hostAndKey = line.split(splitBy);
if (hostAndKey.length >= 2) {
String endpoint = hostAndKey[0].substring(hostAndKey[0].indexOf(ACCOUNT_ENDPOINT_TAG) + ACCOUNT_ENDPOINT_TAG.length());
String key = hostAndKey[1].substring(hostAndKey[1].indexOf(ACCOUNT_KEY_TAG) + ACCOUNT_KEY_TAG.length());
CosmosAsyncClient asyncClient = new CosmosClientBuilder()
.endpoint(endpoint)
.key(key)
.connectionPolicy(configuration.getConnectionPolicy())
.consistencyLevel(configuration.getConsistencyLevel())
.connectionReuseAcrossClientsEnabled(true)
.buildAsyncClient();
List<PojoizedJson> docsToRead = new ArrayList<>();
CosmosAsyncDatabase cosmosAsyncDatabase = null;
CosmosAsyncContainer cosmosAsyncContainer = null;
boolean databaseCreated = false;
try {
cosmosAsyncDatabase = asyncClient.getDatabase(this.configuration.getDatabaseId()).read().block().getDatabase();
} catch (CosmosClientException e) {
if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) {
cosmosAsyncDatabase = asyncClient.createDatabase(this.configuration.getDatabaseId()).block().getDatabase();
logger.info("Database {} is created for this test on host {}", this.configuration.getDatabaseId(), endpoint);
databaseCreated = true;
databaseListToClear.add(cosmosAsyncDatabase);
} else {
throw e;
}
}
try {
cosmosAsyncContainer = cosmosAsyncDatabase.getContainer(this.configuration.getCollectionId()).read().block().getContainer();
} catch (CosmosClientException e) {
if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) {
cosmosAsyncContainer =
cosmosAsyncDatabase.createContainer(this.configuration.getCollectionId(), Configuration.DEFAULT_PARTITION_KEY_PATH, this.configuration.getThroughput()).block().getContainer();
logger.info("Collection {} is created for this test on host {}", this.configuration.getCollectionId(), endpoint);
if(!databaseCreated) {
collectionListToClear.add(cosmosAsyncContainer);
}
} else {
throw e;
}
}
String partitionKey = cosmosAsyncContainer.read().block().getProperties().getPartitionKeyDefinition()
.getPaths().iterator().next().split("/")[1];
String dataFieldValue = RandomStringUtils.randomAlphabetic(this.configuration.getDocumentDataFieldSize());
ArrayList<Flux<PojoizedJson>> createDocumentObservables = new ArrayList<>();
for (int i = 0; i < this.configuration.getNumberOfPreCreatedDocuments(); i++) {
String uuid = UUID.randomUUID().toString();
com.azure.cosmos.benchmark.PojoizedJson newDoc = generateDocument(uuid, dataFieldValue, partitionKey);
Flux<PojoizedJson> obs = cosmosAsyncContainer.createItem(newDoc).map(resp -> {
com.azure.cosmos.benchmark.PojoizedJson x =
resp.getItem();
return x;
}
).flux();
createDocumentObservables.add(obs);
}
docsToRead = Flux.merge(Flux.fromIterable(createDocumentObservables), 100).collectList().block();
logger.info("Client have been initialized with data created for host {}", hostAndKey[0]);
clientDocsMap.put(asyncClient, docsToRead);
}
}
logger.info("Total number of client created for ReadThroughputWithMultipleClient {}", clientDocsMap.size());
} catch (FileNotFoundException e) {
logger.error(e.getMessage());
} catch (IOException e) {
logger.error(e.getMessage());
}
MeterRegistry registry = configuration.getAzureMonitorMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
registry = configuration.getGraphiteMeterRegistry();
if (registry != null) {
BridgeInternal.monitorTelemetry(registry);
}
} | class AsynReadWithMultipleClients<T> {
private final static String ACCOUNT_ENDPOINT_TAG = "AccountEndpoint=";
private final static String ACCOUNT_KEY_TAG = "AccountKey=";
private final Semaphore concurrencyControlSemaphore;
private final Logger logger;
private final Configuration configuration;
private MetricRegistry metricsRegistry = new MetricRegistry();
private ScheduledReporter reporter;
private Meter successMeter;
private Meter failureMeter;
private Timer latency;
private Map<CosmosAsyncClient, List<PojoizedJson>> clientDocsMap = new HashMap<>();
AsynReadWithMultipleClients(Configuration cfg) {
logger = LoggerFactory.getLogger(this.getClass());
this.configuration = cfg;
createClients();
if (configuration.getGraphiteEndpoint() != null) {
final Graphite graphite = new Graphite(new InetSocketAddress(configuration.getGraphiteEndpoint(), configuration.getGraphiteEndpointPort()));
reporter = GraphiteReporter.forRegistry(metricsRegistry)
.prefixedWith(configuration.getOperationType().name())
.convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS)
.filter(MetricFilter.ALL)
.build(graphite);
} else {
reporter = ConsoleReporter.forRegistry(metricsRegistry).convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS).build();
}
concurrencyControlSemaphore = new Semaphore(cfg.getConcurrency());
}
void run() throws Exception {
successMeter = metricsRegistry.meter("
failureMeter = metricsRegistry.meter("
latency = metricsRegistry.timer("Latency");
reporter.start(configuration.getPrintingInterval(), TimeUnit.SECONDS);
AtomicLong count = new AtomicLong(0);
long i;
long startTime = System.currentTimeMillis();
for (i = 0; shouldContinue(startTime, i); i++) {
BaseSubscriber<PojoizedJson> baseSubscriber = new BaseSubscriber<PojoizedJson>() {
@Override
protected void hookOnSubscribe(Subscription subscription) {
super.hookOnSubscribe(subscription);
}
@Override
protected void hookOnNext(PojoizedJson value) {
logger.debug("hookOnNext: {}, count:{}", value, count.get());
}
@Override
protected void hookOnCancel() {
this.hookOnError(new CancellationException());
}
@Override
protected void hookOnComplete() {
successMeter.mark();
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onSuccess();
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
@Override
protected void hookOnError(Throwable throwable) {
failureMeter.mark();
logger.error("Encountered failure {} on thread {}",
throwable.getMessage(), Thread.currentThread().getName(), throwable);
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onError(throwable);
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
};
performWorkload(baseSubscriber, i);
}
synchronized (count) {
while (count.get() < i) {
count.wait();
}
}
long endTime = System.currentTimeMillis();
logger.info("[{}] operations performed in [{}] seconds.",
configuration.getNumberOfOperations(), (int) ((endTime - startTime) / 1000));
reporter.report();
reporter.close();
}
void shutdown() {
for (CosmosAsyncClient asyncClient : clientDocsMap.keySet()) {
asyncClient.getDatabase(configuration.getDatabaseId()).getContainer(this.configuration.getCollectionId()).delete().block();
asyncClient.close();
}
logger.info("Deleted test containers {} for all the hosts" , this.configuration.getCollectionId());
}
protected void onSuccess() {
}
protected void onError(Throwable throwable) {
}
private PojoizedJson generateDocument(String idString, String dataFieldValue, String partitionKey) {
com.azure.cosmos.benchmark.PojoizedJson instance = new com.azure.cosmos.benchmark.PojoizedJson();
Map<String, String> properties = instance.getInstance();
properties.put("id", idString);
properties.put(partitionKey, idString);
for (int i = 0; i < configuration.getDocumentDataFieldCount(); i++) {
properties.put("dataField" + i, dataFieldValue);
}
return instance;
}
private boolean shouldContinue(long startTimeMillis, long iterationCount) {
Duration maxDurationTime = configuration.getMaxRunningTimeDuration();
int maxNumberOfOperations = configuration.getNumberOfOperations();
if (maxDurationTime == null) {
return iterationCount < maxNumberOfOperations;
}
if (startTimeMillis + maxDurationTime.toMillis() < System.currentTimeMillis()) {
return false;
}
if (maxNumberOfOperations < 0) {
return true;
}
return iterationCount < maxNumberOfOperations;
}
private void performWorkload(BaseSubscriber<PojoizedJson> baseSubscriber, long i) throws InterruptedException {
Mono<PojoizedJson> result;
int clientIndex = (int) (i % clientDocsMap.size());
CosmosAsyncClient client = (CosmosAsyncClient) clientDocsMap.keySet().toArray()[clientIndex];
int docIndex = (int) i % clientDocsMap.get(client).size();
PojoizedJson doc = clientDocsMap.get(client).get(docIndex);
String partitionKeyValue = doc.getId();
result = client.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).readItem(doc.getId(),
new PartitionKey(partitionKeyValue),
PojoizedJson.class).map(CosmosAsyncItemResponse::getItem);
concurrencyControlSemaphore.acquire();
AsyncReadBenchmark.LatencySubscriber<PojoizedJson> latencySubscriber = new AsyncReadBenchmark.LatencySubscriber<>(baseSubscriber);
latencySubscriber.context = latency.time();
result.subscribeOn(Schedulers.parallel()).subscribe(latencySubscriber);
}
} | class AsynReadWithMultipleClients<T> {
private final static String ACCOUNT_ENDPOINT_TAG = "AccountEndpoint=";
private final static String ACCOUNT_KEY_TAG = "AccountKey=";
private final Semaphore concurrencyControlSemaphore;
private final Logger logger;
private final Configuration configuration;
private MetricRegistry metricsRegistry = new MetricRegistry();
private ScheduledReporter reporter;
private Meter successMeter;
private Meter failureMeter;
private Timer latency;
private Map<CosmosAsyncClient, List<PojoizedJson>> clientDocsMap = new HashMap<>();
private List<CosmosAsyncDatabase> databaseListToClear = new ArrayList<>();
private List<CosmosAsyncContainer> collectionListToClear = new ArrayList<>();
AsynReadWithMultipleClients(Configuration cfg) {
logger = LoggerFactory.getLogger(this.getClass());
this.configuration = cfg;
createClients();
if (configuration.getGraphiteEndpoint() != null) {
final Graphite graphite = new Graphite(new InetSocketAddress(configuration.getGraphiteEndpoint(), configuration.getGraphiteEndpointPort()));
reporter = GraphiteReporter.forRegistry(metricsRegistry)
.prefixedWith(configuration.getOperationType().name())
.convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS)
.filter(MetricFilter.ALL)
.build(graphite);
} else {
reporter = ConsoleReporter.forRegistry(metricsRegistry).convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS).build();
}
concurrencyControlSemaphore = new Semaphore(cfg.getConcurrency());
}
void run() throws Exception {
successMeter = metricsRegistry.meter("
failureMeter = metricsRegistry.meter("
latency = metricsRegistry.timer("Latency");
reporter.start(configuration.getPrintingInterval(), TimeUnit.SECONDS);
AtomicLong count = new AtomicLong(0);
long i;
long startTime = System.currentTimeMillis();
for (i = 0; shouldContinue(startTime, i); i++) {
BaseSubscriber<PojoizedJson> baseSubscriber = new BaseSubscriber<PojoizedJson>() {
@Override
protected void hookOnSubscribe(Subscription subscription) {
super.hookOnSubscribe(subscription);
}
@Override
protected void hookOnNext(PojoizedJson value) {
logger.debug("hookOnNext: {}, count:{}", value, count.get());
}
@Override
protected void hookOnCancel() {
this.hookOnError(new CancellationException());
}
@Override
protected void hookOnComplete() {
successMeter.mark();
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onSuccess();
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
@Override
protected void hookOnError(Throwable throwable) {
failureMeter.mark();
logger.error("Encountered failure {} on thread {}",
throwable.getMessage(), Thread.currentThread().getName(), throwable);
concurrencyControlSemaphore.release();
AsynReadWithMultipleClients.this.onError(throwable);
synchronized (count) {
count.incrementAndGet();
count.notify();
}
}
};
performWorkload(baseSubscriber, i);
}
synchronized (count) {
while (count.get() < i) {
count.wait();
}
}
long endTime = System.currentTimeMillis();
logger.info("[{}] operations performed in [{}] seconds.",
configuration.getNumberOfOperations(), (int) ((endTime - startTime) / 1000));
reporter.report();
reporter.close();
}
void shutdown() {
for (CosmosAsyncDatabase database : databaseListToClear) {
database.delete().block();
}
if (databaseListToClear.size() > 0) {
logger.info("Deleted database {} created on accounts for this test", this.configuration.getDatabaseId());
}
for (CosmosAsyncContainer container : collectionListToClear) {
container.delete().block();
}
if (collectionListToClear.size() > 0) {
logger.info("Deleted collection {} created on accounts for this test", this.configuration.getCollectionId());
}
for (CosmosAsyncClient asyncClient : clientDocsMap.keySet()) {
asyncClient.close();
}
}
protected void onSuccess() {
}
protected void onError(Throwable throwable) {
}
private PojoizedJson generateDocument(String idString, String dataFieldValue, String partitionKey) {
com.azure.cosmos.benchmark.PojoizedJson instance = new com.azure.cosmos.benchmark.PojoizedJson();
Map<String, String> properties = instance.getInstance();
properties.put("id", idString);
properties.put(partitionKey, idString);
for (int i = 0; i < configuration.getDocumentDataFieldCount(); i++) {
properties.put("dataField" + i, dataFieldValue);
}
return instance;
}
private boolean shouldContinue(long startTimeMillis, long iterationCount) {
Duration maxDurationTime = configuration.getMaxRunningTimeDuration();
int maxNumberOfOperations = configuration.getNumberOfOperations();
if (maxDurationTime == null) {
return iterationCount < maxNumberOfOperations;
}
if (startTimeMillis + maxDurationTime.toMillis() < System.currentTimeMillis()) {
return false;
}
if (maxNumberOfOperations < 0) {
return true;
}
return iterationCount < maxNumberOfOperations;
}
private void performWorkload(BaseSubscriber<PojoizedJson> baseSubscriber, long i) throws InterruptedException {
Mono<PojoizedJson> result;
int clientIndex = (int) (i % clientDocsMap.size());
CosmosAsyncClient client = (CosmosAsyncClient) clientDocsMap.keySet().toArray()[clientIndex];
int docIndex = (int) i % clientDocsMap.get(client).size();
PojoizedJson doc = clientDocsMap.get(client).get(docIndex);
String partitionKeyValue = doc.getId();
result = client.getDatabase(configuration.getDatabaseId()).getContainer(configuration.getCollectionId()).readItem(doc.getId(),
new PartitionKey(partitionKeyValue),
PojoizedJson.class).map(CosmosAsyncItemResponse::getItem);
concurrencyControlSemaphore.acquire();
AsyncReadBenchmark.LatencySubscriber<PojoizedJson> latencySubscriber = new AsyncReadBenchmark.LatencySubscriber<>(baseSubscriber);
latencySubscriber.context = latency.time();
result.subscribeOn(Schedulers.parallel()).subscribe(latencySubscriber);
}
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.